From 93e23e769f1891cc3db010e2616be09980c714a1 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Wed, 2 Oct 2019 17:03:43 +0100 Subject: [PATCH 01/82] Code clean up Deletes debug/logging pair. Defines logging and bitcoin_cli as system-wide variables --- pisa/__init__.py | 18 ++- pisa/api.py | 27 ++--- pisa/appointment.py | 2 +- pisa/inspector.py | 47 +++----- pisa/pisad.py | 27 ++--- pisa/responder.py | 131 +++++++++------------ pisa/tools.py | 29 +++-- pisa/utils/{authproxy.py => auth_proxy.py} | 0 pisa/{ => utils}/zmq_subscriber.py | 7 +- pisa/watcher.py | 84 ++++++------- 10 files changed, 158 insertions(+), 214 deletions(-) rename pisa/utils/{authproxy.py => auth_proxy.py} (100%) rename pisa/{ => utils}/zmq_subscriber.py (84%) diff --git a/pisa/__init__.py b/pisa/__init__.py index 5fd2667..70e02c2 100644 --- a/pisa/__init__.py +++ b/pisa/__init__.py @@ -1,2 +1,18 @@ +from pisa.utils.auth_proxy import AuthServiceProxy +import pisa.conf as conf +import logging + HOST = 'localhost' -PORT = 9814 \ No newline at end of file +PORT = 9814 + +# Configure logging +logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO, handlers=[ + logging.FileHandler(conf.SERVER_LOG_FILE), + logging.StreamHandler() +]) + +# Create RPC connection with bitcoind +# TODO: Check if a long lived connection like this may create problems (timeouts) +bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (conf.BTC_RPC_USER, conf.BTC_RPC_PASSWD, conf.BTC_RPC_HOST, + conf.BTC_RPC_PORT)) + diff --git a/pisa/api.py b/pisa/api.py index 15bf4a2..8f26037 100644 --- a/pisa/api.py +++ b/pisa/api.py @@ -1,15 +1,10 @@ -from pisa import * +from pisa import HOST, PORT, logging, bitcoin_cli from pisa.watcher import Watcher from pisa.inspector import Inspector from pisa.appointment import Appointment from flask import Flask, request, Response, abort, jsonify import json - -# FIXME: HERE FOR TESTING (get_block_count). REMOVE WHEN REMOVING THE FUNCTION -from pisa.utils.authproxy import AuthServiceProxy -from pisa.conf import BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT - # ToDo: #5-add-async-to-api app = Flask(__name__) HTTP_OK = 200 @@ -22,15 +17,14 @@ def add_appointment(): remote_addr = request.environ.get('REMOTE_ADDR') remote_port = request.environ.get('REMOTE_PORT') - if debug: - logging.info('[API] connection accepted from {}:{}'.format(remote_addr, remote_port)) + logging.info('[API] connection accepted from {}:{}'.format(remote_addr, remote_port)) # Check content type once if properly defined request_data = json.loads(request.get_json()) appointment = inspector.inspect(request_data) if type(appointment) == Appointment: - appointment_added = watcher.add_appointment(appointment, debug, logging) + appointment_added = watcher.add_appointment(appointment) # ToDo: #13-create-server-side-signature-receipt if appointment_added: @@ -49,9 +43,7 @@ def add_appointment(): rcode = HTTP_BAD_REQUEST response = "appointment rejected. 
Request does not match the standard"
 
-    if debug:
-        logging.info('[API] sending response and disconnecting: {} --> {}:{}'.format(response, remote_addr,
-                                                                                     remote_port))
+    logging.info('[API] sending response and disconnecting: {} --> {}:{}'.format(response, remote_addr, remote_port))
 
     return Response(response, status=rcode, mimetype='text/plain')
 
@@ -115,21 +107,16 @@ def get_all_appointments():
 
 @app.route('/get_block_count', methods=['GET'])
 def get_block_count():
-    bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST,
-                                                           BTC_RPC_PORT))
-
     return jsonify({"block_count": bitcoin_cli.getblockcount()})
 
 
-def start_api(d, l):
+def start_api():
     # FIXME: Pretty ugly but I haven't found a proper way to pass it to add_appointment
-    global debug, logging, watcher, inspector
-    debug = d
-    logging = l
+    global watcher, inspector
 
     # ToDo: #18-separate-api-from-watcher
     watcher = Watcher()
-    inspector = Inspector(debug, logging)
+    inspector = Inspector()
 
     # Setting Flask log to ERROR only so it does not mess with our logging
     logging.getLogger('werkzeug').setLevel(logging.ERROR)
diff --git a/pisa/appointment.py b/pisa/appointment.py
index af304ee..f58538b 100644
--- a/pisa/appointment.py
+++ b/pisa/appointment.py
@@ -19,7 +19,7 @@ class Appointment:
 
         return appointment
 
-    # ToDO: #3-improve-appointment-strcuture
+    # ToDO: #3-improve-appointment-structure
diff --git a/pisa/inspector.py b/pisa/inspector.py
index f2c2bb0..5b23cb3 100644
--- a/pisa/inspector.py
+++ b/pisa/inspector.py
@@ -1,16 +1,12 @@
 import re
-from pisa.appointment import Appointment
+import pisa.conf as conf
 from pisa import errors
-from pisa.utils.authproxy import AuthServiceProxy, JSONRPCException
-from pisa.conf import BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT, MIN_DISPUTE_DELTA, \
-    SUPPORTED_CIPHERS, SUPPORTED_HASH_FUNCTIONS
+from pisa import logging, bitcoin_cli
+from pisa.appointment import Appointment
+from pisa.utils.auth_proxy import JSONRPCException
 
 
 class Inspector:
-    def __init__(self, debug=False, logging=None):
-        self.debug = debug
-        self.logging = logging
-
     def inspect(self, data):
         locator = data.get('locator')
         start_time = data.get('start_time')
@@ -20,8 +16,6 @@ class Inspector:
         cipher = data.get('cipher')
         hash_function = data.get('hash_function')
 
-        bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST,
-                                                               BTC_RPC_PORT))
 
         try:
             block_height = bitcoin_cli.getblockcount()
 
@@ -45,8 +39,7 @@ class Inspector:
                 r = (rcode, message)
 
         except JSONRPCException as e:
-            if self.debug:
-                self.logging.error("[Inspector] JSONRPCException. Error code {}".format(e))
+            logging.error("[Inspector] JSONRPCException. Error code {}".format(e))
 
             # In case of an unknown exception, assign a special rcode and reason. 
r = (errors.UNKNOWN_JSON_RPC_EXCEPTION, "Unexpected error occurred") @@ -71,8 +64,7 @@ class Inspector: rcode = errors.APPOINTMENT_WRONG_FIELD_FORMAT message = "wrong locator format ({})".format(locator) - if self.debug and message: - self.logging.error("[Inspector] {}".format(message)) + logging.error("[Inspector] {}".format(message)) return rcode, message @@ -95,8 +87,7 @@ class Inspector: else: message = "start_time too close to current height" - if self.debug and message: - self.logging.error("[Inspector] {}".format(message)) + logging.error("[Inspector] {}".format(message)) return rcode, message @@ -122,8 +113,7 @@ class Inspector: rcode = errors.APPOINTMENT_FIELD_TOO_SMALL message = 'end_time is in the past' - if self.debug and message: - self.logging.error("[Inspector] {}".format(message)) + logging.error("[Inspector] {}".format(message)) return rcode, message @@ -139,13 +129,12 @@ class Inspector: elif t != int: rcode = errors.APPOINTMENT_WRONG_FIELD_TYPE message = "wrong dispute_delta data type ({})".format(t) - elif dispute_delta < MIN_DISPUTE_DELTA: + elif dispute_delta < conf.MIN_DISPUTE_DELTA: rcode = errors.APPOINTMENT_FIELD_TOO_SMALL message = "dispute delta too small. The dispute delta should be at least {} (current: {})".format( - MIN_DISPUTE_DELTA, dispute_delta) + conf.MIN_DISPUTE_DELTA, dispute_delta) - if self.debug and message: - self.logging.error("[Inspector] {}".format(message)) + logging.error("[Inspector] {}".format(message)) return rcode, message @@ -166,8 +155,8 @@ class Inspector: # ToDo: #6 We may want to define this to be at least as long as one block of the cipher we are using rcode = errors.APPOINTMENT_WRONG_FIELD message = "wrong encrypted_blob" - if self.debug and message: - self.logging.error("[Inspector] {}".format(message)) + + logging.error("[Inspector] {}".format(message)) return rcode, message @@ -183,12 +172,11 @@ class Inspector: elif t != str: rcode = errors.APPOINTMENT_WRONG_FIELD_TYPE message = "wrong cipher data type ({})".format(t) - elif cipher not in SUPPORTED_CIPHERS: + elif cipher not in conf.SUPPORTED_CIPHERS: rcode = errors.APPOINTMENT_CIPHER_NOT_SUPPORTED message = "cipher not supported: {}".format(cipher) - if self.debug and message: - self.logging.error("[Inspector] {}".format(message)) + logging.error("[Inspector] {}".format(message)) return rcode, message @@ -204,11 +192,10 @@ class Inspector: elif t != str: rcode = errors.APPOINTMENT_WRONG_FIELD_TYPE message = "wrong hash_function data type ({})".format(t) - elif hash_function not in SUPPORTED_HASH_FUNCTIONS: + elif hash_function not in conf.SUPPORTED_HASH_FUNCTIONS: rcode = errors.APPOINTMENT_HASH_FUNCTION_NOT_SUPPORTED message = "hash_function not supported {}".format(hash_function) - if self.debug and message: - self.logging.error("[Inspector] {}".format(message)) + logging.error("[Inspector] {}".format(message)) return rcode, message diff --git a/pisa/pisad.py b/pisa/pisad.py index 29ba8a1..bd1cc82 100644 --- a/pisa/pisad.py +++ b/pisa/pisad.py @@ -1,36 +1,25 @@ -import logging from sys import argv from getopt import getopt -from threading import Thread +from pisa import logging from pisa.api import start_api from pisa.tools import can_connect_to_bitcoind, in_correct_network -from pisa.utils.authproxy import AuthServiceProxy -from pisa.conf import BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT, BTC_NETWORK, SERVER_LOG_FILE if __name__ == '__main__': debug = False opts, _ = getopt(argv[1:], 'd', ['debug']) for opt, arg in opts: - if opt in ['-d', '--debug']: - debug 
= True + # FIXME: Leaving this here for future option/arguments + pass - # Configure logging - logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO, handlers=[ - logging.FileHandler(SERVER_LOG_FILE), - logging.StreamHandler() - ]) + if can_connect_to_bitcoind(): + if in_correct_network(): + # Fire the api + start_api() - bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, - BTC_RPC_PORT)) - - if can_connect_to_bitcoind(bitcoin_cli): - if in_correct_network(bitcoin_cli, BTC_NETWORK): - # ToDo: This may not have to be a thead. The main thread only creates this and terminates. - api_thread = Thread(target=start_api, args=[debug, logging]) - api_thread.start() else: logging.error("[Pisad] bitcoind is running on a different network, check conf.py and bitcoin.conf. " "Shutting down") + else: logging.error("[Pisad] can't connect to bitcoind. Shutting down") diff --git a/pisa/responder.py b/pisa/responder.py index 9146599..c4b6616 100644 --- a/pisa/responder.py +++ b/pisa/responder.py @@ -2,11 +2,11 @@ from queue import Queue from threading import Thread from hashlib import sha256 from binascii import unhexlify -from pisa.zmq_subscriber import ZMQHandler +from pisa import logging, bitcoin_cli from pisa.rpc_errors import * from pisa.tools import check_tx_in_chain -from pisa.utils.authproxy import AuthServiceProxy, JSONRPCException -from pisa.conf import BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT +from pisa.utils.zmq_subscriber import ZMQHandler +from pisa.utils.auth_proxy import JSONRPCException CONFIRMATIONS_BEFORE_RETRY = 6 MIN_CONFIRMATIONS = 6 @@ -42,31 +42,24 @@ class Responder: self.asleep = True self.zmq_subscriber = None - def add_response(self, uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, debug, logging, - retry=False): - - bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, - BTC_RPC_PORT)) + def add_response(self, uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, retry=False): try: - if debug: - if self.asleep: - logging.info("[Responder] waking up!") - logging.info("[Responder] pushing transaction to the network (txid: {})".format(justice_txid)) + if self.asleep: + logging.info("[Responder] waking up!") + logging.info("[Responder] pushing transaction to the network (txid: {})".format(justice_txid)) bitcoin_cli.sendrawtransaction(justice_rawtx) # handle_responses can call add_response recursively if a broadcast transaction does not get confirmations # retry holds such information. 
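        # A rough sketch of that retry cycle, for illustration only (all names are defined in this
        # class): create_job registers the job and handle_responses counts confirmations per block;
        # once a job misses CONFIRMATIONS_BEFORE_RETRY confirmations, handle_responses calls
        # add_response(retry=True), and create_job then resets missed_confirmations and increments
        # retry_counter.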
- self.create_job(uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, debug, logging, - retry=retry) + self.create_job(uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, retry=retry) except JSONRPCException as e: - self.handle_send_failures(e, bitcoin_cli, uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, - debug, logging, retry) + self.handle_send_failures(e, uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, retry) - def create_job(self, uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, debug, logging, - confirmations=0, retry=False): + def create_job(self, uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, confirmations=0, + retry=False): # ToDo: #23-define-behaviour-approaching-end if retry: @@ -81,25 +74,22 @@ class Responder: else: self.tx_job_map[justice_txid] = [uuid] - if debug: - logging.info('[Responder] new job added (dispute txid = {}, justice txid = {}, appointment end = {})'. - format(dispute_txid, justice_txid, appointment_end)) + logging.info('[Responder] new job added (dispute txid = {}, justice txid = {}, appointment end = {})' + .format(dispute_txid, justice_txid, appointment_end)) if self.asleep: self.asleep = False self.block_queue = Queue() - zmq_thread = Thread(target=self.do_subscribe, args=[self.block_queue, debug, logging]) - responder = Thread(target=self.handle_responses, args=[debug, logging]) + zmq_thread = Thread(target=self.do_subscribe, args=[self.block_queue]) + responder = Thread(target=self.handle_responses) zmq_thread.start() responder.start() - def do_subscribe(self, block_queue, debug, logging): + def do_subscribe(self, block_queue): self.zmq_subscriber = ZMQHandler(parent='Responder') - self.zmq_subscriber.handle(block_queue, debug, logging) + self.zmq_subscriber.handle(block_queue) - def handle_responses(self, debug, logging): - bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, - BTC_RPC_PORT)) + def handle_responses(self): prev_block_hash = 0 while len(self.jobs) > 0: # We get notified for every new received block @@ -110,14 +100,12 @@ class Responder: txs = block.get('tx') height = block.get('height') - if debug: - logging.info("[Responder] new block received {}".format(block_hash)) - logging.info("[Responder] prev. block hash {}".format(block.get('previousblockhash'))) - logging.info("[Responder] list of transactions: {}".format(txs)) + logging.info("[Responder] new block received {}".format(block_hash)) + logging.info("[Responder] prev. block hash {}".format(block.get('previousblockhash'))) + logging.info("[Responder] list of transactions: {}".format(txs)) except JSONRPCException as e: - if debug: - logging.error("[Responder] couldn't get block from bitcoind. Error code {}".format(e)) + logging.error("[Responder] couldn't get block from bitcoind. 
Error code {}".format(e)) continue @@ -129,20 +117,19 @@ class Responder: if justice_txid in txs or self.jobs[uuid].confirmations > 0: self.jobs[uuid].confirmations += 1 - if debug: - logging.info("[Responder] new confirmation received for job = {}, txid = {}".format( - uuid, justice_txid)) + logging.info("[Responder] new confirmation received for job = {}, txid = {}".format( + uuid, justice_txid)) elif self.jobs[uuid].missed_confirmations >= CONFIRMATIONS_BEFORE_RETRY: # If a transactions has missed too many confirmations for a while we'll try to rebroadcast # ToDO: #22-discuss-confirmations-before-retry # ToDo: #23-define-behaviour-approaching-end self.add_response(uuid, self.jobs[uuid].dispute_txid, justice_txid, - self.jobs[uuid].justice_rawtx, self.jobs[uuid].appointment_end, debug, - logging, retry=True) - if debug: - logging.warning("[Responder] txid = {} has missed {} confirmations. Rebroadcasting" - .format(justice_txid, CONFIRMATIONS_BEFORE_RETRY)) + self.jobs[uuid].justice_rawtx, self.jobs[uuid].appointment_end, + retry=True) + + logging.warning("[Responder] txid = {} has missed {} confirmations. Rebroadcasting" + .format(justice_txid, CONFIRMATIONS_BEFORE_RETRY)) else: # Otherwise we increase the number of missed confirmations @@ -153,14 +140,13 @@ class Responder: # The end of the appointment has been reached completed_jobs.append(uuid) - self.remove_completed_jobs(completed_jobs, height, debug, logging) + self.remove_completed_jobs(completed_jobs, height) else: - if debug: - logging.warning("[Responder] reorg found! local prev. block id = {}, remote prev. block id = {}" - .format(prev_block_hash, block.get('previousblockhash'))) + logging.warning("[Responder] reorg found! local prev. block id = {}, remote prev. block id = {}" + .format(prev_block_hash, block.get('previousblockhash'))) - self.handle_reorgs(bitcoin_cli, debug, logging) + self.handle_reorgs() prev_block_hash = block.get('hash') @@ -168,11 +154,9 @@ class Responder: self.asleep = True self.zmq_subscriber.terminate = True - if debug: - logging.info("[Responder] no more pending jobs, going back to sleep") + logging.info("[Responder] no more pending jobs, going back to sleep") - def handle_send_failures(self, e, bitcoin_cli, uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, - debug, logging, retry): + def handle_send_failures(self, e, uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, retry): # Since we're pushing a raw transaction to the network we can get two kind of rejections: # RPC_VERIFY_REJECTED and RPC_VERIFY_ALREADY_IN_CHAIN. The former implies that the transaction is rejected # due to network rules, whereas the later implies that the transaction is already in the blockchain. @@ -185,38 +169,36 @@ class Responder: elif e.error.get('code') == RPC_VERIFY_ALREADY_IN_CHAIN: try: - if debug: - logging.info("[Responder] {} is already in the blockchain. Getting the confirmation count and " - "start monitoring the transaction".format(justice_txid)) + logging.info("[Responder] {} is already in the blockchain. 
Getting the confirmation count and start " + "monitoring the transaction".format(justice_txid)) # If the transaction is already in the chain, we get the number of confirmations and watch the job # until the end of the appointment tx_info = bitcoin_cli.getrawtransaction(justice_txid, 1) confirmations = int(tx_info.get("confirmations")) - self.create_job(uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, debug, logging, - retry=retry, confirmations=confirmations) + self.create_job(uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, retry=retry, + confirmations=confirmations) except JSONRPCException as e: # While it's quite unlikely, the transaction that was already in the blockchain could have been # reorged while we were querying bitcoind to get the confirmation count. In such a case we just # restart the job if e.error.get('code') == RPC_INVALID_ADDRESS_OR_KEY: - self.add_response(uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, debug, - logging, retry=retry) - elif debug: + self.add_response(uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, retry=retry) + + else: # If something else happens (unlikely but possible) log it so we can treat it in future releases logging.error("[Responder] JSONRPCException. Error {}".format(e)) - elif debug: + else: # If something else happens (unlikely but possible) log it so we can treat it in future releases logging.error("[Responder] JSONRPCException. Error {}".format(e)) - def remove_completed_jobs(self, completed_jobs, height, debug, logging): + def remove_completed_jobs(self, completed_jobs, height): for uuid in completed_jobs: - if debug: - logging.info("[Responder] job completed (uuid = {}, justice_txid = {}). Appointment ended at " - "block {} after {} confirmations".format(uuid, self.jobs[uuid].justice_txid, height, - self.jobs[uuid].confirmations)) + logging.info("[Responder] job completed (uuid = {}, justice_txid = {}). Appointment ended at " + "block {} after {} confirmations".format(uuid, self.jobs[uuid].justice_txid, height, + self.jobs[uuid].confirmations)) # ToDo: #9-add-data-persistency justice_txid = self.jobs[uuid].justice_txid @@ -225,30 +207,25 @@ class Responder: if len(self.tx_job_map[justice_txid]) == 1: self.tx_job_map.pop(justice_txid) - if debug: - logging.info("[Responder] no more jobs for justice_txid {}".format(justice_txid)) + logging.info("[Responder] no more jobs for justice_txid {}".format(justice_txid)) else: self.tx_job_map[justice_txid].remove(uuid) - def handle_reorgs(self, bitcoin_cli, debug, logging): + def handle_reorgs(self): for uuid, job in self.jobs.items(): # First we check if the dispute transaction is still in the blockchain. 
If not, the justice can not be # there either, so we'll need to call the reorg manager straight away - dispute_in_chain, _ = check_tx_in_chain(bitcoin_cli, job.dispute_txid, debug, logging, - parent='Responder', - tx_label='dispute tx') + dispute_in_chain, _ = check_tx_in_chain(job.dispute_txid, parent='Responder', tx_label='dispute tx') # If the dispute is there, we can check the justice tx if dispute_in_chain: - justice_in_chain, justice_confirmations = check_tx_in_chain(bitcoin_cli, job.justice_txid, debug, - logging, parent='Responder', + justice_in_chain, justice_confirmations = check_tx_in_chain(job.justice_txid, parent='Responder', tx_label='justice tx') # If both transactions are there, we only need to update the justice tx confirmation count if justice_in_chain: - if debug: - logging.info("[Responder] updating confirmation count for {}: prev. {}, current {}".format( + logging.info("[Responder] updating confirmation count for {}: prev. {}, current {}".format( job.justice_txid, job.confirmations, justice_confirmations)) job.confirmations = justice_confirmations @@ -258,9 +235,7 @@ class Responder: # DISCUSS: Adding job back, should we flag it as retried? # FIXME: Whether we decide to increase the retried counter or not, the current counter should be # maintained. There is no way of doing so with the current approach. Update if required - self.add_response(uuid, job.dispute_txid, job.justice_txid, job.justice_rawtx, - job.appointment_end, - debug, logging) + self.add_response(uuid, job.dispute_txid, job.justice_txid, job.justice_rawtx, job.appointment_end) else: # ToDo: #24-properly-handle-reorgs diff --git a/pisa/tools.py b/pisa/tools.py index 42ad73f..3162ee2 100644 --- a/pisa/tools.py +++ b/pisa/tools.py @@ -1,10 +1,12 @@ import re -from pisa.utils.authproxy import JSONRPCException +import pisa.conf as conf +from pisa import logging, bitcoin_cli +from pisa.utils.auth_proxy import JSONRPCException from pisa.rpc_errors import RPC_INVALID_ADDRESS_OR_KEY from http.client import HTTPException -def check_tx_in_chain(bitcoin_cli, tx_id, debug, logging, parent='', tx_label='transaction'): +def check_tx_in_chain(tx_id, parent='', tx_label='transaction'): tx_in_chain = False confirmations = 0 @@ -14,22 +16,23 @@ def check_tx_in_chain(bitcoin_cli, tx_id, debug, logging, parent='', tx_label='t if tx_info.get("confirmations"): confirmations = int(tx_info.get("confirmations")) tx_in_chain = True - if debug: - logging.error("[{}] {} found in the blockchain (txid: {}) ".format(parent, tx_label, tx_id)) - elif debug: + logging.error("[{}] {} found in the blockchain (txid: {}) ".format(parent, tx_label, tx_id)) + + else: logging.error("[{}] {} found in mempool (txid: {}) ".format(parent, tx_label, tx_id)) + except JSONRPCException as e: if e.error.get('code') == RPC_INVALID_ADDRESS_OR_KEY: - if debug: - logging.error("[{}] {} not found in mempool nor blockchain (txid: {}) ".format(parent, tx_label, tx_id)) - elif debug: + logging.error("[{}] {} not found in mempool nor blockchain (txid: {}) ".format(parent, tx_label, tx_id)) + + else: # ToDO: Unhandled errors, check this properly logging.error("[{}] JSONRPCException. 
Error code {}".format(parent, e)) return tx_in_chain, confirmations -def can_connect_to_bitcoind(bitcoin_cli): +def can_connect_to_bitcoind(): can_connect = True try: @@ -40,18 +43,18 @@ def can_connect_to_bitcoind(bitcoin_cli): return can_connect -def in_correct_network(bitcoin_cli, network): +def in_correct_network(): mainnet_genesis_block_hash = "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f" testnet3_genesis_block_hash = "000000000933ea01ad0ee984209779baaec3ced90fa3f408719526f8d77f4943" correct_network = False genesis_block_hash = bitcoin_cli.getblockhash(0) - if network == 'mainnet' and genesis_block_hash == mainnet_genesis_block_hash: + if conf.BTC_NETWORK == 'mainnet' and genesis_block_hash == mainnet_genesis_block_hash: correct_network = True - elif network == 'testnet' and genesis_block_hash == testnet3_genesis_block_hash: + elif conf.BTC_NETWORK == 'testnet' and genesis_block_hash == testnet3_genesis_block_hash: correct_network = True - elif network == 'regtest' and genesis_block_hash not in [mainnet_genesis_block_hash, testnet3_genesis_block_hash]: + elif conf.BTC_NETWORK == 'regtest' and genesis_block_hash not in [mainnet_genesis_block_hash, testnet3_genesis_block_hash]: correct_network = True return correct_network diff --git a/pisa/utils/authproxy.py b/pisa/utils/auth_proxy.py similarity index 100% rename from pisa/utils/authproxy.py rename to pisa/utils/auth_proxy.py diff --git a/pisa/zmq_subscriber.py b/pisa/utils/zmq_subscriber.py similarity index 84% rename from pisa/zmq_subscriber.py rename to pisa/utils/zmq_subscriber.py index 90e706c..9ff9043 100644 --- a/pisa/zmq_subscriber.py +++ b/pisa/utils/zmq_subscriber.py @@ -1,7 +1,9 @@ import zmq import binascii +from pisa import logging from pisa.conf import FEED_PROTOCOL, FEED_ADDR, FEED_PORT + # ToDo: #7-add-async-back-to-zmq class ZMQHandler: """ Adapted from https://github.com/bitcoin/bitcoin/blob/master/contrib/zmq/zmq_sub.py""" @@ -14,7 +16,7 @@ class ZMQHandler: self.parent = parent self.terminate = False - def handle(self, block_queue, debug, logging): + def handle(self, block_queue): while not self.terminate: msg = self.zmqSubSocket.recv_multipart() @@ -27,5 +29,4 @@ class ZMQHandler: block_hash = binascii.hexlify(body).decode('UTF-8') block_queue.put(block_hash) - if debug: - logging.info("[ZMQHandler-{}] new block received via ZMQ".format(self.parent, block_hash)) + logging.info("[ZMQHandler-{}] new block received via ZMQ".format(self.parent, block_hash)) diff --git a/pisa/watcher.py b/pisa/watcher.py index 680db56..2f517c1 100644 --- a/pisa/watcher.py +++ b/pisa/watcher.py @@ -1,12 +1,13 @@ from binascii import hexlify, unhexlify from queue import Queue from threading import Thread +from pisa import logging, bitcoin_cli from pisa.responder import Responder -from pisa.zmq_subscriber import ZMQHandler -from pisa.utils.authproxy import AuthServiceProxy, JSONRPCException +from pisa.utils.zmq_subscriber import ZMQHandler +from pisa.utils.auth_proxy import JSONRPCException from hashlib import sha256 from uuid import uuid4 -from pisa.conf import BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT, MAX_APPOINTMENTS, EXPIRY_DELTA +from pisa.conf import MAX_APPOINTMENTS, EXPIRY_DELTA class Watcher: @@ -19,7 +20,7 @@ class Watcher: self.zmq_subscriber = None self.responder = Responder() - def add_appointment(self, appointment, debug, logging): + def add_appointment(self, appointment): # Rationale: # The Watcher will analyze every received block looking for appointment matches. 
If there is no work # to do the watcher can go sleep (if appointments = {} then asleep = True) otherwise for every received block @@ -45,36 +46,30 @@ class Watcher: if self.asleep: self.asleep = False self.block_queue = Queue() - zmq_thread = Thread(target=self.do_subscribe, args=[self.block_queue, debug, logging]) - watcher = Thread(target=self.do_watch, args=[debug, logging]) + zmq_thread = Thread(target=self.do_subscribe, args=[self.block_queue]) + watcher = Thread(target=self.do_watch) zmq_thread.start() watcher.start() - if debug: - logging.info("[Watcher] waking up!") + logging.info("[Watcher] waking up!") appointment_added = True - if debug: - logging.info('[Watcher] new appointment accepted (locator = {})'.format(appointment.locator)) + logging.info('[Watcher] new appointment accepted (locator = {})'.format(appointment.locator)) else: appointment_added = False - if debug: - logging.info('[Watcher] maximum appointments reached, appointment rejected (locator = {})' - .format(appointment.locator)) + logging.info('[Watcher] maximum appointments reached, appointment rejected (locator = {})'.format( + appointment.locator)) return appointment_added - def do_subscribe(self, block_queue, debug, logging): + def do_subscribe(self, block_queue): self.zmq_subscriber = ZMQHandler(parent='Watcher') - self.zmq_subscriber.handle(block_queue, debug, logging) - - def do_watch(self, debug, logging): - bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, - BTC_RPC_PORT)) + self.zmq_subscriber.handle(block_queue) + def do_watch(self): while len(self.appointments) > 0: block_hash = self.block_queue.get() @@ -82,11 +77,10 @@ class Watcher: block = bitcoin_cli.getblock(block_hash) txids = block.get('tx') - if debug: - logging.info("[Watcher] new block received {}".format(block_hash)) - logging.info("[Watcher] list of transactions: {}".format(txids)) + logging.info("[Watcher] new block received {}".format(block_hash)) + logging.info("[Watcher] list of transactions: {}".format(txids)) - self.delete_expired_appointment(block, debug, logging) + self.delete_expired_appointment(block) potential_locators = {sha256(unhexlify(txid)).hexdigest(): txid for txid in txids} @@ -95,21 +89,19 @@ class Watcher: intersection = set(self.locator_uuid_map.keys()).intersection(potential_locators.keys()) potential_matches = {locator: potential_locators[locator] for locator in intersection} - if debug: - if len(potential_matches) > 0: - logging.info("[Watcher] list of potential matches: {}".format(potential_matches)) - else: - logging.info("[Watcher] no potential matches found") + if len(potential_matches) > 0: + logging.info("[Watcher] list of potential matches: {}".format(potential_matches)) + else: + logging.info("[Watcher] no potential matches found") - matches = self.check_potential_matches(potential_matches, bitcoin_cli, debug, logging) + matches = self.check_potential_matches(potential_matches) for locator, uuid, dispute_txid, justice_txid, justice_rawtx in matches: - if debug: - logging.info("[Watcher] notifying responder about {} and deleting appointment {} (uuid: {})" - .format(justice_txid, locator, uuid)) + logging.info("[Watcher] notifying responder about {} and deleting appointment {} (uuid: {})" + .format(justice_txid, locator, uuid)) self.responder.add_response(uuid, dispute_txid, justice_txid, justice_rawtx, - self.appointments[uuid].end_time, debug, logging) + self.appointments[uuid].end_time) # Delete the appointment self.appointments.pop(uuid) @@ -124,17 +116,15 @@ 
class Watcher: self.locator_uuid_map[locator].remove(uuid) except JSONRPCException as e: - if debug: - logging.error("[Watcher] couldn't get block from bitcoind. Error code {}".format(e)) + logging.error("[Watcher] couldn't get block from bitcoind. Error code {}".format(e)) # Go back to sleep if there are no more appointments self.asleep = True self.zmq_subscriber.terminate = True - if debug: - logging.error("[Watcher] no more pending appointments, going back to sleep") + logging.error("[Watcher] no more pending appointments, going back to sleep") - def delete_expired_appointment(self, block, debug, logging): + def delete_expired_appointment(self, block): to_delete = [uuid for uuid, appointment in self.appointments.items() if block["height"] > appointment.end_time + EXPIRY_DELTA] @@ -150,30 +140,26 @@ class Watcher: else: self.locator_uuid_map[locator].remove(uuid) - if debug: - logging.info("[Watcher] end time reached with no match! Deleting appointment {} (uuid: {})" - .format(locator, uuid)) + logging.info("[Watcher] end time reached with no match! Deleting appointment {} (uuid: {})".format(locator, + uuid)) - def check_potential_matches(self, potential_matches, bitcoin_cli, debug, logging): + def check_potential_matches(self, potential_matches): matches = [] for locator, dispute_txid in potential_matches.items(): for uuid in self.locator_uuid_map[locator]: try: # ToDo: #20-test-tx-decrypting-edge-cases - justice_rawtx = self.appointments[uuid].encrypted_blob.decrypt(unhexlify(dispute_txid), debug, - logging) + justice_rawtx = self.appointments[uuid].encrypted_blob.decrypt(unhexlify(dispute_txid)) justice_rawtx = hexlify(justice_rawtx).decode() justice_txid = bitcoin_cli.decoderawtransaction(justice_rawtx).get('txid') matches.append((locator, uuid, dispute_txid, justice_txid, justice_rawtx)) - if debug: - logging.info("[Watcher] match found for locator {} (uuid: {}): {}".format(locator, uuid, - justice_txid)) + logging.info("[Watcher] match found for locator {} (uuid: {}): {}".format(locator, uuid, + justice_txid)) except JSONRPCException as e: # Tx decode failed returns error code -22, maybe we should be more strict here. Leaving it simple # for the POC - if debug: - logging.error("[Watcher] can't build transaction from decoded data. Error code {}".format(e)) + logging.error("[Watcher] can't build transaction from decoded data. 
Error code {}".format(e)) return matches From e9832e44080502abd7fb2b3896da6b860b7dc0c0 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Thu, 3 Oct 2019 11:47:07 +0100 Subject: [PATCH 02/82] More clean up --- pisa/__init__.py | 4 +++- pisa/api.py | 6 ++++-- pisa/appointment.py | 2 -- pisa/encrypted_blob.py | 2 +- pisa/inspector.py | 3 ++- pisa/pisad.py | 1 + pisa/tools.py | 3 ++- 7 files changed, 13 insertions(+), 8 deletions(-) diff --git a/pisa/__init__.py b/pisa/__init__.py index 70e02c2..a279a5c 100644 --- a/pisa/__init__.py +++ b/pisa/__init__.py @@ -1,6 +1,8 @@ +import logging + from pisa.utils.auth_proxy import AuthServiceProxy import pisa.conf as conf -import logging + HOST = 'localhost' PORT = 9814 diff --git a/pisa/api.py b/pisa/api.py index 8f26037..660e5f0 100644 --- a/pisa/api.py +++ b/pisa/api.py @@ -1,9 +1,11 @@ +import json +from flask import Flask, request, Response, abort, jsonify + from pisa import HOST, PORT, logging, bitcoin_cli from pisa.watcher import Watcher from pisa.inspector import Inspector from pisa.appointment import Appointment -from flask import Flask, request, Response, abort, jsonify -import json + # ToDo: #5-add-async-to-api app = Flask(__name__) diff --git a/pisa/appointment.py b/pisa/appointment.py index f58538b..a4d5718 100644 --- a/pisa/appointment.py +++ b/pisa/appointment.py @@ -21,5 +21,3 @@ class Appointment: # ToDO: #3-improve-appointment-structure - - diff --git a/pisa/encrypted_blob.py b/pisa/encrypted_blob.py index 49fd4af..5c1b78f 100644 --- a/pisa/encrypted_blob.py +++ b/pisa/encrypted_blob.py @@ -1,5 +1,5 @@ -from binascii import unhexlify, hexlify from hashlib import sha256 +from binascii import unhexlify, hexlify from cryptography.hazmat.primitives.ciphers.aead import AESGCM diff --git a/pisa/inspector.py b/pisa/inspector.py index 5b23cb3..25bb344 100644 --- a/pisa/inspector.py +++ b/pisa/inspector.py @@ -1,6 +1,7 @@ import re -import pisa.conf as conf + from pisa import errors +import pisa.conf as conf from pisa import logging, bitcoin_cli from pisa.appointment import Appointment from pisa.utils.auth_proxy import JSONRPCException diff --git a/pisa/pisad.py b/pisa/pisad.py index bd1cc82..8fbce83 100644 --- a/pisa/pisad.py +++ b/pisa/pisad.py @@ -1,5 +1,6 @@ from sys import argv from getopt import getopt + from pisa import logging from pisa.api import start_api from pisa.tools import can_connect_to_bitcoind, in_correct_network diff --git a/pisa/tools.py b/pisa/tools.py index 3162ee2..909c64c 100644 --- a/pisa/tools.py +++ b/pisa/tools.py @@ -1,9 +1,10 @@ import re +from http.client import HTTPException + import pisa.conf as conf from pisa import logging, bitcoin_cli from pisa.utils.auth_proxy import JSONRPCException from pisa.rpc_errors import RPC_INVALID_ADDRESS_OR_KEY -from http.client import HTTPException def check_tx_in_chain(tx_id, parent='', tx_label='transaction'): From 5ba6fcb9ef535e04e3cc54f434b32fd12a092355 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Thu, 3 Oct 2019 11:49:24 +0100 Subject: [PATCH 03/82] Improves modularity of the code Adds cleaner and block processor to increase the modularity and reuse of the code --- pisa/block_processor.py | 57 +++++++++++++++++++++++++++++++++++++++++ pisa/cleaner.py | 45 ++++++++++++++++++++++++++++++++ 2 files changed, 102 insertions(+) create mode 100644 pisa/block_processor.py create mode 100644 pisa/cleaner.py diff --git a/pisa/block_processor.py b/pisa/block_processor.py new file mode 100644 index 0000000..b6d9882 --- /dev/null +++ b/pisa/block_processor.py @@ -0,0 
+1,57 @@
+import binascii
+from hashlib import sha256
+
+from pisa import logging, bitcoin_cli
+from pisa.utils.auth_proxy import JSONRPCException
+
+
+class BlockProcessor:
+    @staticmethod
+    def getblock(block_hash):
+        block = None
+
+        try:
+            block = bitcoin_cli.getblock(block_hash)
+
+        except JSONRPCException as e:
+            logging.error("[BlockProcessor] couldn't get block from bitcoind. Error code {}".format(e))
+
+        return block
+
+    @staticmethod
+    def get_potential_matches(txids, locator_uuid_map):
+        potential_locators = {sha256(binascii.unhexlify(txid)).hexdigest(): txid for txid in txids}
+
+        # Check if any of the tx_ids in the received block is an actual match
+        intersection = set(locator_uuid_map.keys()).intersection(potential_locators.keys())
+        potential_matches = {locator: potential_locators[locator] for locator in intersection}
+
+        if len(potential_matches) > 0:
+            logging.info("[BlockProcessor] list of potential matches: {}".format(potential_matches))
+
+        else:
+            logging.info("[BlockProcessor] no potential matches found")
+
+        return potential_matches
+
+    @staticmethod
+    def get_matches(potential_matches, locator_uuid_map, appointments):
+        matches = []
+
+        for locator, dispute_txid in potential_matches.items():
+            for uuid in locator_uuid_map[locator]:
+                try:
+                    # ToDo: #20-test-tx-decrypting-edge-cases
+                    justice_rawtx = appointments[uuid].encrypted_blob.decrypt(binascii.unhexlify(dispute_txid))
+                    justice_rawtx = binascii.hexlify(justice_rawtx).decode()
+                    justice_txid = bitcoin_cli.decoderawtransaction(justice_rawtx).get('txid')
+                    matches.append((locator, uuid, dispute_txid, justice_txid, justice_rawtx))
+
+                    logging.info("[BlockProcessor] match found for locator {} (uuid: {}): {}".format(
+                        locator, uuid, justice_txid))
+
+                except JSONRPCException as e:
+                    # Tx decode failed returns error code -22, maybe we should be more strict here. Leaving it simple
+                    # for the POC
+                    logging.error("[BlockProcessor] can't build transaction from decoded data. Error code {}".format(e))
+
+        return matches
+
diff --git a/pisa/cleaner.py b/pisa/cleaner.py
new file mode 100644
index 0000000..d7808b9
--- /dev/null
+++ b/pisa/cleaner.py
@@ -0,0 +1,45 @@
+import pisa.conf as conf
+from pisa import logging
+
+
+class Cleaner:
+    @staticmethod
+    def delete_expired_appointment(block, appointments, locator_uuid_map):
+        to_delete = [uuid for uuid, appointment in appointments.items()
+                     if block["height"] > appointment.end_time + conf.EXPIRY_DELTA]
+
+        for uuid in to_delete:
+            locator = appointments[uuid].locator
+
+            appointments.pop(uuid)
+
+            if len(locator_uuid_map[locator]) == 1:
+                locator_uuid_map.pop(locator)
+
+            else:
+                locator_uuid_map[locator].remove(uuid)
+
+            logging.info("[Cleaner] end time reached with no match! Deleting appointment {} (uuid: {})".format(locator,
+                                                                                                               uuid))
+
+        return appointments, locator_uuid_map
+
+    @staticmethod
+    def delete_completed_jobs(jobs, tx_job_map, completed_jobs, height):
+        for uuid in completed_jobs:
+            logging.info("[Cleaner] job completed (uuid = {}, justice_txid = {}). 
Appointment ended at block {} after {} confirmations" + .format(uuid, jobs[uuid].justice_txid, height, jobs[uuid].confirmations)) + + # ToDo: #9-add-data-persistence + justice_txid = jobs[uuid].justice_txid + jobs.pop(uuid) + + if len(tx_job_map[justice_txid]) == 1: + tx_job_map.pop(justice_txid) + + logging.info("[Cleaner] no more jobs for justice_txid {}".format(justice_txid)) + + else: + tx_job_map[justice_txid].remove(uuid) + + return jobs, tx_job_map From 3e0cca14d7b88af5a108fe1ac4f25569cf68c6d5 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Thu, 3 Oct 2019 11:49:49 +0100 Subject: [PATCH 04/82] Clean up + higher modular design --- pisa/responder.py | 63 ++++++++++++++++----------------- pisa/watcher.py | 88 +++++++++++------------------------------------ 2 files changed, 50 insertions(+), 101 deletions(-) diff --git a/pisa/responder.py b/pisa/responder.py index c4b6616..e277eea 100644 --- a/pisa/responder.py +++ b/pisa/responder.py @@ -2,9 +2,12 @@ from queue import Queue from threading import Thread from hashlib import sha256 from binascii import unhexlify -from pisa import logging, bitcoin_cli + from pisa.rpc_errors import * +from pisa.cleaner import Cleaner +from pisa import logging, bitcoin_cli from pisa.tools import check_tx_in_chain +from pisa.block_processor import BlockProcessor from pisa.utils.zmq_subscriber import ZMQHandler from pisa.utils.auth_proxy import JSONRPCException @@ -90,12 +93,16 @@ class Responder: self.zmq_subscriber.handle(block_queue) def handle_responses(self): + # ToDo: #9-add-data-persistence + # change prev_block_hash to the last known tip when bootstrapping prev_block_hash = 0 + while len(self.jobs) > 0: # We get notified for every new received block block_hash = self.block_queue.get() + block = BlockProcessor.getblock(block_hash) - try: + if block is not None: block = bitcoin_cli.getblock(block_hash) txs = block.get('tx') height = block.get('height') @@ -104,12 +111,13 @@ class Responder: logging.info("[Responder] prev. block hash {}".format(block.get('previousblockhash'))) logging.info("[Responder] list of transactions: {}".format(txs)) - except JSONRPCException as e: - logging.error("[Responder] couldn't get block from bitcoind. Error code {}".format(e)) - + else: continue completed_jobs = [] + jobs_to_rebroadcast = [] + # ToDo: #9-add-data-persistence + # change prev_block_hash condition if prev_block_hash == block.get('previousblockhash') or prev_block_hash == 0: # Keep count of the confirmations each tx gets for justice_txid, jobs in self.tx_job_map.items(): @@ -121,15 +129,8 @@ class Responder: uuid, justice_txid)) elif self.jobs[uuid].missed_confirmations >= CONFIRMATIONS_BEFORE_RETRY: - # If a transactions has missed too many confirmations for a while we'll try to rebroadcast - # ToDO: #22-discuss-confirmations-before-retry - # ToDo: #23-define-behaviour-approaching-end - self.add_response(uuid, self.jobs[uuid].dispute_txid, justice_txid, - self.jobs[uuid].justice_rawtx, self.jobs[uuid].appointment_end, - retry=True) - - logging.warning("[Responder] txid = {} has missed {} confirmations. 
Rebroadcasting" - .format(justice_txid, CONFIRMATIONS_BEFORE_RETRY)) + # If a transactions has missed too many confirmations we add it to the rebroadcast list + jobs_to_rebroadcast.append(uuid) else: # Otherwise we increase the number of missed confirmations @@ -140,7 +141,10 @@ class Responder: # The end of the appointment has been reached completed_jobs.append(uuid) - self.remove_completed_jobs(completed_jobs, height) + self.jobs, self.tx_job_map = Cleaner.delete_completed_jobs(self.jobs, self.tx_job_map, completed_jobs, + height) + + self.rebroadcast(jobs_to_rebroadcast) else: logging.warning("[Responder] reorg found! local prev. block id = {}, remote prev. block id = {}" @@ -156,6 +160,17 @@ class Responder: logging.info("[Responder] no more pending jobs, going back to sleep") + def rebroadcast(self, jobs_to_rebroadcast): + # ToDO: #22-discuss-confirmations-before-retry + # ToDo: #23-define-behaviour-approaching-end + + for uuid in jobs_to_rebroadcast: + self.add_response(uuid, self.jobs[uuid].dispute_txid, self.jobs[uuid].justice_txid, + self.jobs[uuid].justice_rawtx, self.jobs[uuid].appointment_end, retry=True) + + logging.warning("[Responder] txid = {} has missed {} confirmations. Rebroadcasting" + .format(self.jobs[uuid].justice_txid, CONFIRMATIONS_BEFORE_RETRY)) + def handle_send_failures(self, e, uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, retry): # Since we're pushing a raw transaction to the network we can get two kind of rejections: # RPC_VERIFY_REJECTED and RPC_VERIFY_ALREADY_IN_CHAIN. The former implies that the transaction is rejected @@ -194,24 +209,6 @@ class Responder: # If something else happens (unlikely but possible) log it so we can treat it in future releases logging.error("[Responder] JSONRPCException. Error {}".format(e)) - def remove_completed_jobs(self, completed_jobs, height): - for uuid in completed_jobs: - logging.info("[Responder] job completed (uuid = {}, justice_txid = {}). Appointment ended at " - "block {} after {} confirmations".format(uuid, self.jobs[uuid].justice_txid, height, - self.jobs[uuid].confirmations)) - - # ToDo: #9-add-data-persistency - justice_txid = self.jobs[uuid].justice_txid - self.jobs.pop(uuid) - - if len(self.tx_job_map[justice_txid]) == 1: - self.tx_job_map.pop(justice_txid) - - logging.info("[Responder] no more jobs for justice_txid {}".format(justice_txid)) - - else: - self.tx_job_map[justice_txid].remove(uuid) - def handle_reorgs(self): for uuid, job in self.jobs.items(): # First we check if the dispute transaction is still in the blockchain. If not, the justice can not be diff --git a/pisa/watcher.py b/pisa/watcher.py index 2f517c1..bbf066c 100644 --- a/pisa/watcher.py +++ b/pisa/watcher.py @@ -1,13 +1,17 @@ -from binascii import hexlify, unhexlify +from uuid import uuid4 from queue import Queue from threading import Thread -from pisa import logging, bitcoin_cli + +from pisa import logging from pisa.responder import Responder +from pisa.conf import MAX_APPOINTMENTS +from pisa.block_processor import BlockProcessor +from pisa.cleaner import Cleaner from pisa.utils.zmq_subscriber import ZMQHandler -from pisa.utils.auth_proxy import JSONRPCException -from hashlib import sha256 -from uuid import uuid4 -from pisa.conf import MAX_APPOINTMENTS, EXPIRY_DELTA + + +# WIP: MOVED BLOCKCHAIN RELATED TASKS TO BLOCK PROCESSOR IN AN AIM TO MAKE THE CODE MORE MODULAR. THIS SHOULD HELP +# WITH CODE REUSE WHEN MERGING THE DATA PERSISTENCE PART. 
class Watcher: @@ -72,29 +76,20 @@ class Watcher: def do_watch(self): while len(self.appointments) > 0: block_hash = self.block_queue.get() + logging.info("[Watcher] new block received {}".format(block_hash)) - try: - block = bitcoin_cli.getblock(block_hash) + block = BlockProcessor.getblock(block_hash) + + if block is not None: txids = block.get('tx') - logging.info("[Watcher] new block received {}".format(block_hash)) logging.info("[Watcher] list of transactions: {}".format(txids)) - self.delete_expired_appointment(block) + self.appointments, self.locator_uuid_map = Cleaner.delete_expired_appointment( + block, self.appointments, self.locator_uuid_map) - potential_locators = {sha256(unhexlify(txid)).hexdigest(): txid for txid in txids} - - # Check is any of the tx_ids in the received block is an actual match - # Get the locators that are both in the map and in the potential locators dict. - intersection = set(self.locator_uuid_map.keys()).intersection(potential_locators.keys()) - potential_matches = {locator: potential_locators[locator] for locator in intersection} - - if len(potential_matches) > 0: - logging.info("[Watcher] list of potential matches: {}".format(potential_matches)) - else: - logging.info("[Watcher] no potential matches found") - - matches = self.check_potential_matches(potential_matches) + potential_matches = BlockProcessor.get_potential_matches(txids, self.locator_uuid_map) + matches = BlockProcessor.get_matches(potential_matches, self.locator_uuid_map, self.appointments) for locator, uuid, dispute_txid, justice_txid, justice_rawtx in matches: logging.info("[Watcher] notifying responder about {} and deleting appointment {} (uuid: {})" @@ -108,58 +103,15 @@ class Watcher: # If there was only one appointment that matches the locator we can delete the whole list if len(self.locator_uuid_map[locator]) == 1: - # ToDo: #9-add-data-persistency + # ToDo: #9-add-data-persistence self.locator_uuid_map.pop(locator) else: # Otherwise we just delete the appointment that matches locator:appointment_pos - # ToDo: #9-add-data-persistency + # ToDo: #9-add-data-persistence self.locator_uuid_map[locator].remove(uuid) - except JSONRPCException as e: - logging.error("[Watcher] couldn't get block from bitcoind. Error code {}".format(e)) - # Go back to sleep if there are no more appointments self.asleep = True self.zmq_subscriber.terminate = True logging.error("[Watcher] no more pending appointments, going back to sleep") - - def delete_expired_appointment(self, block): - to_delete = [uuid for uuid, appointment in self.appointments.items() if block["height"] > appointment.end_time - + EXPIRY_DELTA] - - for uuid in to_delete: - # ToDo: #9-add-data-persistency - locator = self.appointments[uuid].locator - - self.appointments.pop(uuid) - - if len(self.locator_uuid_map[locator]) == 1: - self.locator_uuid_map.pop(locator) - - else: - self.locator_uuid_map[locator].remove(uuid) - - logging.info("[Watcher] end time reached with no match! 
Deleting appointment {} (uuid: {})".format(locator,
-                                                                                                               uuid))
-
-    def check_potential_matches(self, potential_matches):
-        matches = []
-
-        for locator, dispute_txid in potential_matches.items():
-            for uuid in self.locator_uuid_map[locator]:
-                try:
-                    # ToDo: #20-test-tx-decrypting-edge-cases
-                    justice_rawtx = self.appointments[uuid].encrypted_blob.decrypt(unhexlify(dispute_txid))
-                    justice_rawtx = hexlify(justice_rawtx).decode()
-                    justice_txid = bitcoin_cli.decoderawtransaction(justice_rawtx).get('txid')
-                    matches.append((locator, uuid, dispute_txid, justice_txid, justice_rawtx))
-
-                    logging.info("[Watcher] match found for locator {} (uuid: {}): {}".format(locator, uuid,
-                                                                                              justice_txid))
-                except JSONRPCException as e:
-                    # Tx decode failed returns error code -22, maybe we should be more strict here. Leaving it simple
-                    # for the POC
-                    logging.error("[Watcher] can't build transaction from decoded data. Error code {}".format(e))
-
-        return matches

From 9a37b211a006495825c55c2d3f924ca8930b0429 Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Fri, 4 Oct 2019 13:49:48 +0100
Subject: [PATCH 05/82] Adds Carrier to take care of sending/getting transactions

---
 pisa/carrier.py | 77 +++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 77 insertions(+)
 create mode 100644 pisa/carrier.py

diff --git a/pisa/carrier.py b/pisa/carrier.py
new file mode 100644
index 0000000..0526f5c
--- /dev/null
+++ b/pisa/carrier.py
@@ -0,0 +1,77 @@
+from pisa.utils.auth_proxy import JSONRPCException
+from pisa.errors import UNKNOWN_JSON_RPC_EXCEPTION
+from pisa import logging, bitcoin_cli
+from pisa.rpc_errors import *
+
+
+class Carrier:
+    class Receipt:
+        def __init__(self, delivered, confirmations=0, reason=None):
+            self.delivered = delivered
+            self.confirmations = confirmations
+            self.reason = reason
+
+    def send_transaction(self, rawtx, txid):
+        try:
+            logging.info("[Carrier] pushing transaction to the network (txid: {})".format(txid))
+            bitcoin_cli.sendrawtransaction(rawtx)
+
+            receipt = self.Receipt(delivered=True)
+
+        except JSONRPCException as e:
+            errno = e.error.get('code')
+            # Since we're pushing a raw transaction to the network we can get two kinds of rejections:
+            # RPC_VERIFY_REJECTED and RPC_VERIFY_ALREADY_IN_CHAIN. The former implies that the transaction is rejected
+            # due to network rules, whereas the latter implies that the transaction is already in the blockchain.
+            if errno == RPC_VERIFY_REJECTED:
+                # DISCUSS: what to do in this case
+                # DISCUSS: invalid transactions (properly formatted but invalid, like unsigned) fit here too.
+                # DISCUSS: RPC_VERIFY_ERROR could also be a possible case.
+                # DISCUSS: check errors -9 and -10
+                # TODO: UNKNOWN_JSON_RPC_EXCEPTION is not the proper exception here. This is long due.
+                receipt = self.Receipt(delivered=False, reason=UNKNOWN_JSON_RPC_EXCEPTION)
+
+            elif errno == RPC_VERIFY_ALREADY_IN_CHAIN:
+                logging.info("[Carrier] {} is already in the blockchain. Getting confirmation count".format(txid))
+
+                # If the transaction is already in the chain, we get the number of confirmations and watch the job
+                # until the end of the appointment
+                tx_info = self.get_transaction(txid)
+
+                if tx_info is not None:
+                    confirmations = int(tx_info.get("confirmations"))
+                    receipt = self.Receipt(delivered=True, confirmations=confirmations)
+
+                else:
+                    # There's a really unlikely edge case where a transaction can be reorged between receiving the
+                    # notification and querying the data. 
In such a case we just resend
+                    self.send_transaction(rawtx, txid)
+
+        else:
+            # If something else happens (unlikely but possible) log it so we can treat it in future releases
+            logging.error("[Carrier] JSONRPCException. Error {}".format(e))
+            receipt = self.Receipt(delivered=False, reason=UNKNOWN_JSON_RPC_EXCEPTION)
+
+        return receipt
+
+    @staticmethod
+    def get_transaction(txid):
+        tx_info = None
+
+        try:
+            tx_info = bitcoin_cli.getrawtransaction(txid, 1)
+
+        except JSONRPCException as e:
+            # While it's quite unlikely, the transaction that was already in the blockchain could have been
+            # reorged while we were querying bitcoind to get the confirmation count. In such a case we just
+            # restart the job
+            if e.error.get('code') == RPC_INVALID_ADDRESS_OR_KEY:
+                logging.info("[Carrier] transaction {} got reorged before obtaining information".format(txid))
+
+            # TODO: Check RPC methods to see possible returns and avoid general else
+            # else:
+            #     # If something else happens (unlikely but possible) log it so we can treat it in future releases
+            #     logging.error("[Carrier] JSONRPCException. Error {}".format(e))
+
+        return tx_info
+
From 3ed9ccd466cdc7cfa93eab0f25754ed0b33a2f71 Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Fri, 4 Oct 2019 13:50:43 +0100
Subject: [PATCH 06/82] Adds additional functionality that will be needed when deploying data persistence

---
 pisa/block_processor.py | 35 ++++++++++++++++++++++++++++++++++-
 1 file changed, 34 insertions(+), 1 deletion(-)

diff --git a/pisa/block_processor.py b/pisa/block_processor.py
index b6d9882..b047cc3 100644
--- a/pisa/block_processor.py
+++ b/pisa/block_processor.py
@@ -7,7 +7,7 @@ from pisa.utils.auth_proxy import JSONRPCException
 
 class BlockProcessor:
     @staticmethod
-    def getblock(block_hash):
+    def get_block(block_hash):
         block = None
 
         try:
@@ -18,6 +18,18 @@
         return block
 
+    @staticmethod
+    def get_best_block_hash():
+        block_hash = None
+
+        try:
+            block_hash = bitcoin_cli.getbestblockhash()
+
+        except JSONRPCException as e:
+            logging.error("[BlockProcessor] couldn't get block hash. Error code {}".format(e))
+
+        return block_hash
+
     @staticmethod
     def get_potential_matches(txids, locator_uuid_map):
@@ -55,3 +67,24 @@
         return matches
 
+    @staticmethod
+    def check_confirmations(txs, unconfirmed_txs, tx_job_map, missed_confirmations):
+
+        for tx in txs:
+            if tx in tx_job_map and tx in unconfirmed_txs:
+                unconfirmed_txs.remove(tx)
+
+                logging.info("[Responder] confirmation received for tx {}".format(tx))
+
+            elif tx in unconfirmed_txs:
+                if tx in missed_confirmations:
+                    missed_confirmations[tx] += 1
+
+                else:
+                    missed_confirmations[tx] = 1
+
+                logging.info("[Responder] tx {} missed a confirmation (total missed: {})"
+                             .format(tx, missed_confirmations[tx]))
+
+        return unconfirmed_txs, missed_confirmations
+
From 47044625022c7781b497c31669bd4abcdc38887f Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Fri, 4 Oct 2019 13:52:51 +0100
Subject: [PATCH 07/82] Increases responder modularity

The responder had way too complex functions. Separate them into smaller /
more specific ones to increase modularity and code reuse.
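
A rough sketch of the resulting per-block loop in do_watch (names as introduced
in this patch; simplified, error handling omitted):

    block = BlockProcessor.get_block(block_hash)
    txs, height = block.get('tx'), block.get('height')
    self.unconfirmed_txs, self.missed_confirmations = BlockProcessor.check_confirmations(
        txs, self.unconfirmed_txs, self.tx_job_map, self.missed_confirmations)
    txs_to_rebroadcast = self.get_txs_to_rebroadcast(txs)
    self.jobs, self.tx_job_map = Cleaner.delete_completed_jobs(
        self.jobs, self.tx_job_map, self.get_completed_jobs(height), height)
    self.rebroadcast(txs_to_rebroadcast)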
--- pisa/responder.py | 172 +++++++++++++++++++--------------------------- 1 file changed, 69 insertions(+), 103 deletions(-) diff --git a/pisa/responder.py b/pisa/responder.py index e277eea..0f04662 100644 --- a/pisa/responder.py +++ b/pisa/responder.py @@ -3,27 +3,24 @@ from threading import Thread from hashlib import sha256 from binascii import unhexlify -from pisa.rpc_errors import * from pisa.cleaner import Cleaner -from pisa import logging, bitcoin_cli +from pisa.carrier import Carrier +from pisa import logging from pisa.tools import check_tx_in_chain from pisa.block_processor import BlockProcessor from pisa.utils.zmq_subscriber import ZMQHandler -from pisa.utils.auth_proxy import JSONRPCException CONFIRMATIONS_BEFORE_RETRY = 6 MIN_CONFIRMATIONS = 6 class Job: - def __init__(self, dispute_txid, justice_txid, justice_rawtx, appointment_end, confirmations=0, retry_counter=0): + def __init__(self, dispute_txid, justice_txid, justice_rawtx, appointment_end, retry_counter=0): self.dispute_txid = dispute_txid self.justice_txid = justice_txid self.justice_rawtx = justice_rawtx self.appointment_end = appointment_end - self.confirmations = confirmations - self.missed_confirmations = 0 self.retry_counter = retry_counter # FIXME: locator is here so we can give info about jobs for now. It can be either passed from watcher or info @@ -31,8 +28,7 @@ class Job: self.locator = sha256(unhexlify(dispute_txid)).hexdigest() def to_json(self): - job = {"locator": self.locator, "justice_rawtx": self.justice_rawtx, "confirmations": self.confirmations, - "appointment_end": self.appointment_end} + job = {"locator": self.locator, "justice_rawtx": self.justice_rawtx, "appointment_end": self.appointment_end} return job @@ -41,25 +37,28 @@ class Responder: def __init__(self): self.jobs = dict() self.tx_job_map = dict() + self.unconfirmed_txs = [] + self.missed_confirmations = dict() self.block_queue = None self.asleep = True self.zmq_subscriber = None def add_response(self, uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, retry=False): + if self.asleep: + logging.info("[Responder] waking up!") - try: - if self.asleep: - logging.info("[Responder] waking up!") - logging.info("[Responder] pushing transaction to the network (txid: {})".format(justice_txid)) + carrier = Carrier() + receipt = carrier.send_transaction(justice_rawtx, justice_txid) - bitcoin_cli.sendrawtransaction(justice_rawtx) - - # handle_responses can call add_response recursively if a broadcast transaction does not get confirmations + if receipt.delivered: + # do_watch can call add_response recursively if a broadcast transaction does not get confirmations # retry holds such information. - self.create_job(uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, retry=retry) + self.create_job(uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, retry=retry, + confirmations=receipt.confirmations) - except JSONRPCException as e: - self.handle_send_failures(e, uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, retry) + else: + # TODO: Add the missing reasons (e.g. 
RPC_VERIFY_REJECTED) + pass def create_job(self, uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, confirmations=0, retry=False): @@ -68,6 +67,7 @@ class Responder: if retry: self.jobs[uuid].retry_counter += 1 self.jobs[uuid].missed_confirmations = 0 + else: self.jobs[uuid] = Job(dispute_txid, justice_txid, justice_rawtx, appointment_end, confirmations) @@ -77,6 +77,9 @@ class Responder: else: self.tx_job_map[justice_txid] = [uuid] + if confirmations == 0: + self.unconfirmed_txs.append(justice_txid) + logging.info('[Responder] new job added (dispute txid = {}, justice txid = {}, appointment end = {})' .format(dispute_txid, justice_txid, appointment_end)) @@ -84,7 +87,7 @@ class Responder: self.asleep = False self.block_queue = Queue() zmq_thread = Thread(target=self.do_subscribe, args=[self.block_queue]) - responder = Thread(target=self.handle_responses) + responder = Thread(target=self.do_watch) zmq_thread.start() responder.start() @@ -92,7 +95,7 @@ class Responder: self.zmq_subscriber = ZMQHandler(parent='Responder') self.zmq_subscriber.handle(block_queue) - def handle_responses(self): + def do_watch(self): # ToDo: #9-add-data-persistence # change prev_block_hash to the last known tip when bootstrapping prev_block_hash = 0 @@ -100,10 +103,9 @@ class Responder: while len(self.jobs) > 0: # We get notified for every new received block block_hash = self.block_queue.get() - block = BlockProcessor.getblock(block_hash) + block = BlockProcessor.get_block(block_hash) if block is not None: - block = bitcoin_cli.getblock(block_hash) txs = block.get('tx') height = block.get('height') @@ -111,48 +113,25 @@ class Responder: logging.info("[Responder] prev. block hash {}".format(block.get('previousblockhash'))) logging.info("[Responder] list of transactions: {}".format(txs)) - else: - continue + # ToDo: #9-add-data-persistence + # change prev_block_hash condition + if prev_block_hash == block.get('previousblockhash') or prev_block_hash == 0: + self.unconfirmed_txs, self.missed_confirmations = BlockProcessor.check_confirmations( + txs, self.unconfirmed_txs, self.tx_job_map, self.missed_confirmations) - completed_jobs = [] - jobs_to_rebroadcast = [] - # ToDo: #9-add-data-persistence - # change prev_block_hash condition - if prev_block_hash == block.get('previousblockhash') or prev_block_hash == 0: - # Keep count of the confirmations each tx gets - for justice_txid, jobs in self.tx_job_map.items(): - for uuid in jobs: - if justice_txid in txs or self.jobs[uuid].confirmations > 0: - self.jobs[uuid].confirmations += 1 + txs_to_rebroadcast = self.get_txs_to_rebroadcast(txs) + self.jobs, self.tx_job_map = Cleaner.delete_completed_jobs(self.jobs, self.tx_job_map, + self.get_completed_jobs(height), height) - logging.info("[Responder] new confirmation received for job = {}, txid = {}".format( - uuid, justice_txid)) + self.rebroadcast(txs_to_rebroadcast) - elif self.jobs[uuid].missed_confirmations >= CONFIRMATIONS_BEFORE_RETRY: - # If a transactions has missed too many confirmations we add it to the rebroadcast list - jobs_to_rebroadcast.append(uuid) + else: + logging.warning("[Responder] reorg found! local prev. block id = {}, remote prev. 
block id = {}" + .format(prev_block_hash, block.get('previousblockhash'))) - else: - # Otherwise we increase the number of missed confirmations - self.jobs[uuid].missed_confirmations += 1 + self.handle_reorgs() - if self.jobs[uuid].appointment_end <= height and self.jobs[uuid].confirmations >= \ - MIN_CONFIRMATIONS: - # The end of the appointment has been reached - completed_jobs.append(uuid) - - self.jobs, self.tx_job_map = Cleaner.delete_completed_jobs(self.jobs, self.tx_job_map, completed_jobs, - height) - - self.rebroadcast(jobs_to_rebroadcast) - - else: - logging.warning("[Responder] reorg found! local prev. block id = {}, remote prev. block id = {}" - .format(prev_block_hash, block.get('previousblockhash'))) - - self.handle_reorgs() - - prev_block_hash = block.get('hash') + prev_block_hash = block.get('hash') # Go back to sleep if there are no more jobs self.asleep = True @@ -160,55 +139,43 @@ class Responder: logging.info("[Responder] no more pending jobs, going back to sleep") + def get_txs_to_rebroadcast(self, txs): + txs_to_rebroadcast = [] + + for tx in txs: + if self.missed_confirmations[tx] >= CONFIRMATIONS_BEFORE_RETRY: + # If a transactions has missed too many confirmations we add it to the rebroadcast list + txs_to_rebroadcast.append(tx) + + return txs_to_rebroadcast + + def get_completed_jobs(self, height): + completed_jobs = [] + + for uuid, job in self.jobs: + if job.appointment_end <= height: + tx = Carrier.get_transaction(job.dispute_txid) + + # FIXME: Should be improved with the librarian + if tx is not None and tx.get('confirmations') > MIN_CONFIRMATIONS: + # The end of the appointment has been reached + completed_jobs.append(uuid) + + return completed_jobs + def rebroadcast(self, jobs_to_rebroadcast): # ToDO: #22-discuss-confirmations-before-retry # ToDo: #23-define-behaviour-approaching-end - for uuid in jobs_to_rebroadcast: - self.add_response(uuid, self.jobs[uuid].dispute_txid, self.jobs[uuid].justice_txid, - self.jobs[uuid].justice_rawtx, self.jobs[uuid].appointment_end, retry=True) + for tx in jobs_to_rebroadcast: + for uuid in self.tx_job_map[tx]: + self.add_response(uuid, self.jobs[uuid].dispute_txid, self.jobs[uuid].justice_txid, + self.jobs[uuid].justice_rawtx, self.jobs[uuid].appointment_end, retry=True) - logging.warning("[Responder] txid = {} has missed {} confirmations. Rebroadcasting" - .format(self.jobs[uuid].justice_txid, CONFIRMATIONS_BEFORE_RETRY)) - - def handle_send_failures(self, e, uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, retry): - # Since we're pushing a raw transaction to the network we can get two kind of rejections: - # RPC_VERIFY_REJECTED and RPC_VERIFY_ALREADY_IN_CHAIN. The former implies that the transaction is rejected - # due to network rules, whereas the later implies that the transaction is already in the blockchain. - if e.error.get('code') == RPC_VERIFY_REJECTED: - # DISCUSS: what to do in this case - # DISCUSS: invalid transactions (properly formatted but invalid, like unsigned) fit here too. - # DISCUSS: RPC_VERIFY_ERROR could also be a possible case. - # DISCUSS: check errors -9 and -10 - pass - - elif e.error.get('code') == RPC_VERIFY_ALREADY_IN_CHAIN: - try: - logging.info("[Responder] {} is already in the blockchain. 
Getting the confirmation count and start " - "monitoring the transaction".format(justice_txid)) - - # If the transaction is already in the chain, we get the number of confirmations and watch the job - # until the end of the appointment - tx_info = bitcoin_cli.getrawtransaction(justice_txid, 1) - confirmations = int(tx_info.get("confirmations")) - self.create_job(uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, retry=retry, - confirmations=confirmations) - - except JSONRPCException as e: - # While it's quite unlikely, the transaction that was already in the blockchain could have been - # reorged while we were querying bitcoind to get the confirmation count. In such a case we just - # restart the job - if e.error.get('code') == RPC_INVALID_ADDRESS_OR_KEY: - self.add_response(uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, retry=retry) - - else: - # If something else happens (unlikely but possible) log it so we can treat it in future releases - logging.error("[Responder] JSONRPCException. Error {}".format(e)) - - else: - # If something else happens (unlikely but possible) log it so we can treat it in future releases - logging.error("[Responder] JSONRPCException. Error {}".format(e)) + logging.warning("[Responder] tx {} has missed {} confirmations. Rebroadcasting" + .format(self.jobs[uuid].justice_txid, CONFIRMATIONS_BEFORE_RETRY)) + # FIXME: Legacy code, must be checked and updated/fixed def handle_reorgs(self): for uuid, job in self.jobs.items(): # First we check if the dispute transaction is still in the blockchain. If not, the justice can not be @@ -240,4 +207,3 @@ class Responder: # reorg manager logging.warning("[Responder] dispute and justice transaction missing. Calling the reorg manager") logging.error("[Responder] reorg manager not yet implemented") - pass From 1e18630ce2fa9b582dd9e5a02e0efd51213f6bee Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Fri, 4 Oct 2019 17:23:45 +0100 Subject: [PATCH 08/82] Adds Cleaner tests. Fixes Cleaner, Watcher and Responder There is no need to return the dictionaries modified by the client since they are "passed-by-reference" in Python. --- pisa/cleaner.py | 19 ++++------ pisa/responder.py | 8 ++-- pisa/watcher.py | 11 ++++-- tests/test_cleaner.py | 87 +++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 105 insertions(+), 20 deletions(-) create mode 100644 tests/test_cleaner.py diff --git a/pisa/cleaner.py b/pisa/cleaner.py index d7808b9..b7c2947 100644 --- a/pisa/cleaner.py +++ b/pisa/cleaner.py @@ -1,14 +1,13 @@ -import pisa.conf as conf from pisa import logging +# Dictionaries in Python are "passed-by-reference", so no return is needed for the Cleaner" +# https://docs.python.org/3/faq/programming.html#how-do-i-write-a-function-with-output-parameters-call-by-reference + class Cleaner: @staticmethod - def delete_expired_appointment(block, appointments, locator_uuid_map): - to_delete = [uuid for uuid, appointment in appointments.items() - if block["height"] > appointment.end_time + conf.EXPIRY_DELTA] - - for uuid in to_delete: + def delete_expired_appointment(expired_appointments, appointments, locator_uuid_map): + for uuid in expired_appointments: locator = appointments[uuid].locator appointments.pop(uuid) @@ -22,13 +21,11 @@ class Cleaner: logging.info("[Cleaner] end time reached with no match! 
Deleting appointment {} (uuid: {})".format(locator, uuid)) - return appointments, locator_uuid_map - @staticmethod def delete_completed_jobs(jobs, tx_job_map, completed_jobs, height): - for uuid in completed_jobs: + for uuid, confirmations in completed_jobs: logging.info("[Cleaner] job completed (uuid = {}). Appointment ended at block {} after {} confirmations" - .format(uuid, jobs[uuid].justice_txid, height, jobs[uuid].confirmations)) + .format(uuid, height, confirmations)) # ToDo: #9-add-data-persistence justice_txid = jobs[uuid].justice_txid @@ -41,5 +38,3 @@ else: tx_job_map[justice_txid].remove(uuid) - - return jobs, tx_job_map diff --git a/pisa/responder.py b/pisa/responder.py index 0f04662..7eabd9b 100644 --- a/pisa/responder.py +++ b/pisa/responder.py @@ -120,8 +120,7 @@ txs, self.unconfirmed_txs, self.tx_job_map, self.missed_confirmations) txs_to_rebroadcast = self.get_txs_to_rebroadcast(txs) - self.jobs, self.tx_job_map = Cleaner.delete_completed_jobs(self.jobs, self.tx_job_map, - self.get_completed_jobs(height), height) + Cleaner.delete_completed_jobs(self.jobs, self.tx_job_map, self.get_completed_jobs(height), height) self.rebroadcast(txs_to_rebroadcast) @@ -157,9 +156,10 @@ tx = Carrier.get_transaction(job.dispute_txid) # FIXME: Should be improved with the librarian - if tx is not None and tx.get('confirmations') > MIN_CONFIRMATIONS: + confirmations = tx.get('confirmations') if tx is not None else None + if confirmations is not None and confirmations > MIN_CONFIRMATIONS: # The end of the appointment has been reached - completed_jobs.append(uuid) + completed_jobs.append((uuid, confirmations)) return completed_jobs diff --git a/pisa/watcher.py b/pisa/watcher.py index bbf066c..f63e5ab 100644 --- a/pisa/watcher.py +++ b/pisa/watcher.py @@ -3,10 +3,11 @@ from queue import Queue from threading import Thread from pisa import logging +from pisa.cleaner import Cleaner +from pisa.conf import EXPIRY_DELTA from pisa.responder import Responder from pisa.conf import MAX_APPOINTMENTS from pisa.block_processor import BlockProcessor -from pisa.cleaner import Cleaner from pisa.utils.zmq_subscriber import ZMQHandler @@ -78,15 +79,17 @@ block_hash = self.block_queue.get() logging.info("[Watcher] new block received {}".format(block_hash)) - block = BlockProcessor.getblock(block_hash) + block = BlockProcessor.get_block(block_hash) if block is not None: txids = block.get('tx') logging.info("[Watcher] list of transactions: {}".format(txids)) - self.appointments, self.locator_uuid_map = Cleaner.delete_expired_appointment( - block, self.appointments, self.locator_uuid_map) + expired_appointments = [uuid for uuid, appointment in self.appointments.items() + if block["height"] > appointment.end_time + EXPIRY_DELTA] + + Cleaner.delete_expired_appointment(expired_appointments, self.appointments, self.locator_uuid_map) potential_matches = BlockProcessor.get_potential_matches(txids, self.locator_uuid_map) matches = BlockProcessor.get_matches(potential_matches, self.locator_uuid_map, self.appointments) diff --git a/tests/test_cleaner.py b/tests/test_cleaner.py new file mode 100644 index 0000000..5a3bbab --- /dev/null +++ b/tests/test_cleaner.py @@ -0,0 +1,87 @@ +import random +from os import urandom +from uuid import uuid4 +from binascii import hexlify + +from pisa import logging +from pisa.responder import Job +from pisa.cleaner import Cleaner +from pisa.appointment import Appointment + +CONFIRMATIONS = 6 +ITEMS = 10 +MAX_ITEMS = 100 +ITERATIONS = 1000 + + +def
set_up_appointments(total_appointments): + appointments = dict() + locator_uuid_map = dict() + + for _ in range(total_appointments): + uuid = uuid4().hex + locator = hexlify(urandom(64)) + + appointments[uuid] = Appointment(locator, None, None, None, None, None, None) + locator_uuid_map[locator] = [uuid] + + # Each locator can have more than one uuid assigned to it. Do a coin toss to add multiple ones + while random.randint(0, 1): + uuid = uuid4().hex + + appointments[uuid] = Appointment(locator, None, None, None, None, None, None) + locator_uuid_map[locator].append(uuid) + + return appointments, locator_uuid_map + + +def set_up_jobs(total_jobs): + jobs = dict() + tx_job_map = dict() + + for _ in range(total_jobs): + uuid = uuid4().hex + txid = hexlify(urandom(64)) + + # Assign both justice_txid and dispute_txid the same id (it shouldn't matter) + jobs[uuid] = Job(txid, txid, None, None, None) + tx_job_map[txid] = [uuid] + + # Each justice_txid can have more than one uuid assigned to it. Do a coin toss to add multiple ones + while random.randint(0, 1): + uuid = uuid4().hex + + jobs[uuid] = Job(txid, txid, None, None, None) + tx_job_map[txid].append(uuid) + + return jobs, tx_job_map + + +def test_delete_expired_appointment(): + appointments, locator_uuid_map = set_up_appointments(MAX_ITEMS) + expired_appointments = random.sample(list(appointments.keys()), k=ITEMS) + + Cleaner.delete_expired_appointment(expired_appointments, appointments, locator_uuid_map) + + assert not set(expired_appointments).issubset(appointments.keys()) + + +def test_delete_completed_jobs(): + jobs, tx_job_map = set_up_jobs(MAX_ITEMS) + selected_jobs = random.sample(list(jobs.keys()), k=ITEMS) + + completed_jobs = [(job, 6) for job in selected_jobs] + + Cleaner.delete_completed_jobs(jobs, tx_job_map, completed_jobs, 0) + + assert not set(completed_jobs).issubset(jobs.keys()) + + +logging.getLogger().disabled = True + +for _ in range(ITERATIONS): + test_delete_expired_appointment() + +for _ in range(ITERATIONS): + test_delete_completed_jobs() + From beac88a2c5d0ec69bfefce22b54e8899aabf652b Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Fri, 4 Oct 2019 17:29:13 +0100 Subject: [PATCH 09/82] Refactors test structure --- {tests => test}/__init__.py | 0 {tests => test}/add_appointment_test.py | 2 +- {tests => test}/appointment_tests.py | 0 {tests => test}/simulator/__init__.py | 0 {tests => test}/simulator/bitcoin_sim_tests.py | 0 {tests => test}/simulator/bitcoind_sim.py | 2 +- {tests => test}/simulator/zmq_publisher.py | 0 test/unit/__init__.py | 0 {tests => test/unit}/test_cleaner.py | 0 9 files changed, 2 insertions(+), 2 deletions(-) rename {tests => test}/__init__.py (100%) rename {tests => test}/add_appointment_test.py (98%) rename {tests => test}/appointment_tests.py (100%) rename {tests => test}/simulator/__init__.py (100%) rename {tests => test}/simulator/bitcoin_sim_tests.py (100%) rename {tests => test}/simulator/bitcoind_sim.py (99%) rename {tests => test}/simulator/zmq_publisher.py (100%) create mode 100644 test/unit/__init__.py rename {tests => test/unit}/test_cleaner.py (100%) diff --git a/tests/__init__.py b/test/__init__.py similarity index 100% rename from tests/__init__.py rename to test/__init__.py diff --git a/tests/add_appointment_test.py b/test/add_appointment_test.py similarity index 98% rename from tests/add_appointment_test.py rename to test/add_appointment_test.py index caccfdc..68fea80 100644 --- a/tests/add_appointment_test.py +++ b/test/add_appointment_test.py @@ -7,7 +7,7 @@ from 
hashlib import sha256 from binascii import hexlify, unhexlify from apps.cli.blob import Blob from pisa import HOST, PORT -from pisa.utils.authproxy import AuthServiceProxy +from pisa.utils.auth_proxy import AuthServiceProxy from pisa.conf import BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT PISA_API = "http://{}:{}".format(HOST, PORT) diff --git a/tests/appointment_tests.py b/test/appointment_tests.py similarity index 100% rename from tests/appointment_tests.py rename to test/appointment_tests.py diff --git a/tests/simulator/__init__.py b/test/simulator/__init__.py similarity index 100% rename from tests/simulator/__init__.py rename to test/simulator/__init__.py diff --git a/tests/simulator/bitcoin_sim_tests.py b/test/simulator/bitcoin_sim_tests.py similarity index 100% rename from tests/simulator/bitcoin_sim_tests.py rename to test/simulator/bitcoin_sim_tests.py diff --git a/tests/simulator/bitcoind_sim.py b/test/simulator/bitcoind_sim.py similarity index 99% rename from tests/simulator/bitcoind_sim.py rename to test/simulator/bitcoind_sim.py index d9b902d..9ba6089 100644 --- a/tests/simulator/bitcoind_sim.py +++ b/test/simulator/bitcoind_sim.py @@ -1,6 +1,6 @@ from pisa.conf import FEED_PROTOCOL, FEED_ADDR, FEED_PORT from flask import Flask, request, Response, abort -from tests.simulator.zmq_publisher import ZMQPublisher +from test.simulator.zmq_publisher import ZMQPublisher from threading import Thread from pisa.rpc_errors import * from pisa.tools import check_txid_format diff --git a/tests/simulator/zmq_publisher.py b/test/simulator/zmq_publisher.py similarity index 100% rename from tests/simulator/zmq_publisher.py rename to test/simulator/zmq_publisher.py diff --git a/test/unit/__init__.py b/test/unit/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/test_cleaner.py b/test/unit/test_cleaner.py similarity index 100% rename from tests/test_cleaner.py rename to test/unit/test_cleaner.py From 5ae210d8939b8833b2d73f8dbdf6a38bb0ee9cb6 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Mon, 7 Oct 2019 11:39:10 +0100 Subject: [PATCH 10/82] Updates os.urandom to python3 All strings generated with urandom(x) were using binascii + os.urandom + encode. In Python 3 the bytes returned by os.urandom have a hex() method.
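The equivalence can be checked in isolation; bytes.hex() exists since Python 3.5 and yields the same lowercase string as the old binascii round trip:

    import os
    from binascii import hexlify

    token = os.urandom(32)

    # Old style and the bytes.hex() replacement are interchangeable
    assert hexlify(token).decode('utf-8') == token.hex()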
--- apps/cli/pisa-cli.py | 5 ++--- test/add_appointment_test.py | 7 +++---- test/simulator/bitcoin_sim_tests.py | 8 ++++---- test/simulator/bitcoind_sim.py | 4 ++-- test/unit/test_cleaner.py | 4 ++-- 5 files changed, 13 insertions(+), 15 deletions(-) diff --git a/apps/cli/pisa-cli.py b/apps/cli/pisa-cli.py index dacca68..3dd22f1 100644 --- a/apps/cli/pisa-cli.py +++ b/apps/cli/pisa-cli.py @@ -28,9 +28,8 @@ def generate_dummy_appointment(): current_height = r.json().get("block_count") - dummy_appointment_data = {"tx": hexlify(os.urandom(192)).decode('utf-8'), - "tx_id": hexlify(os.urandom(32)).decode('utf-8'), "start_time": current_height + 5, - "end_time": current_height + 10, "dispute_delta": 20} + dummy_appointment_data = {"tx": os.urandom(192).hex(), "tx_id": os.urandom(32).hex(), + "start_time": current_height + 5, "end_time": current_height + 10, "dispute_delta": 20} print('Generating dummy appointment data:''\n\n' + json.dumps(dummy_appointment_data, indent=4, sort_keys=True)) diff --git a/test/add_appointment_test.py b/test/add_appointment_test.py index 68fea80..35aa1b0 100644 --- a/test/add_appointment_test.py +++ b/test/add_appointment_test.py @@ -18,8 +18,7 @@ def generate_dummy_appointment(dispute_txid): current_height = r.json().get("block_count") - dummy_appointment_data = {"tx": hexlify(os.urandom(32)).decode('utf-8'), - "tx_id": dispute_txid, "start_time": current_height + 5, + dummy_appointment_data = {"tx": os.urandom(32).hex(), "tx_id": dispute_txid, "start_time": current_height + 5, "end_time": current_height + 10, "dispute_delta": 20} cipher = "AES-GCM-128" @@ -40,7 +39,7 @@ def generate_dummy_appointment(dispute_txid): def test_add_appointment(appointment=None): if not appointment: - dispute_txid = hexlify(os.urandom(32)).decode('utf-8') + dispute_txid = os.urandom(32).hex() appointment = generate_dummy_appointment(dispute_txid) print("Sending appointment (locator: {}) to PISA".format(appointment.get("locator"))) @@ -67,7 +66,7 @@ def test_add_appointment(appointment=None): def test_same_locator_multiple_appointments(): - dispute_txid = hexlify(os.urandom(32)).decode('utf-8') + dispute_txid = os.urandom(32).hex() appointment = generate_dummy_appointment(dispute_txid) # Send it once diff --git a/test/simulator/bitcoin_sim_tests.py b/test/simulator/bitcoin_sim_tests.py index dd77baa..bc5f45a 100644 --- a/test/simulator/bitcoin_sim_tests.py +++ b/test/simulator/bitcoin_sim_tests.py @@ -32,7 +32,7 @@ assert(len(block.get('tx')) != 0) assert(isinstance(block.get('height'), int)) # Some fails -values += ["a"*64, binascii.hexlify(os.urandom(32)).decode()] +values += ["a"*64, os.urandom(32).hex()] print("\ngetblock fails ({}):".format(len(values))) for v in values: @@ -50,14 +50,14 @@ assert(isinstance(tx.get('txid'), str)) assert(check_txid_format(tx.get('txid'))) # Therefore should also work for a random formatted 32-byte hex in our simulation -random_tx = binascii.hexlify(os.urandom(32)).decode() +random_tx = os.urandom(32).hex() tx = bitcoin_cli.decoderawtransaction(random_tx) assert(isinstance(tx, dict)) assert(isinstance(tx.get('txid'), str)) assert(check_txid_format(tx.get('txid'))) # But it should fail for not proper formatted one -values = [1, None, '', "a"*63, "b"*65, [], binascii.hexlify(os.urandom(31)).hex()] +values = [1, None, '', "a"*63, "b"*65, [], os.urandom(31).hex()] print("\ndecoderawtransaction fails ({}):".format(len(values))) for v in values: @@ -68,7 +68,7 @@ for v in values: print('\t{}'.format(e)) # sendrawtransaction should only allow txids that the 
simulator has not mined yet -bitcoin_cli.sendrawtransaction(binascii.hexlify(os.urandom(32)).decode()) +bitcoin_cli.sendrawtransaction(os.urandom(32).hex()) # Any data not matching the txid format or that matches with an already mined transaction should fail values += [coinbase_tx] diff --git a/test/simulator/bitcoind_sim.py b/test/simulator/bitcoind_sim.py index 9ba6089..1512c1f 100644 --- a/test/simulator/bitcoind_sim.py +++ b/test/simulator/bitcoind_sim.py @@ -172,8 +172,8 @@ def simulate_mining(): prev_block_hash = None while True: - block_hash = binascii.hexlify(os.urandom(32)).decode('utf-8') - coinbase_tx_hash = binascii.hexlify(os.urandom(32)).decode('utf-8') + block_hash = os.urandom(32).hex() + coinbase_tx_hash = os.urandom(32).hex() txs_to_mine = [coinbase_tx_hash] if len(mempool) != 0: diff --git a/test/unit/test_cleaner.py b/test/unit/test_cleaner.py index 5a3bbab..de1be8a 100644 --- a/test/unit/test_cleaner.py +++ b/test/unit/test_cleaner.py @@ -20,7 +20,7 @@ def set_up_appointments(total_appointments): for _ in range(total_appointments): uuid = uuid4().hex - locator = hexlify(urandom(64)) + locator = urandom(32).hex() appointments[uuid] = Appointment(locator, None, None, None, None, None, None) locator_uuid_map[locator] = [uuid] @@ -41,7 +41,7 @@ def set_up_jobs(total_jobs): for _ in range(total_jobs): uuid = uuid4().hex - txid = hexlify(urandom(64)) + txid = urandom(32).hex() # Assign both justice_txid and dispute_txid the same id (it shouldn't matter) jobs[uuid] = Job(txid, txid, None, None, None) From 23efd48796931336eec63cb3b80e0c626ade1083 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Mon, 7 Oct 2019 11:47:21 +0100 Subject: [PATCH 11/82] Fixes logging bug introduced in 93e23e7 When cleaning up code in commit 93e23e7 a condition was removed that makes the inspector log empty data. --- pisa/inspector.py | 42 ++++++++++++++++++++++++++++-------------- 1 file changed, 28 insertions(+), 14 deletions(-) diff --git a/pisa/inspector.py b/pisa/inspector.py index 25bb344..da65bfd 100644 --- a/pisa/inspector.py +++ b/pisa/inspector.py @@ -47,7 +47,8 @@ class Inspector: return r - def check_locator(self, locator): + @staticmethod + def check_locator(locator): message = None rcode = 0 @@ -65,11 +66,13 @@ class Inspector: rcode = errors.APPOINTMENT_WRONG_FIELD_FORMAT message = "wrong locator format ({})".format(locator) - logging.error("[Inspector] {}".format(message)) + if message is not None: + logging.error("[Inspector] {}".format(message)) return rcode, message - def check_start_time(self, start_time, block_height): + @staticmethod + def check_start_time(start_time, block_height): message = None rcode = 0 @@ -88,11 +91,13 @@ class Inspector: else: message = "start_time too close to current height" - logging.error("[Inspector] {}".format(message)) + if message is not None: + logging.error("[Inspector] {}".format(message)) return rcode, message - def check_end_time(self, end_time, start_time, block_height): + @staticmethod + def check_end_time(end_time, start_time, block_height): message = None rcode = 0 @@ -114,11 +119,13 @@ class Inspector: rcode = errors.APPOINTMENT_FIELD_TOO_SMALL message = 'end_time is in the past' - logging.error("[Inspector] {}".format(message)) + if message is not None: + logging.error("[Inspector] {}".format(message)) return rcode, message - def check_delta(self, dispute_delta): + @staticmethod + def check_delta(dispute_delta): message = None rcode = 0 @@ -135,12 +142,14 @@ class Inspector: message = "dispute delta too small. 
The dispute delta should be at least {} (current: {})".format( conf.MIN_DISPUTE_DELTA, dispute_delta) - logging.error("[Inspector] {}".format(message)) + if message is not None: + logging.error("[Inspector] {}".format(message)) return rcode, message # ToDo: #6-define-checks-encrypted-blob - def check_blob(self, encrypted_blob): + @staticmethod + def check_blob(encrypted_blob): message = None rcode = 0 @@ -157,11 +166,13 @@ class Inspector: rcode = errors.APPOINTMENT_WRONG_FIELD message = "wrong encrypted_blob" - logging.error("[Inspector] {}".format(message)) + if message is not None: + logging.error("[Inspector] {}".format(message)) return rcode, message - def check_cipher(self, cipher): + @staticmethod + def check_cipher(cipher): message = None rcode = 0 @@ -177,11 +188,13 @@ class Inspector: rcode = errors.APPOINTMENT_CIPHER_NOT_SUPPORTED message = "cipher not supported: {}".format(cipher) - logging.error("[Inspector] {}".format(message)) + if message is not None: + logging.error("[Inspector] {}".format(message)) return rcode, message - def check_hash_function(self, hash_function): + @staticmethod + def check_hash_function(hash_function): message = None rcode = 0 @@ -197,6 +210,7 @@ class Inspector: rcode = errors.APPOINTMENT_HASH_FUNCTION_NOT_SUPPORTED message = "hash_function not supported {}".format(hash_function) - logging.error("[Inspector] {}".format(message)) + if message is not None: + logging.error("[Inspector] {}".format(message)) return rcode, message From 067efcca73723675eb28afef3656ca3bdf47e116 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Mon, 7 Oct 2019 11:58:28 +0100 Subject: [PATCH 12/82] Clean unused imports and more cleanup --- apps/cli/blob.py | 3 ++- apps/cli/pisa-cli.py | 7 ++++--- test/add_appointment_test.py | 7 ++++--- test/appointment_tests.py | 21 ++++++--------------- test/simulator/bitcoin_sim_tests.py | 3 +-- test/unit/test_cleaner.py | 1 - 6 files changed, 17 insertions(+), 25 deletions(-) diff --git a/apps/cli/blob.py b/apps/cli/blob.py index 968f9e3..ac7dfa3 100644 --- a/apps/cli/blob.py +++ b/apps/cli/blob.py @@ -1,6 +1,7 @@ -from binascii import hexlify, unhexlify from hashlib import sha256 +from binascii import hexlify, unhexlify from cryptography.hazmat.primitives.ciphers.aead import AESGCM + from apps.cli import SUPPORTED_HASH_FUNCTIONS, SUPPORTED_CIPHERS diff --git a/apps/cli/pisa-cli.py b/apps/cli/pisa-cli.py index 3dd22f1..3252f79 100644 --- a/apps/cli/pisa-cli.py +++ b/apps/cli/pisa-cli.py @@ -5,13 +5,14 @@ import json import logging import requests from sys import argv -from getopt import getopt, GetoptError from hashlib import sha256 -from binascii import hexlify, unhexlify +from binascii import unhexlify +from getopt import getopt, GetoptError from requests import ConnectTimeout, ConnectionError -from apps.cli import DEFAULT_PISA_API_SERVER, DEFAULT_PISA_API_PORT, CLIENT_LOG_FILE + from apps.cli.blob import Blob from apps.cli.help import help_add_appointment, help_get_appointment +from apps.cli import DEFAULT_PISA_API_SERVER, DEFAULT_PISA_API_PORT, CLIENT_LOG_FILE def show_message(message, debug, logging): diff --git a/test/add_appointment_test.py b/test/add_appointment_test.py index 35aa1b0..1608779 100644 --- a/test/add_appointment_test.py +++ b/test/add_appointment_test.py @@ -1,12 +1,13 @@ import os import json -import requests import time +import requests from copy import deepcopy from hashlib import sha256 -from binascii import hexlify, unhexlify -from apps.cli.blob import Blob +from binascii import unhexlify + from pisa 
import HOST, PORT +from apps.cli.blob import Blob from pisa.utils.auth_proxy import AuthServiceProxy from pisa.conf import BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT diff --git a/test/appointment_tests.py b/test/appointment_tests.py index a35dc59..40e2f61 100644 --- a/test/appointment_tests.py +++ b/test/appointment_tests.py @@ -1,21 +1,12 @@ -import logging +import pisa.conf as conf from pisa.inspector import Inspector from pisa.appointment import Appointment -from pisa import errors -from pisa.utils.authproxy import AuthServiceProxy, JSONRPCException -from pisa.conf import BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT, SUPPORTED_HASH_FUNCTIONS, \ - SUPPORTED_CIPHERS, TEST_LOG_FILE - -logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO, handlers=[ - logging.FileHandler(TEST_LOG_FILE) -]) +from pisa import errors, logging, bitcoin_cli +from pisa.utils.auth_proxy import JSONRPCException appointment = {"locator": None, "start_time": None, "end_time": None, "dispute_delta": None, "encrypted_blob": None, "cipher": None, "hash_function": None} -bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, - BTC_RPC_PORT)) - try: block_height = bitcoin_cli.getblockcount() @@ -54,7 +45,7 @@ cipher_rets = [errors.APPOINTMENT_EMPTY_FIELD, errors.APPOINTMENT_WRONG_FIELD_TY hash_function_rets = [errors.APPOINTMENT_EMPTY_FIELD, errors.APPOINTMENT_WRONG_FIELD_TYPE, errors.APPOINTMENT_HASH_FUNCTION_NOT_SUPPORTED, errors.APPOINTMENT_HASH_FUNCTION_NOT_SUPPORTED] -inspector = Inspector(debug=True, logging=logging) +inspector = Inspector() print("Locator tests\n") for locator, ret in zip(locators, locators_rets): @@ -119,7 +110,7 @@ for cipher, ret in zip(ciphers, cipher_rets): print(r) # Setting the cipher to the only supported one for now -appointment['cipher'] = SUPPORTED_CIPHERS[0] +appointment['cipher'] = conf.SUPPORTED_CIPHERS[0] print("\nHash function tests\n") for hash_function, ret in zip(hash_functions, hash_function_rets): @@ -130,7 +121,7 @@ for hash_function, ret in zip(hash_functions, hash_function_rets): print(r) # Setting the cipher to the only supported one for now -appointment['hash_function'] = SUPPORTED_HASH_FUNCTIONS[0] +appointment['hash_function'] = conf.SUPPORTED_HASH_FUNCTIONS[0] r = inspector.inspect(appointment) assert type(r) == Appointment diff --git a/test/simulator/bitcoin_sim_tests.py b/test/simulator/bitcoin_sim_tests.py index bc5f45a..cb1c5bb 100644 --- a/test/simulator/bitcoin_sim_tests.py +++ b/test/simulator/bitcoin_sim_tests.py @@ -1,6 +1,5 @@ import os -import binascii -from pisa.utils.authproxy import AuthServiceProxy, JSONRPCException +from pisa.utils.auth_proxy import AuthServiceProxy, JSONRPCException from pisa.conf import BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT from pisa.tools import check_txid_format diff --git a/test/unit/test_cleaner.py b/test/unit/test_cleaner.py index de1be8a..5308ca6 100644 --- a/test/unit/test_cleaner.py +++ b/test/unit/test_cleaner.py @@ -1,7 +1,6 @@ import random from os import urandom from uuid import uuid4 -from binascii import hexlify from pisa import logging from pisa.responder import Job From 4be506412ee208617f452bd1e7b2b9a4f496c41a Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Mon, 7 Oct 2019 15:18:00 +0100 Subject: [PATCH 13/82] Adds Inspector unittests. Fixes some related bugs. 
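The new tests lean on the (rcode, message) convention every Inspector check follows; below is a toy illustration of that contract (check_field and the error value are made up for the example, the real codes live in pisa.errors):

    # Toy check: rcode 0 means the field passed, anything else carries a reason
    APPOINTMENT_OK = (0, None)
    APPOINTMENT_EMPTY_FIELD = -1  # stand-in value, not the real pisa.errors code

    def check_field(value):
        if value is None:
            return APPOINTMENT_EMPTY_FIELD, "empty field"

        return APPOINTMENT_OK

    assert check_field("deadbeef") == APPOINTMENT_OK
    rcode, message = check_field(None)
    assert rcode == APPOINTMENT_EMPTY_FIELD and message == "empty field"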
--- pisa/block_processor.py | 12 ++ pisa/inspector.py | 44 ++++--- test/appointment_tests.py | 130 -------------------- test/unit/test_inspector.py | 230 ++++++++++++++++++++++++++++++++++++ 4 files changed, 270 insertions(+), 146 deletions(-) delete mode 100644 test/appointment_tests.py create mode 100644 test/unit/test_inspector.py diff --git a/pisa/block_processor.py b/pisa/block_processor.py index b047cc3..062d938 100644 --- a/pisa/block_processor.py +++ b/pisa/block_processor.py @@ -30,6 +30,18 @@ return block_hash + @staticmethod + def get_block_count(): + block_count = None + + try: + block_count = bitcoin_cli.getblockcount() + + except JSONRPCException as e: + logging.error("[BlockProcessor] couldn't get block count. Error code {}".format(e)) + + return block_count + @staticmethod def get_potential_matches(txids, locator_uuid_map): potential_locators = {sha256(binascii.unhexlify(txid)).hexdigest(): txid for txid in txids} diff --git a/pisa/inspector.py b/pisa/inspector.py index da65bfd..ae9ca89 100644 --- a/pisa/inspector.py +++ b/pisa/inspector.py @@ -2,9 +2,14 @@ import re from pisa import errors import pisa.conf as conf -from pisa import logging, bitcoin_cli +from pisa import logging from pisa.appointment import Appointment -from pisa.utils.auth_proxy import JSONRPCException +from pisa.block_processor import BlockProcessor + +# FIXME: The inspector logs the wrong messages sent from the users. A possible attack surface would be to send a really +# long field that, even if not accepted by PISA, would be stored in the logs. This is a possible DoS surface +# since pisa would store any kind of message (no matter the length). Solution: truncate the length of the fields +# stored + blacklist if multiple wrong requests are received. class Inspector: @@ -17,10 +22,11 @@ cipher = data.get('cipher') hash_function = data.get('hash_function') - try: - block_height = bitcoin_cli.getblockcount() + block_height = BlockProcessor.get_block_count() + if block_height is not None: rcode, message = self.check_locator(locator) + if rcode == 0: rcode, message = self.check_start_time(start_time, block_height) if rcode == 0: @@ -39,9 +45,7 @@ else: r = (rcode, message) - except JSONRPCException as e: - logging.error("[Inspector] JSONRPCException. Error code {}".format(e)) - + else: # In case of an unknown exception, assign a special rcode and reason. r = (errors.UNKNOWN_JSON_RPC_EXCEPTION, "Unexpected error occurred") @@ -76,6 +80,9 @@ message = None rcode = 0 + # TODO: What's too close to the current height is not properly defined. Right now any appointment that is in the + # future will be accepted (even if it's only one block away). + t = type(start_time) if start_time is None: @@ -89,7 +96,7 @@ if start_time < block_height: message = "start_time is in the past" else: - message = "start_time too close to current height" + message = "start_time is too close to current height" if message is not None: logging.error("[Inspector] {}".format(message)) @@ -101,6 +108,9 @@ message = None rcode = 0 + # TODO: What's too close to the current height is not properly defined. Right now any appointment that ends in + # the future will be accepted (even if it's only one block away).
+ t = type(end_time) if end_time is None: @@ -115,9 +125,12 @@ class Inspector: message = "end_time is smaller than start_time" else: message = "end_time is equal to start_time" - elif block_height > end_time: + elif block_height >= end_time: rcode = errors.APPOINTMENT_FIELD_TOO_SMALL - message = 'end_time is in the past' + if block_height > end_time: + message = 'end_time is in the past' + else: + message = 'end_time is too close to current height' if message is not None: logging.error("[Inspector] {}".format(message)) @@ -161,10 +174,9 @@ class Inspector: elif t != str: rcode = errors.APPOINTMENT_WRONG_FIELD_TYPE message = "wrong encrypted_blob data type ({})".format(t) - elif encrypted_blob == '': - # ToDo: #6 We may want to define this to be at least as long as one block of the cipher we are using - rcode = errors.APPOINTMENT_WRONG_FIELD - message = "wrong encrypted_blob" + elif re.search(r'^[0-9A-Fa-f]+$', encrypted_blob) is None: + rcode = errors.APPOINTMENT_WRONG_FIELD_FORMAT + message = "wrong encrypted_blob format ({})".format(encrypted_blob) if message is not None: logging.error("[Inspector] {}".format(message)) @@ -184,7 +196,7 @@ class Inspector: elif t != str: rcode = errors.APPOINTMENT_WRONG_FIELD_TYPE message = "wrong cipher data type ({})".format(t) - elif cipher not in conf.SUPPORTED_CIPHERS: + elif cipher.upper() not in conf.SUPPORTED_CIPHERS: rcode = errors.APPOINTMENT_CIPHER_NOT_SUPPORTED message = "cipher not supported: {}".format(cipher) @@ -206,7 +218,7 @@ class Inspector: elif t != str: rcode = errors.APPOINTMENT_WRONG_FIELD_TYPE message = "wrong hash_function data type ({})".format(t) - elif hash_function not in conf.SUPPORTED_HASH_FUNCTIONS: + elif hash_function.upper() not in conf.SUPPORTED_HASH_FUNCTIONS: rcode = errors.APPOINTMENT_HASH_FUNCTION_NOT_SUPPORTED message = "hash_function not supported {}".format(hash_function) diff --git a/test/appointment_tests.py b/test/appointment_tests.py deleted file mode 100644 index 40e2f61..0000000 --- a/test/appointment_tests.py +++ /dev/null @@ -1,130 +0,0 @@ -import pisa.conf as conf -from pisa.inspector import Inspector -from pisa.appointment import Appointment -from pisa import errors, logging, bitcoin_cli -from pisa.utils.auth_proxy import JSONRPCException - -appointment = {"locator": None, "start_time": None, "end_time": None, "dispute_delta": None, - "encrypted_blob": None, "cipher": None, "hash_function": None} - -try: - block_height = bitcoin_cli.getblockcount() - -except JSONRPCException as e: - logging.error("[Inspector] JSONRPCException. 
Error code {}".format(e)) - -locators = [None, 0, 'A' * 31, "A" * 63 + "_"] -start_times = [None, 0, '', 15.0, block_height - 10] -end_times = [None, 0, '', 26.123, block_height - 11] -dispute_deltas = [None, 0, '', 1.2, -3, 30] -encrypted_blobs = [None, 0, ''] -ciphers = [None, 0, '', 'foo'] -hash_functions = [None, 0, '', 'foo'] - -locators_rets = [errors.APPOINTMENT_EMPTY_FIELD, errors.APPOINTMENT_WRONG_FIELD_TYPE, - errors.APPOINTMENT_WRONG_FIELD_SIZE, errors.APPOINTMENT_WRONG_FIELD_FORMAT] - -start_time_rets = [errors.APPOINTMENT_EMPTY_FIELD, errors.APPOINTMENT_FIELD_TOO_SMALL, - errors.APPOINTMENT_WRONG_FIELD_TYPE, errors.APPOINTMENT_WRONG_FIELD_TYPE, - errors.APPOINTMENT_FIELD_TOO_SMALL] - -end_time_rets = [errors.APPOINTMENT_EMPTY_FIELD, errors.APPOINTMENT_FIELD_TOO_SMALL, - errors.APPOINTMENT_WRONG_FIELD_TYPE, errors.APPOINTMENT_WRONG_FIELD_TYPE, - errors.APPOINTMENT_FIELD_TOO_SMALL] - -dispute_delta_rets = [errors.APPOINTMENT_EMPTY_FIELD, errors.APPOINTMENT_FIELD_TOO_SMALL, - errors.APPOINTMENT_WRONG_FIELD_TYPE, errors.APPOINTMENT_WRONG_FIELD_TYPE, - errors.APPOINTMENT_FIELD_TOO_SMALL] - -encrypted_blob_rets = [errors.APPOINTMENT_EMPTY_FIELD, errors.APPOINTMENT_WRONG_FIELD_TYPE, - errors.APPOINTMENT_WRONG_FIELD] - -cipher_rets = [errors.APPOINTMENT_EMPTY_FIELD, errors.APPOINTMENT_WRONG_FIELD_TYPE, - errors.APPOINTMENT_CIPHER_NOT_SUPPORTED, errors.APPOINTMENT_CIPHER_NOT_SUPPORTED] - -hash_function_rets = [errors.APPOINTMENT_EMPTY_FIELD, errors.APPOINTMENT_WRONG_FIELD_TYPE, - errors.APPOINTMENT_HASH_FUNCTION_NOT_SUPPORTED, errors.APPOINTMENT_HASH_FUNCTION_NOT_SUPPORTED] - -inspector = Inspector() - -print("Locator tests\n") -for locator, ret in zip(locators, locators_rets): - appointment["locator"] = locator - r = inspector.inspect(appointment) - - assert r[0] == ret - print(r) - -# Set locator to a 'valid' one -appointment['locator'] = 'A' * 64 - -print("\nStart time tests\n") -for start_time, ret in zip(start_times, start_time_rets): - appointment["start_time"] = start_time - r = inspector.inspect(appointment) - - assert r[0] == ret - print(r) -# Setting the start time to some time in the future -appointment['start_time'] = block_height + 10 - -print("\nEnd time tests\n") -for end_time, ret in zip(end_times, end_time_rets): - appointment["end_time"] = end_time - r = inspector.inspect(appointment) - - assert r[0] == ret - print(r) - -# Setting the end time to something consistent with start time -appointment['end_time'] = block_height + 30 - -print("\nDelta tests\n") -for dispute_delta, ret in zip(dispute_deltas, dispute_delta_rets): - appointment["dispute_delta"] = dispute_delta - r = inspector.inspect(appointment) - - assert r[0] == ret - print(r) - -# Setting the a proper dispute delta -appointment['dispute_delta'] = appointment['end_time'] - appointment['start_time'] - -print("\nEncrypted blob tests\n") -for encrypted_blob, ret in zip(encrypted_blobs, encrypted_blob_rets): - appointment["encrypted_blob"] = encrypted_blob - r = inspector.inspect(appointment) - - assert r[0] == ret - print(r) - -# Setting the encrypted blob to something that may pass -appointment['encrypted_blob'] = 'A' * 32 - -print("\nCipher tests\n") -for cipher, ret in zip(ciphers, cipher_rets): - appointment["cipher"] = cipher - r = inspector.inspect(appointment) - - assert r[0] == ret - print(r) - -# Setting the cipher to the only supported one for now -appointment['cipher'] = conf.SUPPORTED_CIPHERS[0] - -print("\nHash function tests\n") -for hash_function, ret in zip(hash_functions, hash_function_rets): - 
appointment["hash_function"] = hash_function - r = inspector.inspect(appointment) - - assert r[0] == ret - print(r) - -# Setting the cipher to the only supported one for now -appointment['hash_function'] = conf.SUPPORTED_HASH_FUNCTIONS[0] - -r = inspector.inspect(appointment) -assert type(r) == Appointment - -print("\nAll tests passed!") - diff --git a/test/unit/test_inspector.py b/test/unit/test_inspector.py new file mode 100644 index 0000000..8e777d7 --- /dev/null +++ b/test/unit/test_inspector.py @@ -0,0 +1,230 @@ +from os import urandom + +from pisa import logging +from pisa.errors import * +from pisa.inspector import Inspector +from pisa.appointment import Appointment +from pisa.block_processor import BlockProcessor +from pisa.conf import MIN_DISPUTE_DELTA, SUPPORTED_CIPHERS, SUPPORTED_HASH_FUNCTIONS + +inspector = Inspector() +APPOINTMENT_OK = (0, None) + +NO_HEX_STINGS = ["R" * 64, urandom(31).hex() + "PP", "$"*64, " "*64] +WRONG_TYPES = [[], '', urandom(32).hex(), 3.2, 2.0, (), object, {}, " "*32, object()] +WRONG_TYPES_NO_STR = [[], urandom(32), 3.2, 2.0, (), object, {}, object()] + + +def test_check_locator(): + # Right appointment type, size and format + locator = urandom(32).hex() + assert(Inspector.check_locator(locator) == APPOINTMENT_OK) + + # Wrong size (too big) + locator = urandom(33).hex() + assert(Inspector.check_locator(locator)[0] == APPOINTMENT_WRONG_FIELD_SIZE) + + # Wrong size (too small) + locator = urandom(31).hex() + assert(Inspector.check_locator(locator)[0] == APPOINTMENT_WRONG_FIELD_SIZE) + + # Empty + locator = None + assert (Inspector.check_locator(locator)[0] == APPOINTMENT_EMPTY_FIELD) + + # Wrong type (several types tested, it should do for anything that is not a string) + locators = [[], -1, 3.2, 0, 4, (), object, {}, object()] + + for locator in locators: + assert (Inspector.check_locator(locator)[0] == APPOINTMENT_WRONG_FIELD_TYPE) + + # Wrong format (no hex) + locators = NO_HEX_STINGS + for locator in locators: + assert (Inspector.check_locator(locator)[0] == APPOINTMENT_WRONG_FIELD_FORMAT) + + +def test_check_start_time(): + # Time is defined in block height + current_time = 100 + + # Right format and right value (start time in the future) + start_time = 101 + assert (Inspector.check_start_time(start_time, current_time) == APPOINTMENT_OK) + + # Start time too small (either same block or block in the past) + start_times = [100, 99, 98, -1] + for start_time in start_times: + assert (Inspector.check_start_time(start_time, current_time)[0] == APPOINTMENT_FIELD_TOO_SMALL) + + # Empty field + start_time = None + assert (Inspector.check_start_time(start_time, current_time)[0] == APPOINTMENT_EMPTY_FIELD) + + # Wrong data type + start_times = WRONG_TYPES + for start_time in start_times: + assert (Inspector.check_start_time(start_time, current_time)[0] == APPOINTMENT_WRONG_FIELD_TYPE) + + +def test_check_end_time(): + # Time is defined in block height + current_time = 100 + start_time = 120 + + # Right format and right value (start time before end and end in the future) + end_time = 121 + assert (Inspector.check_end_time(end_time, start_time, current_time) == APPOINTMENT_OK) + + # End time too small (start time after end time) + end_times = [120, 119, 118, -1] + for end_time in end_times: + assert (Inspector.check_end_time(end_time, start_time, current_time)[0] == APPOINTMENT_FIELD_TOO_SMALL) + + # End time too small (either same height as current block or in the past) + current_time = 130 + end_times = [130, 129, 128, -1] + for end_time in end_times: + 
assert (Inspector.check_end_time(end_time, start_time, current_time)[0] == APPOINTMENT_FIELD_TOO_SMALL) + + # Empty field + end_time = None + assert (Inspector.check_end_time(end_time, start_time, current_time)[0] == APPOINTMENT_EMPTY_FIELD) + + # Wrong data type + end_times = WRONG_TYPES + for end_time in end_times: + assert (Inspector.check_end_time(end_time, start_time, current_time)[0] == APPOINTMENT_WRONG_FIELD_TYPE) + + +def test_check_delta(): + # Right value, right format + deltas = [MIN_DISPUTE_DELTA, MIN_DISPUTE_DELTA+1, MIN_DISPUTE_DELTA+1000] + for delta in deltas: + assert (Inspector.check_delta(delta) == APPOINTMENT_OK) + + # Delta too small + deltas = [MIN_DISPUTE_DELTA-1, MIN_DISPUTE_DELTA-2, 0, -1, -1000] + for delta in deltas: + assert (Inspector.check_delta(delta)[0] == APPOINTMENT_FIELD_TOO_SMALL) + + # Empty field + delta = None + assert(Inspector.check_delta(delta)[0] == APPOINTMENT_EMPTY_FIELD) + + # Wrong data type + deltas = WRONG_TYPES + for delta in deltas: + assert (Inspector.check_delta(delta)[0] == APPOINTMENT_WRONG_FIELD_TYPE) + + +def test_check_blob(): + # Right format and length + encrypted_blob = urandom(120).hex() + assert(Inspector.check_blob(encrypted_blob) == APPOINTMENT_OK) + + # # Wrong content + # # FIXME: There is not proper defined format for this yet. It should be restricted by size at least, and check it + # # is multiple of the block size defined by the encryption function. + + # Wrong type + encrypted_blobs = WRONG_TYPES_NO_STR + for encrypted_blob in encrypted_blobs: + assert (Inspector.check_blob(encrypted_blob)[0] == APPOINTMENT_WRONG_FIELD_TYPE) + + # Empty field + encrypted_blob = None + assert (Inspector.check_blob(encrypted_blob)[0] == APPOINTMENT_EMPTY_FIELD) + + # Wrong format (no hex) + encrypted_blobs = NO_HEX_STINGS + for encrypted_blob in encrypted_blobs: + assert (Inspector.check_blob(encrypted_blob)[0] == APPOINTMENT_WRONG_FIELD_FORMAT) + + +def test_check_cipher(): + # Right format and content (any case combination should be accepted) + for cipher in SUPPORTED_CIPHERS: + cipher_cases = [cipher, cipher.lower(), cipher.capitalize()] + for case in cipher_cases: + assert(Inspector.check_cipher(case) == APPOINTMENT_OK) + + # Wrong type + ciphers = WRONG_TYPES_NO_STR + for cipher in ciphers: + assert(Inspector.check_cipher(cipher)[0] == APPOINTMENT_WRONG_FIELD_TYPE) + + # Wrong value + ciphers = NO_HEX_STINGS + for cipher in ciphers: + assert(Inspector.check_cipher(cipher)[0] == APPOINTMENT_CIPHER_NOT_SUPPORTED) + + # Empty field + cipher = None + assert (Inspector.check_cipher(cipher)[0] == APPOINTMENT_EMPTY_FIELD) + + +def test_check_hash_function(): + # Right format and content (any case combination should be accepted) + for hash_function in SUPPORTED_HASH_FUNCTIONS: + hash_function_cases = [hash_function, hash_function.lower(), hash_function.capitalize()] + for case in hash_function_cases: + assert (Inspector.check_hash_function(case) == APPOINTMENT_OK) + + # Wrong type + hash_functions = WRONG_TYPES_NO_STR + for hash_function in hash_functions: + assert (Inspector.check_hash_function(hash_function)[0] == APPOINTMENT_WRONG_FIELD_TYPE) + + # Wrong value + hash_functions = NO_HEX_STINGS + for hash_function in hash_functions: + assert (Inspector.check_hash_function(hash_function)[0] == APPOINTMENT_HASH_FUNCTION_NOT_SUPPORTED) + + # Empty field + hash_function = None + assert (Inspector.check_hash_function(hash_function)[0] == APPOINTMENT_EMPTY_FIELD) + + +def test_inspect(): + # Running this required bitcoind to be running (or 
mocked) since the block height is queried by inspect. + + # At this point every single check function has been already tested, let's test inspect with an invalid and a valid + # appointments. + + # Invalid appointment, every field is empty + appointment_data = dict() + appointment = inspector.inspect(appointment_data) + assert (type(appointment) == tuple and appointment[0] != 0) + + # Valid appointment + locator = urandom(32).hex() + start_time = BlockProcessor.get_block_count() + 5 + end_time = start_time + 20 + dispute_delta = MIN_DISPUTE_DELTA + encrypted_blob = urandom(64).hex() + cipher = SUPPORTED_CIPHERS[0] + hash_function = SUPPORTED_HASH_FUNCTIONS[0] + + appointment_data = {"locator": locator, "start_time": start_time, "end_time": end_time, + "dispute_delta": dispute_delta, "encrypted_blob": encrypted_blob, "cipher": cipher, + "hash_function": hash_function} + + appointment = inspector.inspect(appointment_data) + + assert(type(appointment) == Appointment and appointment.locator == locator and appointment.start_time == start_time + and appointment.end_time == end_time and appointment.dispute_delta == dispute_delta and + appointment.encrypted_blob.data == encrypted_blob and appointment.cipher == cipher and + appointment.hash_function == hash_function) + + +logging.getLogger().disabled = True + +test_check_locator() +test_check_start_time() +test_check_end_time() +test_check_delta() +test_check_blob() +test_check_cipher() +test_check_hash_function() +test_inspect() From d2a07d651965331714d1513be1ad09aa410ea90e Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Mon, 7 Oct 2019 15:25:40 +0100 Subject: [PATCH 14/82] More clean up --- pisa/api.py | 3 +-- pisa/carrier.py | 4 ++-- pisa/encrypted_blob.py | 15 ++++++++------- pisa/utils/zmq_subscriber.py | 1 + 4 files changed, 12 insertions(+), 11 deletions(-) diff --git a/pisa/api.py b/pisa/api.py index 660e5f0..2bbe448 100644 --- a/pisa/api.py +++ b/pisa/api.py @@ -1,11 +1,10 @@ import json from flask import Flask, request, Response, abort, jsonify -from pisa import HOST, PORT, logging, bitcoin_cli from pisa.watcher import Watcher from pisa.inspector import Inspector from pisa.appointment import Appointment - +from pisa import HOST, PORT, logging, bitcoin_cli # ToDo: #5-add-async-to-api app = Flask(__name__) diff --git a/pisa/carrier.py b/pisa/carrier.py index 0526f5c..eb0319d 100644 --- a/pisa/carrier.py +++ b/pisa/carrier.py @@ -1,7 +1,7 @@ +from pisa.rpc_errors import * +from pisa import logging, bitcoin_cli from pisa.utils.auth_proxy import JSONRPCException from pisa.errors import UNKNOWN_JSON_RPC_EXCEPTION -from pisa import logging, bitcoin_cli -from pisa.rpc_errors import * class Carrier: diff --git a/pisa/encrypted_blob.py b/pisa/encrypted_blob.py index 5c1b78f..7e7d2bb 100644 --- a/pisa/encrypted_blob.py +++ b/pisa/encrypted_blob.py @@ -2,12 +2,14 @@ from hashlib import sha256 from binascii import unhexlify, hexlify from cryptography.hazmat.primitives.ciphers.aead import AESGCM +from pisa import logging + class EncryptedBlob: def __init__(self, data): self.data = data - def decrypt(self, key, debug, logging): + def decrypt(self, key): # master_key = H(tx_id | tx_id) master_key = sha256(key + key).digest() @@ -15,12 +17,11 @@ class EncryptedBlob: sk = master_key[:16] nonce = master_key[16:] - if debug: - logging.info("[Watcher] creating new blob") - logging.info("[Watcher] master key: {}".format(hexlify(master_key).decode())) - logging.info("[Watcher] sk: {}".format(hexlify(sk).decode())) - logging.info("[Watcher] nonce: 
{}".format(hexlify(nonce).decode())) - logging.info("[Watcher] encrypted_blob: {}".format(self.data)) + logging.info("[Watcher] creating new blob") + logging.info("[Watcher] master key: {}".format(hexlify(master_key).decode())) + logging.info("[Watcher] sk: {}".format(hexlify(sk).decode())) + logging.info("[Watcher] nonce: {}".format(hexlify(nonce).decode())) + logging.info("[Watcher] encrypted_blob: {}".format(self.data)) # Decrypt aesgcm = AESGCM(sk) diff --git a/pisa/utils/zmq_subscriber.py b/pisa/utils/zmq_subscriber.py index 9ff9043..75e175d 100644 --- a/pisa/utils/zmq_subscriber.py +++ b/pisa/utils/zmq_subscriber.py @@ -1,5 +1,6 @@ import zmq import binascii + from pisa import logging from pisa.conf import FEED_PROTOCOL, FEED_ADDR, FEED_PORT From 76f0b1934a81d30ddf1109d7c26cbb8eb3227ad7 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Mon, 7 Oct 2019 15:59:52 +0100 Subject: [PATCH 15/82] Client-side clean up --- apps/cli/__init__.py | 8 +++++ apps/cli/blob.py | 14 ++++---- apps/cli/pisa-cli.py | 78 ++++++++++++++++++-------------------------- 3 files changed, 47 insertions(+), 53 deletions(-) diff --git a/apps/cli/__init__.py b/apps/cli/__init__.py index aee318c..40e495c 100644 --- a/apps/cli/__init__.py +++ b/apps/cli/__init__.py @@ -1,3 +1,5 @@ +import logging + # PISA-SERVER DEFAULT_PISA_API_SERVER = 'btc.pisa.watch' DEFAULT_PISA_API_PORT = 9814 @@ -8,3 +10,9 @@ CLIENT_LOG_FILE = 'pisa.log' # CRYPTO SUPPORTED_HASH_FUNCTIONS = ["SHA256"] SUPPORTED_CIPHERS = ["AES-GCM-128"] + +# Configure logging +logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO, handlers=[ + logging.FileHandler(CLIENT_LOG_FILE), + logging.StreamHandler() +]) diff --git a/apps/cli/blob.py b/apps/cli/blob.py index ac7dfa3..9911011 100644 --- a/apps/cli/blob.py +++ b/apps/cli/blob.py @@ -2,6 +2,7 @@ from hashlib import sha256 from binascii import hexlify, unhexlify from cryptography.hazmat.primitives.ciphers.aead import AESGCM +from apps.cli import logging from apps.cli import SUPPORTED_HASH_FUNCTIONS, SUPPORTED_CIPHERS @@ -21,7 +22,7 @@ class Blob: raise Exception("Cipher not supported ({}). Supported ciphers: {}".format(self.hash_function, SUPPORTED_CIPHERS)) - def encrypt(self, tx_id, debug, logging): + def encrypt(self, tx_id): # Transaction to be encrypted # FIXME: The blob data should contain more things that just the transaction. Leaving like this for now. 
tx = unhexlify(self.data) @@ -39,11 +40,10 @@ class Blob: encrypted_blob = aesgcm.encrypt(nonce=nonce, data=tx, associated_data=None) encrypted_blob = hexlify(encrypted_blob).decode() - if debug: - logging.info("[Client] creating new blob") - logging.info("[Client] master key: {}".format(hexlify(master_key).decode())) - logging.info("[Client] sk: {}".format(hexlify(sk).decode())) - logging.info("[Client] nonce: {}".format(hexlify(nonce).decode())) - logging.info("[Client] encrypted_blob: {}".format(encrypted_blob)) + logging.info("[Client] creating new blob") + logging.info("[Client] master key: {}".format(hexlify(master_key).decode())) + logging.info("[Client] sk: {}".format(hexlify(sk).decode())) + logging.info("[Client] nonce: {}".format(hexlify(nonce).decode())) + logging.info("[Client] encrypted_blob: {}".format(encrypted_blob)) return encrypted_blob diff --git a/apps/cli/pisa-cli.py b/apps/cli/pisa-cli.py index 3252f79..6327214 100644 --- a/apps/cli/pisa-cli.py +++ b/apps/cli/pisa-cli.py @@ -12,14 +12,7 @@ from requests import ConnectTimeout, ConnectionError from apps.cli.blob import Blob from apps.cli.help import help_add_appointment, help_get_appointment -from apps.cli import DEFAULT_PISA_API_SERVER, DEFAULT_PISA_API_PORT, CLIENT_LOG_FILE - - -def show_message(message, debug, logging): - if debug: - logging.error('[Client] ' + message[0].lower() + message[1:]) - else: - sys.exit(message) +from apps.cli import DEFAULT_PISA_API_SERVER, DEFAULT_PISA_API_PORT # FIXME: TESTING ENDPOINT, WON'T BE THERE IN PRODUCTION @@ -39,7 +32,7 @@ def generate_dummy_appointment(): print('\nData stored in dummy_appointment_data.json') -def add_appointment(args, debug, logging): +def add_appointment(args): appointment_data = None use_help = "Use 'help add_appointment' for help of how to use the command." @@ -56,14 +49,14 @@ def add_appointment(args, debug, logging): if os.path.isfile(fin): appointment_data = json.load(open(fin)) else: - show_message("Can't find file " + fin, debug, logging) + logging.error("[Client] can't find file " + fin) else: - show_message("No file provided as appointment. " + use_help, debug, logging) + logging.error("[Client] no file provided as appointment. " + use_help) else: appointment_data = json.loads(arg_opt) except json.JSONDecodeError: - show_message("Non-JSON encoded data provided as appointment. " + use_help, debug, logging) + logging.error("[Client] non-JSON encoded data provided as appointment. " + use_help) if appointment_data: valid_locator = check_txid_format(appointment_data.get('tx_id')) @@ -72,28 +65,27 @@ def add_appointment(args, debug, logging): add_appointment_endpoint = "http://{}:{}".format(pisa_api_server, pisa_api_port) appointment = build_appointment(appointment_data.get('tx'), appointment_data.get('tx_id'), appointment_data.get('start_time'), appointment_data.get('end_time'), - appointment_data.get('dispute_delta'), debug, logging) + appointment_data.get('dispute_delta')) - if debug: - logging.info("[Client] sending appointment to PISA") + logging.info("[Client] sending appointment to PISA") try: r = requests.post(url=add_appointment_endpoint, json=json.dumps(appointment), timeout=5) - show_message("{} (code: {}).".format(r.text, r.status_code), debug, logging) + logging.info("[Client] {} (code: {}).".format(r.text, r.status_code)) except ConnectTimeout: - show_message("Can't connect to pisa API. Connection timeout.", debug, logging) + logging.error("[Client] can't connect to pisa API. 
Connection timeout.") except ConnectionError: - show_message("Can't connect to pisa API. Server cannot be reached.", debug, logging) + logging.error("[Client] can't connect to pisa API. Server cannot be reached.") else: - show_message("The provided locator is not valid.", debug, logging) + logging.error("[Client] the provided locator is not valid.") else: - show_message("No appointment data provided. " + use_help, debug, logging) + logging.error("[Client] no appointment data provided. " + use_help) -def get_appointment(args, debug, logging): +def get_appointment(args): if args: arg_opt = args.pop(0) @@ -112,18 +104,19 @@ def get_appointment(args, debug, logging): print(json.dumps(r.json(), indent=4, sort_keys=True)) except ConnectTimeout: - show_message("Can't connect to pisa API. Connection timeout.", debug, logging) + logging.error("[Client] can't connect to pisa API. Connection timeout.") except ConnectionError: - show_message("Can't connect to pisa API. Server cannot be reached.", debug, logging) + logging.error("[Client] can't connect to pisa API. Server cannot be reached.") else: - show_message("The provided locator is not valid.", debug, logging) + logging.error("[Client] the provided locator is not valid.") + else: - show_message("The provided locator is not valid.", debug, logging) + logging.error("[Client] the provided locator is not valid.") -def build_appointment(tx, tx_id, start_block, end_block, dispute_delta, debug, logging): +def build_appointment(tx, tx_id, start_block, end_block, dispute_delta): locator = sha256(unhexlify(tx_id)).hexdigest() cipher = "AES-GCM-128" @@ -131,7 +124,7 @@ def build_appointment(tx, tx_id, start_block, end_block, dispute_delta, debug, l # FIXME: The blob data should contain more things that just the transaction. Leaving like this for now. blob = Blob(tx, cipher, hash_function) - encrypted_blob = blob.encrypt(tx_id, debug, logging) + encrypted_blob = blob.encrypt(tx_id) appointment = {"locator": locator, "start_time": start_block, "end_time": end_block, "dispute_delta": dispute_delta, "encrypted_blob": encrypted_blob, "cipher": cipher, "hash_function": @@ -165,14 +158,13 @@ def show_usage(): if __name__ == '__main__': - debug = False pisa_api_server = DEFAULT_PISA_API_SERVER pisa_api_port = DEFAULT_PISA_API_PORT commands = ['add_appointment', 'get_appointment', 'help'] testing_commands = ['generate_dummy_appointment'] try: - opts, args = getopt(argv[1:], 's:p:dh', ['server', 'port', 'debug', 'help']) + opts, args = getopt(argv[1:], 's:p:h', ['server', 'port', 'help']) for opt, arg in opts: if opt in ['-s', 'server']: @@ -183,15 +175,6 @@ if __name__ == '__main__': if arg: pisa_api_port = int(arg) - if opt in ['-d', '--debug']: - debug = True - - # Configure logging - logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO, handlers=[ - logging.FileHandler(CLIENT_LOG_FILE), - logging.StreamHandler() - ]) - if opt in ['-h', '--help']: sys.exit(show_usage()) @@ -200,10 +183,10 @@ if __name__ == '__main__': if command in commands: if command == 'add_appointment': - add_appointment(args, debug, logging) + add_appointment(args) elif command == 'get_appointment': - get_appointment(args, debug, logging) + get_appointment(args) elif command == 'help': if args: @@ -216,8 +199,8 @@ if __name__ == '__main__': sys.exit(help_get_appointment()) else: - show_message("Unknown command. Use help to check the list of available commands.", debug, - logging) + logging.error("[Client] unknown command. 
Use help to check the list of available commands.")
+
         else:
             sys.exit(show_usage())

@@ -227,11 +210,14 @@ if __name__ == '__main__':
             generate_dummy_appointment()

         else:
-            show_message("Unknown command. Use help to check the list of available commands.", debug, logging)
+            logging.error("[Client] unknown command. Use help to check the list of available commands.")
+
     else:
-        show_message("No command provided. Use help to check the list of available commands.", debug, logging)
+        logging.error("[Client] no command provided. Use help to check the list of available commands.")

     except GetoptError as e:
-        show_message(e, debug, logging)
+        logging.error("[Client] {}".format(e))
+
     except json.JSONDecodeError as e:
-        show_message('Non-JSON encoded appointment passed as parameter.', debug, logging)
+        logging.error("[Client] non-JSON encoded appointment passed as parameter.")
+

From 88532f7345e8ca402f7990f1f2c6d4dbc8be2e5d Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Mon, 7 Oct 2019 16:34:28 +0100
Subject: [PATCH 16/82] Changes decrypt key input type and return

The input types for Blob.encrypt and EncryptedBlob.decrypt were not
consistent. The former was in hex whereas the latter was in bytes. Format
the latter in hex for consistency.
---
 pisa/block_processor.py | 3 +--
 pisa/encrypted_blob.py  | 4 +++-
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/pisa/block_processor.py b/pisa/block_processor.py
index 062d938..419e26f 100644
--- a/pisa/block_processor.py
+++ b/pisa/block_processor.py
@@ -64,8 +64,7 @@ class BlockProcessor:
             for uuid in locator_uuid_map[locator]:
                 try:
                     # ToDo: #20-test-tx-decrypting-edge-cases
-                    justice_rawtx = appointments[uuid].encrypted_blob.decrypt(binascii.unhexlify(dispute_txid))
-                    justice_rawtx = binascii.hexlify(justice_rawtx).decode()
+                    justice_rawtx = appointments[uuid].encrypted_blob.decrypt(dispute_txid)

                     justice_txid = bitcoin_cli.decoderawtransaction(justice_rawtx).get('txid')
                     matches.append((locator, uuid, dispute_txid, justice_txid, justice_rawtx))

diff --git a/pisa/encrypted_blob.py b/pisa/encrypted_blob.py
index 7e7d2bb..63f6e64 100644
--- a/pisa/encrypted_blob.py
+++ b/pisa/encrypted_blob.py
@@ -11,6 +11,7 @@ class EncryptedBlob:

     def decrypt(self, key):
         # master_key = H(tx_id | tx_id)
+        key = unhexlify(key)
         master_key = sha256(key + key).digest()

         # The 16 MSB of the master key will serve as the AES GCM 128 secret key. The 16 LSB will serve as the IV.
@@ -27,5 +28,6 @@ class EncryptedBlob:
         aesgcm = AESGCM(sk)
         data = unhexlify(self.data.encode())
         raw_tx = aesgcm.decrypt(nonce=nonce, data=data, associated_data=None)
+        hex_raw_tx = hexlify(raw_tx).decode('utf8')

-        return raw_tx
+        return hex_raw_tx

From 10656c59559749830603807cf39bea14d3467b16 Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Mon, 7 Oct 2019 16:46:05 +0100
Subject: [PATCH 17/82] Updates Blob Exception

Blob was raising general Exceptions when a ValueError would be a better
match
---
 apps/cli/blob.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/apps/cli/blob.py b/apps/cli/blob.py
index 9911011..d2bf390 100644
--- a/apps/cli/blob.py
+++ b/apps/cli/blob.py
@@ -14,13 +14,13 @@ class Blob:

         # FIXME: We only support SHA256 for now
         if self.hash_function.upper() not in SUPPORTED_HASH_FUNCTIONS:
-            raise Exception("Hash function not supported ({}). Supported Hash functions: {}"
-                            .format(self.hash_function, SUPPORTED_HASH_FUNCTIONS))
+            raise ValueError("Hash function not supported ({}).
Supported Hash functions: {}"
+                             .format(self.hash_function, SUPPORTED_HASH_FUNCTIONS))

         # FIXME: We only support AES-GCM-128 for now
         if self.cipher.upper() not in SUPPORTED_CIPHERS:
-            raise Exception("Cipher not supported ({}). Supported ciphers: {}".format(self.hash_function,
-                                                                                      SUPPORTED_CIPHERS))
+            raise ValueError("Cipher not supported ({}). Supported ciphers: {}".format(self.cipher,
+                                                                                       SUPPORTED_CIPHERS))

     def encrypt(self, tx_id):
         # Transaction to be encrypted

From 8403f871caea8c98f7949a4a877ddfacc578fd40 Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Mon, 7 Oct 2019 16:53:32 +0100
Subject: [PATCH 18/82] Adds EncryptedBlob unittests

---
 pisa/encrypted_blob.py           |  2 ++
 test/unit/test_encrypted_blob.py | 41 ++++++++++++++++++++++++++++++++
 2 files changed, 43 insertions(+)
 create mode 100644 test/unit/test_encrypted_blob.py

diff --git a/pisa/encrypted_blob.py b/pisa/encrypted_blob.py
index 63f6e64..b81db3f 100644
--- a/pisa/encrypted_blob.py
+++ b/pisa/encrypted_blob.py
@@ -5,6 +5,8 @@ from cryptography.hazmat.primitives.ciphers.aead import AESGCM
 from pisa import logging


+# FIXME: EncryptedBlob is assuming AESGCM. A cipher field should be part of the object and the decryption should be
+# performed depending on the cipher.
 class EncryptedBlob:
     def __init__(self, data):
         self.data = data

diff --git a/test/unit/test_encrypted_blob.py b/test/unit/test_encrypted_blob.py
new file mode 100644
index 0000000..58ddd09
--- /dev/null
+++ b/test/unit/test_encrypted_blob.py
@@ -0,0 +1,41 @@
+from os import urandom
+from cryptography.exceptions import InvalidTag
+
+from pisa import logging
+from pisa.encrypted_blob import EncryptedBlob
+
+
+def test_init_encrypted_blob():
+    # Not much to test here, basically that the object is properly created
+    data = urandom(64).hex()
+    assert (EncryptedBlob(data).data == data)
+
+
+def test_decrypt():
+    # TODO: The decryption tests are assuming the cipher is AES-GCM-128, since EncryptedBlob assumes the same. Fix this.
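+    # AES-GCM is authenticated encryption, so decrypting under a mismatched key fails the tag check with
+    # InvalidTag instead of returning garbage. The first case below relies on exactly that.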
+    key = urandom(32).hex()
+    encrypted_data = urandom(64).hex()
+    encrypted_blob = EncryptedBlob(encrypted_data)
+
+    # Trying to decrypt random data (in AES-GCM-128) should result in an InvalidTag exception
+    try:
+        encrypted_blob.decrypt(key)
+        assert False, "Able to decrypt random data with random key"
+
+    except InvalidTag:
+        assert True
+
+    # Valid data should run with no InvalidTag and verify
+    data = "6097cdf52309b1b2124efeed36bd34f46dc1c25ad23ac86f28380f746254f777"
+    key = 'b2e984a570f6f49bc38ace178e09147b0aa296cbb7c92eb01412f7e2d07b5659'
+    encrypted_data = "092e93d4a34aac4367075506f2c050ddfa1a201ee6669b65058572904dcea642aeb01ea4b57293618e8c46809dfadadc"
+    encrypted_blob = EncryptedBlob(encrypted_data)
+
+    assert(encrypted_blob.decrypt(key) == data)
+
+
+logging.getLogger().disabled = True
+
+test_init_encrypted_blob()
+test_decrypt()
+

From ab1ad33e325d5d1803de3406de31d4a10504cf1d Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Mon, 7 Oct 2019 17:22:19 +0100
Subject: [PATCH 19/82] Adds Blob unittests and Blob sanity checks

---
 apps/cli/blob.py       | 10 +++++
 test/unit/test_blob.py | 94 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 104 insertions(+)
 create mode 100644 test/unit/test_blob.py

diff --git a/apps/cli/blob.py b/apps/cli/blob.py
index d2bf390..6041050 100644
--- a/apps/cli/blob.py
+++ b/apps/cli/blob.py
@@ -1,3 +1,4 @@
+import re
 from hashlib import sha256
 from binascii import hexlify, unhexlify
 from cryptography.hazmat.primitives.ciphers.aead import AESGCM
@@ -8,6 +9,9 @@ from apps.cli import SUPPORTED_HASH_FUNCTIONS, SUPPORTED_CIPHERS

 class Blob:
     def __init__(self, data, cipher, hash_function):
+        if type(data) is not str or re.search(r'^[0-9A-Fa-f]+$', data) is None:
+            raise ValueError("Non-Hex character found in data.")
+
         self.data = data
         self.cipher = cipher
         self.hash_function = hash_function
@@ -23,6 +27,12 @@ class Blob:

     def encrypt(self, tx_id):
+        if len(tx_id) != 64:
+            raise ValueError("txid does not match the expected size (32-byte / 64 hex chars).")
+
+        elif re.search(r'^[0-9A-Fa-f]+$', tx_id) is None:
+            raise ValueError("Non-Hex character found in txid.")
+
         # Transaction to be encrypted
         # FIXME: The blob data should contain more things than just the transaction. Leaving like this for now.
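        # tx_id doubles as the key material for the encryption below, hence the strict format checks above.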
        tx = unhexlify(self.data)

diff --git a/test/unit/test_blob.py b/test/unit/test_blob.py
new file mode 100644
index 0000000..fc95450
--- /dev/null
+++ b/test/unit/test_blob.py
@@ -0,0 +1,94 @@
+from os import urandom
+
+from pisa import logging
+from apps.cli.blob import Blob
+from pisa.conf import SUPPORTED_CIPHERS, SUPPORTED_HASH_FUNCTIONS
+
+
+def test_init_blob():
+    data = urandom(64).hex()
+
+    # Fixed (valid) hash function, try different valid ciphers
+    hash_function = SUPPORTED_HASH_FUNCTIONS[0]
+    for cipher in SUPPORTED_CIPHERS:
+        cipher_cases = [cipher, cipher.lower(), cipher.capitalize()]
+
+        for case in cipher_cases:
+            blob = Blob(data, case, hash_function)
+            assert(blob.data == data and blob.cipher == case and blob.hash_function == hash_function)
+
+    # Fixed (valid) cipher, try different valid hash functions
+    cipher = SUPPORTED_CIPHERS[0]
+    for hash_function in SUPPORTED_HASH_FUNCTIONS:
+        hash_function_cases = [hash_function, hash_function.lower(), hash_function.capitalize()]
+
+        for case in hash_function_cases:
+            blob = Blob(data, cipher, case)
+            assert(blob.data == data and blob.cipher == cipher and blob.hash_function == case)
+
+    # Invalid data
+    data = urandom(64)
+    cipher = SUPPORTED_CIPHERS[0]
+    hash_function = SUPPORTED_HASH_FUNCTIONS[0]
+
+    try:
+        Blob(data, cipher, hash_function)
+        assert False, "Able to create blob with wrong data"
+
+    except ValueError:
+        assert True
+
+    # Invalid cipher
+    data = urandom(64).hex()
+    cipher = "A" * 10
+    hash_function = SUPPORTED_HASH_FUNCTIONS[0]
+
+    try:
+        Blob(data, cipher, hash_function)
+        assert False, "Able to create blob with wrong cipher"
+
+    except ValueError:
+        assert True
+
+    # Invalid hash function
+    data = urandom(64).hex()
+    cipher = SUPPORTED_CIPHERS[0]
+    hash_function = "A" * 10
+
+    try:
+        Blob(data, cipher, hash_function)
+        assert False, "Able to create blob with wrong hash function"
+
+    except ValueError:
+        assert True
+
+
+def test_encrypt():
+    # Valid data, valid key
+    data = urandom(64).hex()
+    blob = Blob(data, SUPPORTED_CIPHERS[0], SUPPORTED_HASH_FUNCTIONS[0])
+    key = urandom(32).hex()
+
+    encrypted_blob = blob.encrypt(key)
+
+    # Invalid key (note that encrypt cannot be called with invalid data since that's checked when the Blob is created)
+    invalid_key = urandom(32)
+
+    try:
+        blob.encrypt(invalid_key)
+        assert False, "Able to encrypt with invalid key"
+
+    except ValueError:
+        assert True
+
+    # Check that two encryptions of the same data have the same result
+    encrypted_blob2 = blob.encrypt(key)
+
+    assert(encrypted_blob == encrypted_blob2 and id(encrypted_blob) != id(encrypted_blob2))
+

From e81ccd39a15a12dc840b3d4f95cc775de58d0293 Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Tue, 8 Oct 2019 18:31:02 +0100
Subject: [PATCH 20/82] Adds API unit tests and modifies bitcoin_sim to be
 fixture compatible

- Adds unit tests for API
- Updates API to let BlockProcessor deal with block related JSON-RPC
- Fixes BlockProcessor get_potential_matches return
- Makes bitcoin_sim runnable via function (instead of a main runnable
script) to work with pytest fixtures
- <3 Fixture
---
 pisa/api.py                    |   6 +-
 pisa/block_processor.py        |   2 +
 test/add_appointment_test.py   | 121 --------------------
 test/simulator/bitcoind_sim.py |  21 ++--
 test/unit/test_api.py          | 198 +++++++++++++++++++++++++++++++
 5 files changed, 215 insertions(+), 133 deletions(-)
 delete mode 100644 test/add_appointment_test.py
 create mode 100644 test/unit/test_api.py

diff --git
a/pisa/api.py b/pisa/api.py index 2bbe448..44244ee 100644 --- a/pisa/api.py +++ b/pisa/api.py @@ -3,8 +3,10 @@ from flask import Flask, request, Response, abort, jsonify from pisa.watcher import Watcher from pisa.inspector import Inspector +from pisa import HOST, PORT, logging from pisa.appointment import Appointment -from pisa import HOST, PORT, logging, bitcoin_cli +from pisa.block_processor import BlockProcessor + # ToDo: #5-add-async-to-api app = Flask(__name__) @@ -108,7 +110,7 @@ def get_all_appointments(): @app.route('/get_block_count', methods=['GET']) def get_block_count(): - return jsonify({"block_count": bitcoin_cli.getblockcount()}) + return jsonify({"block_count": BlockProcessor.get_block_count()}) def start_api(): diff --git a/pisa/block_processor.py b/pisa/block_processor.py index 419e26f..d426bda 100644 --- a/pisa/block_processor.py +++ b/pisa/block_processor.py @@ -56,6 +56,8 @@ class BlockProcessor: else: logging.info("[BlockProcessor] no potential matches found") + return potential_matches + @staticmethod def get_matches(potential_matches, locator_uuid_map, appointments): matches = [] diff --git a/test/add_appointment_test.py b/test/add_appointment_test.py deleted file mode 100644 index 1608779..0000000 --- a/test/add_appointment_test.py +++ /dev/null @@ -1,121 +0,0 @@ -import os -import json -import time -import requests -from copy import deepcopy -from hashlib import sha256 -from binascii import unhexlify - -from pisa import HOST, PORT -from apps.cli.blob import Blob -from pisa.utils.auth_proxy import AuthServiceProxy -from pisa.conf import BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT - -PISA_API = "http://{}:{}".format(HOST, PORT) - - -def generate_dummy_appointment(dispute_txid): - r = requests.get(url=PISA_API + '/get_block_count', timeout=5) - - current_height = r.json().get("block_count") - - dummy_appointment_data = {"tx": os.urandom(32).hex(), "tx_id": dispute_txid, "start_time": current_height + 5, - "end_time": current_height + 10, "dispute_delta": 20} - - cipher = "AES-GCM-128" - hash_function = "SHA256" - - locator = sha256(unhexlify(dummy_appointment_data.get("tx_id"))).hexdigest() - blob = Blob(dummy_appointment_data.get("tx"), cipher, hash_function) - - encrypted_blob = blob.encrypt((dummy_appointment_data.get("tx_id")), debug=False, logging=False) - - appointment = {"locator": locator, "start_time": dummy_appointment_data.get("start_time"), - "end_time": dummy_appointment_data.get("end_time"), - "dispute_delta": dummy_appointment_data.get("dispute_delta"), - "encrypted_blob": encrypted_blob, "cipher": cipher, "hash_function": hash_function} - - return appointment - - -def test_add_appointment(appointment=None): - if not appointment: - dispute_txid = os.urandom(32).hex() - appointment = generate_dummy_appointment(dispute_txid) - - print("Sending appointment (locator: {}) to PISA".format(appointment.get("locator"))) - r = requests.post(url=PISA_API, json=json.dumps(appointment), timeout=5) - - assert (r.status_code == 200 and r.reason == 'OK') - print(r.content.decode()) - - print("Requesting it back from PISA") - r = requests.get(url=PISA_API + "/get_appointment?locator=" + appointment["locator"]) - - assert (r.status_code == 200 and r.reason == 'OK') - - received_appointments = json.loads(r.content) - - # Take the status out and leave the received appointments ready to compare - appointment_status = [appointment.pop("status") for appointment in received_appointments] - - # Check that the appointment is within the received appoints - assert 
(appointment in received_appointments) - - # Check that all the appointments are being watched - assert (all([status == "being_watched" for status in appointment_status])) - - -def test_same_locator_multiple_appointments(): - dispute_txid = os.urandom(32).hex() - appointment = generate_dummy_appointment(dispute_txid) - - # Send it once - test_add_appointment(appointment) - time.sleep(0.5) - - # Try again with the same data - print("Sending it again") - test_add_appointment(appointment) - time.sleep(0.5) - - # Try again with the same data but increasing the end time - print("Sending once more") - dup_appointment = deepcopy(appointment) - dup_appointment["end_time"] += 1 - test_add_appointment(dup_appointment) - - print("Sleeping 5 sec") - time.sleep(5) - - bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT)) - - print("Triggering PISA with dispute tx") - bitcoin_cli.sendrawtransaction(dispute_txid) - - print("Sleeping 10 sec (waiting for a new block)") - time.sleep(10) - - print("Getting all appointments") - r = requests.get(url=PISA_API + "/get_all_appointments") - - assert (r.status_code == 200 and r.reason == 'OK') - - received_appointments = json.loads(r.content) - - # Make sure there is not pending instance of the locator in the watcher - watcher_locators = [appointment["locator"] for appointment in received_appointments["watcher_appointments"]] - assert(appointment["locator"] not in watcher_locators) - - # Make sure all the appointments went trough - target_jobs = [v for k, v in received_appointments["responder_jobs"].items() if v["locator"] == - appointment["locator"]] - - assert (len(target_jobs) == 3) - - -if __name__ == '__main__': - - test_same_locator_multiple_appointments() - - print("All good!") diff --git a/test/simulator/bitcoind_sim.py b/test/simulator/bitcoind_sim.py index 1512c1f..358968a 100644 --- a/test/simulator/bitcoind_sim.py +++ b/test/simulator/bitcoind_sim.py @@ -15,6 +15,15 @@ app = Flask(__name__) HOST = 'localhost' PORT = '18443' +mining_simulator = ZMQPublisher(topic=b'hashblock', feed_protocol=FEED_PROTOCOL, feed_addr=FEED_ADDR, + feed_port=FEED_PORT) + +mempool = [] +mined_transactions = {} +blocks = {} +blockchain = [] +TIME_BETWEEN_BLOCKS = 10 + @app.route('/', methods=['POST']) def process_request(): @@ -193,18 +202,10 @@ def simulate_mining(): print("New block mined: {}".format(block_hash)) print("\tTransactions: {}".format(txs_to_mine)) - time.sleep(10) + time.sleep(TIME_BETWEEN_BLOCKS) -if __name__ == '__main__': - mining_simulator = ZMQPublisher(topic=b'hashblock', feed_protocol=FEED_PROTOCOL, feed_addr=FEED_ADDR, - feed_port=FEED_PORT) - - mempool = [] - mined_transactions = {} - blocks = {} - blockchain = [] - +def run_simulator(): mining_thread = Thread(target=simulate_mining) mining_thread.start() diff --git a/test/unit/test_api.py b/test/unit/test_api.py new file mode 100644 index 0000000..445d128 --- /dev/null +++ b/test/unit/test_api.py @@ -0,0 +1,198 @@ +import os +import json +import pytest +import time +import requests +from hashlib import sha256 +from threading import Thread +from binascii import unhexlify + +from apps.cli.blob import Blob +from pisa.api import start_api +from pisa import HOST, PORT, logging +from pisa.utils.auth_proxy import AuthServiceProxy +from test.simulator.bitcoind_sim import run_simulator, TIME_BETWEEN_BLOCKS +from pisa.conf import BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT, MAX_APPOINTMENTS + +logging.getLogger().disabled = True +PISA_API = 
"http://{}:{}".format(HOST, PORT) +MULTIPLE_APPOINTMENTS = 50 + + +def generate_dummy_appointment(dispute_txid): + r = requests.get(url=PISA_API + '/get_block_count', timeout=5) + + current_height = r.json().get("block_count") + + dummy_appointment_data = {"tx": os.urandom(32).hex(), "tx_id": dispute_txid, "start_time": current_height + 5, + "end_time": current_height + 30, "dispute_delta": 20} + + cipher = "AES-GCM-128" + hash_function = "SHA256" + + locator = sha256(unhexlify(dummy_appointment_data.get("tx_id"))).hexdigest() + blob = Blob(dummy_appointment_data.get("tx"), cipher, hash_function) + + encrypted_blob = blob.encrypt((dummy_appointment_data.get("tx_id"))) + + appointment = {"locator": locator, "start_time": dummy_appointment_data.get("start_time"), + "end_time": dummy_appointment_data.get("end_time"), + "dispute_delta": dummy_appointment_data.get("dispute_delta"), + "encrypted_blob": encrypted_blob, "cipher": cipher, "hash_function": hash_function} + + return appointment + + +@pytest.fixture(autouse=True) +def run_api(): + api_thread = Thread(target=start_api) + api_thread.daemon = True + api_thread.start() + + # It takes a little bit of time to start the API (otherwise the requests are sent too early and they fail) + time.sleep(0.1) + + +@pytest.fixture(autouse=True) +def run_bitcoind(): + bitcoind_thread = Thread(target=run_simulator) + bitcoind_thread.daemon = True + bitcoind_thread.start() + + # It takes a little bit of time to start the simulator (otherwise the requests are sent too early and they fail) + time.sleep(0.1) + + +@pytest.fixture +def new_appointment(dispute_txid=None): + appointment = create_appointment(dispute_txid) + + return appointment + + +def create_appointment(dispute_txid=None): + if dispute_txid is None: + dispute_txid = os.urandom(32).hex() + + appointment = generate_dummy_appointment(dispute_txid) + + return appointment + + +def add_appointment(appointment): + r = requests.post(url=PISA_API, json=json.dumps(appointment), timeout=5) + + return r + + +def test_add_appointment(new_appointment): + # Properly formatted appointment + r = add_appointment(new_appointment) + assert (r.status_code == 200) + + # Incorrect appointment + new_appointment["dispute_delta"] = 0 + r = add_appointment(new_appointment) + assert (r.status_code == 400) + + +def test_request_appointment(new_appointment): + # First we need to add an appointment + r = add_appointment(new_appointment) + assert (r.status_code == 200) + + # Next we can request it + r = requests.get(url=PISA_API + "/get_appointment?locator=" + new_appointment["locator"]) + assert (r.status_code == 200) + + # Each locator may point to multiple appointments, check them all + received_appointments = json.loads(r.content) + + # Take the status out and leave the received appointments ready to compare + appointment_status = [appointment.pop("status") for appointment in received_appointments] + + # Check that the appointment is within the received appoints + assert (new_appointment in received_appointments) + + # Check that all the appointments are being watched + assert (all([status == "being_watched" for status in appointment_status])) + + +def test_add_appointment_multiple_times(new_appointment, n=MULTIPLE_APPOINTMENTS): + # Multiple appointments with the same locator should be valid + # TODO: #34-store-identical-appointments + for _ in range(n): + r = add_appointment(new_appointment) + assert (r.status_code == 200) + + +def test_request_multiple_appointments_same_locator(new_appointment, n=MULTIPLE_APPOINTMENTS): 
+ for _ in range(n): + r = add_appointment(new_appointment) + assert (r.status_code == 200) + + test_request_appointment(new_appointment) + + +def test_add_too_many_appointment(new_appointment): + for _ in range(MAX_APPOINTMENTS): + r = add_appointment(new_appointment) + assert (r.status_code == 200) + + r = add_appointment(new_appointment) + assert (r.status_code == 503) + + +def test_get_all_appointments_watcher(n=MULTIPLE_APPOINTMENTS): + appointments = [create_appointment() for _ in range(n)] + + for appointment in appointments: + r = add_appointment(appointment) + assert (r.status_code == 200 and r.reason == 'OK') + + r = requests.get(url=PISA_API + "/get_all_appointments") + assert (r.status_code == 200 and r.reason == 'OK') + + received_appointments = json.loads(r.content) + + # Make sure there all the locators re in the watcher + watcher_locators = [v["locator"] for k, v in received_appointments["watcher_appointments"].items()] + local_locators = [appointment["locator"] for appointment in appointments] + + assert(set(watcher_locators) == set(local_locators)) + assert(len(received_appointments["responder_jobs"]) == 0) + + +def test_get_all_appointments_responder(n=MAX_APPOINTMENTS): + # Create appointments send them to PISA + dispute_txids = [os.urandom(32).hex() for _ in range(n)] + appointments = [create_appointment(dispute_txid) for dispute_txid in dispute_txids] + + for appointment in appointments: + r = add_appointment(appointment) + assert (r.status_code == 200 and r.reason == 'OK') + + # Trigger all disputes + bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT)) + + for dispute_txid in dispute_txids: + bitcoin_cli.sendrawtransaction(dispute_txid) + + # Wait a bit for them to get confirmed + time.sleep(TIME_BETWEEN_BLOCKS) + + # Get all appointments + r = requests.get(url=PISA_API + "/get_all_appointments") + received_appointments = json.loads(r.content) + + # Make sure there is not pending locator in the watcher + responder_jobs = [v["locator"] for k, v in received_appointments["responder_jobs"].items()] + local_locators = [appointment["locator"] for appointment in appointments] + + assert (set(responder_jobs) == set(local_locators)) + assert (len(received_appointments["watcher_appointments"]) == 0) + + + + + From 9a18f759f94e64bca72a2980f035bc7ff5798b12 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Tue, 8 Oct 2019 18:56:45 +0100 Subject: [PATCH 21/82] Adds run_bitcoind fixture --- test/unit/test_inspector.py | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/test/unit/test_inspector.py b/test/unit/test_inspector.py index 8e777d7..1a1eb78 100644 --- a/test/unit/test_inspector.py +++ b/test/unit/test_inspector.py @@ -1,10 +1,14 @@ +import time +import pytest from os import urandom +from threading import Thread from pisa import logging from pisa.errors import * from pisa.inspector import Inspector from pisa.appointment import Appointment from pisa.block_processor import BlockProcessor +from test.simulator.bitcoind_sim import run_simulator from pisa.conf import MIN_DISPUTE_DELTA, SUPPORTED_CIPHERS, SUPPORTED_HASH_FUNCTIONS inspector = Inspector() @@ -14,6 +18,18 @@ NO_HEX_STINGS = ["R" * 64, urandom(31).hex() + "PP", "$"*64, " "*64] WRONG_TYPES = [[], '', urandom(32).hex(), 3.2, 2.0, (), object, {}, " "*32, object()] WRONG_TYPES_NO_STR = [[], urandom(32), 3.2, 2.0, (), object, {}, object()] +logging.getLogger().disabled = True + + +@pytest.fixture(autouse=True) 
+def run_bitcoind():
+    bitcoind_thread = Thread(target=run_simulator)
+    bitcoind_thread.daemon = True
+    bitcoind_thread.start()
+
+    # It takes a little bit of time to start the simulator (otherwise the requests are sent too early and they fail)
+    time.sleep(0.1)
+

 def test_check_locator():
     # Right appointment type, size and format
@@ -187,8 +203,6 @@ def test_check_hash_function():


 def test_inspect():
-    # Running this required bitcoind to be running (or mocked) since the block height is queried by inspect.
-
     # At this point every single check function has been already tested, let's test inspect with an invalid and a valid
     # appointment.
@@ -217,14 +231,3 @@ def test_inspect():
         appointment.encrypted_blob.data == encrypted_blob and appointment.cipher == cipher and
         appointment.hash_function == hash_function)
-
-logging.getLogger().disabled = True
-
-test_check_locator()
-test_check_start_time()
-test_check_end_time()
-test_check_delta()
-test_check_blob()
-test_check_cipher()
-test_check_hash_function()
-test_inspect()

From fa5f587134c42652f2e71e9cdee5d65c29ab38d0 Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Tue, 8 Oct 2019 18:57:29 +0100
Subject: [PATCH 22/82] Updates tests to be pytest friendly

pytest already runs all the test_ functions, so there's no need to call
them
---
 test/unit/test_blob.py           |  7 ++-----
 test/unit/test_cleaner.py        | 31 +++++++++++++------------------
 test/unit/test_encrypted_blob.py |  6 ++----
 3 files changed, 17 insertions(+), 27 deletions(-)

diff --git a/test/unit/test_blob.py b/test/unit/test_blob.py
index fc95450..efd9e1a 100644
--- a/test/unit/test_blob.py
+++ b/test/unit/test_blob.py
@@ -4,6 +4,8 @@ from pisa import logging
 from apps.cli.blob import Blob
 from pisa.conf import SUPPORTED_CIPHERS, SUPPORTED_HASH_FUNCTIONS

+logging.getLogger().disabled = True
+

 def test_init_blob():
     data = urandom(64).hex()
@@ -87,8 +89,3 @@ def test_encrypt():

     assert(encrypted_blob == encrypted_blob2 and id(encrypted_blob) != id(encrypted_blob2))

-
-logging.getLogger().disabled = True
-
-test_init_blob()
-test_encrypt()
-
diff --git a/test/unit/test_cleaner.py b/test/unit/test_cleaner.py
index 5308ca6..5206118 100644
--- a/test/unit/test_cleaner.py
+++ b/test/unit/test_cleaner.py
@@ -12,6 +12,8 @@ ITEMS = 10
 MAX_ITEMS = 100
 ITERATIONS = 1000

+logging.getLogger().disabled = True
+

 def set_up_appointments(total_appointments):
     appointments = dict()
@@ -57,30 +59,23 @@ def set_up_jobs(total_jobs):


 def test_delete_expired_appointment():
-    appointments, locator_uuid_map = set_up_appointments(MAX_ITEMS)
-    expired_appointments = random.sample(list(appointments.keys()), k=ITEMS)
+    for _ in range(ITERATIONS):
+        appointments, locator_uuid_map = set_up_appointments(MAX_ITEMS)
+        expired_appointments = random.sample(list(appointments.keys()), k=ITEMS)

-    Cleaner.delete_expired_appointment(expired_appointments, appointments, locator_uuid_map)
+        Cleaner.delete_expired_appointment(expired_appointments, appointments, locator_uuid_map)

-    assert not set(expired_appointments).issubset(appointments.keys())
+        assert not set(expired_appointments).issubset(appointments.keys())


 def test_delete_completed_jobs():
-    jobs, tx_job_map = set_up_jobs(MAX_ITEMS)
-    selected_jobs = random.sample(list(jobs.keys()), k=ITEMS)
+    for _ in range(ITERATIONS):
+        jobs, tx_job_map = set_up_jobs(MAX_ITEMS)
+        selected_jobs = random.sample(list(jobs.keys()), k=ITEMS)

-    completed_jobs = [(job, 6) for job in selected_jobs]
+        completed_jobs = [(job, 6) for job in selected_jobs]

-    Cleaner.delete_completed_jobs(jobs,
tx_job_map, completed_jobs, 0)
+        Cleaner.delete_completed_jobs(jobs, tx_job_map, completed_jobs, 0)

-    assert not set(completed_jobs).issubset(jobs.keys())
-
-
-logging.getLogger().disabled = True
-
-for _ in range(ITERATIONS):
-    test_delete_expired_appointment()
-
-for _ in range(ITERATIONS):
-    test_delete_completed_jobs()
+        assert not set(completed_jobs).issubset(jobs.keys())

diff --git a/test/unit/test_encrypted_blob.py b/test/unit/test_encrypted_blob.py
index 58ddd09..096c316 100644
--- a/test/unit/test_encrypted_blob.py
+++ b/test/unit/test_encrypted_blob.py
@@ -4,6 +4,8 @@ from cryptography.exceptions import InvalidTag
 from pisa import logging
 from pisa.encrypted_blob import EncryptedBlob

+logging.getLogger().disabled = True
+

 def test_init_encrypted_blob():
     # Not much to test here, basically that the object is properly created
@@ -34,8 +36,4 @@ def test_decrypt():

     assert(encrypted_blob.decrypt(key) == data)

-
-logging.getLogger().disabled = True
-
-test_init_encrypted_blob()
-test_decrypt()


From e7530a53dc27457332a6f570291f41ac2915f83d Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Wed, 9 Oct 2019 14:01:43 +0100
Subject: [PATCH 23/82] Fixes inconsistent test

test_get_all_appointments_responder was failing inconsistently. It was
due to how the api fixture was set up.

Fix:
- Sets up bitcoind and api fixtures to run session-wise (so they are not
re-initialized for every test)
- Updates tests accordingly (e.g. reduces the number of
MULTIPLE_REQUESTS so it does not cap)
- Keeps track of all sent appointments so the test_request functions
don't need to create additional ones
---
 test/unit/test_api.py | 43 +++++++++++++++++++------------------------
 1 file changed, 19 insertions(+), 24 deletions(-)

diff --git a/test/unit/test_api.py b/test/unit/test_api.py
index 445d128..23e6c4e 100644
--- a/test/unit/test_api.py
+++ b/test/unit/test_api.py
@@ -16,7 +16,10 @@ from pisa.conf import BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT,

 logging.getLogger().disabled = True
 PISA_API = "http://{}:{}".format(HOST, PORT)
-MULTIPLE_APPOINTMENTS = 50
+MULTIPLE_APPOINTMENTS = 10
+
+appointments = []
+locator_dispute_txid_map = {}


 def generate_dummy_appointment(dispute_txid):
@@ -43,7 +46,7 @@ def generate_dummy_appointment(dispute_txid):
     return appointment


-@pytest.fixture(autouse=True)
+@pytest.fixture(autouse=True, scope='session')
 def run_api():
     api_thread = Thread(target=start_api)
     api_thread.daemon = True
@@ -53,14 +56,14 @@ def run_api():
     time.sleep(0.1)


-@pytest.fixture(autouse=True)
+@pytest.fixture(autouse=True, scope='session')
 def run_bitcoind():
     bitcoind_thread = Thread(target=run_simulator)
     bitcoind_thread.daemon = True
     bitcoind_thread.start()

-    # It takes a little bit of time to start the simulator (otherwise the requests are sent too early and they fail)
-    time.sleep(0.1)
+    # # It takes a little bit of time to start the simulator (otherwise the requests are sent too early and they fail)
+    # time.sleep(0.1)


 @pytest.fixture
@@ -75,6 +78,7 @@ def create_appointment(dispute_txid=None):
         dispute_txid = os.urandom(32).hex()

     appointment = generate_dummy_appointment(dispute_txid)
+    locator_dispute_txid_map[appointment["locator"]] = dispute_txid

     return appointment

@@ -82,6 +86,9 @@ def add_appointment(appointment):
     r = requests.post(url=PISA_API, json=json.dumps(appointment), timeout=5)

+    if r.status_code == 200:
+        appointments.append(appointment)
+
     return r

@@ -135,7 +142,7 @@ def
test_request_multiple_appointments_same_locator(new_appointment, n=MULTIPLE_ def test_add_too_many_appointment(new_appointment): - for _ in range(MAX_APPOINTMENTS): + for _ in range(MAX_APPOINTMENTS-len(appointments)): r = add_appointment(new_appointment) assert (r.status_code == 200) @@ -143,13 +150,7 @@ def test_add_too_many_appointment(new_appointment): assert (r.status_code == 503) -def test_get_all_appointments_watcher(n=MULTIPLE_APPOINTMENTS): - appointments = [create_appointment() for _ in range(n)] - - for appointment in appointments: - r = add_appointment(appointment) - assert (r.status_code == 200 and r.reason == 'OK') - +def test_get_all_appointments_watcher(): r = requests.get(url=PISA_API + "/get_all_appointments") assert (r.status_code == 200 and r.reason == 'OK') @@ -163,20 +164,14 @@ def test_get_all_appointments_watcher(n=MULTIPLE_APPOINTMENTS): assert(len(received_appointments["responder_jobs"]) == 0) -def test_get_all_appointments_responder(n=MAX_APPOINTMENTS): - # Create appointments send them to PISA - dispute_txids = [os.urandom(32).hex() for _ in range(n)] - appointments = [create_appointment(dispute_txid) for dispute_txid in dispute_txids] - - for appointment in appointments: - r = add_appointment(appointment) - assert (r.status_code == 200 and r.reason == 'OK') - +def test_get_all_appointments_responder(): # Trigger all disputes bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT)) - for dispute_txid in dispute_txids: - bitcoin_cli.sendrawtransaction(dispute_txid) + locators = [appointment["locator"] for appointment in appointments] + for locator, dispute_txid in locator_dispute_txid_map.items(): + if locator in locators: + bitcoin_cli.sendrawtransaction(dispute_txid) # Wait a bit for them to get confirmed time.sleep(TIME_BETWEEN_BLOCKS) From f59c621cc05bb36494e048cf2f933ab5917b2692 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Wed, 9 Oct 2019 14:05:15 +0100 Subject: [PATCH 24/82] Defines bitcoind fixture session-wise --- test/unit/test_inspector.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/unit/test_inspector.py b/test/unit/test_inspector.py index 1a1eb78..3aa68f6 100644 --- a/test/unit/test_inspector.py +++ b/test/unit/test_inspector.py @@ -21,7 +21,7 @@ WRONG_TYPES_NO_STR = [[], urandom(32), 3.2, 2.0, (), object, {}, object()] logging.getLogger().disabled = True -@pytest.fixture(autouse=True) +@pytest.fixture(autouse=True, scope='session') def run_bitcoind(): bitcoind_thread = Thread(target=run_simulator) bitcoind_thread.daemon = True From ecadbc62ef8940a67d2ed3130c2f6356eb1022ad Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Wed, 9 Oct 2019 15:19:03 +0100 Subject: [PATCH 25/82] Adds _eq_ method to EncryptedBlob --- pisa/encrypted_blob.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pisa/encrypted_blob.py b/pisa/encrypted_blob.py index b81db3f..ffc3e38 100644 --- a/pisa/encrypted_blob.py +++ b/pisa/encrypted_blob.py @@ -5,12 +5,15 @@ from cryptography.hazmat.primitives.ciphers.aead import AESGCM from pisa import logging -# FIXME: EncryptedBlob is assuming AESGCM. A cipher field should be part of the object and the decryption should be +# FIXME: EncryptedBlob is assuming AES-128-GCM. A cipher field should be part of the object and the decryption should be # performed depending on the cipher. 
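# (e.g. an EncryptedBlob(data, cipher) constructor that stores the cipher, as the client-side Blob already does)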
class EncryptedBlob:
     def __init__(self, data):
         self.data = data

+    def __eq__(self, other):
+        return isinstance(other, EncryptedBlob) and self.data == other.data
+
     def decrypt(self, key):
         # master_key = H(tx_id | tx_id)
         key = unhexlify(key)

From ab72c7103902081a332c09121a5ea671f27e874d Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Wed, 9 Oct 2019 15:19:33 +0100
Subject: [PATCH 26/82] Adds appointment unit test

---
 pisa/appointment.py           |  1 +
 test/unit/test_appointment.py | 47 +++++++++++++++++++++++++++++++
 2 files changed, 48 insertions(+)
 create mode 100644 test/unit/test_appointment.py

diff --git a/pisa/appointment.py b/pisa/appointment.py
index a4d5718..ac1d3a2 100644
--- a/pisa/appointment.py
+++ b/pisa/appointment.py
@@ -3,6 +3,7 @@ from pisa.encrypted_blob import EncryptedBlob

 # Basic appointment structure
 class Appointment:
+    # TODO: 35-appointment-checks
     def __init__(self, locator, start_time, end_time, dispute_delta, encrypted_blob, cipher, hash_function):
         self.locator = locator
         self.start_time = start_time  # ToDo: #4-standardize-appointment-fields

diff --git a/test/unit/test_appointment.py b/test/unit/test_appointment.py
new file mode 100644
index 0000000..0caf459
--- /dev/null
+++ b/test/unit/test_appointment.py
@@ -0,0 +1,47 @@
+from os import urandom
+from pytest import fixture
+
+from pisa.appointment import Appointment
+from pisa.encrypted_blob import EncryptedBlob
+
+
+# Not much to test here, adding it for completeness
+
+@fixture
+def appointment_data():
+    locator = urandom(32).hex()
+    start_time = 100
+    end_time = 120
+    dispute_delta = 20
+    encrypted_blob_data = urandom(100).hex()
+    cipher = "AES-GCM-128"
+    hash_function = "SHA256"
+
+    return locator, start_time, end_time, dispute_delta, encrypted_blob_data, cipher, hash_function
+
+
+def test_init_appointment(appointment_data):
+    # The appointment has no checks whatsoever, since the inspector is the one taking care of that, and the only one
+    # creating appointments.
+ # DISCUSS: whether this makes sense by design or checks should be ported from the inspector to the appointment + # 35-appointment-checks + + locator, start_time, end_time, dispute_delta, encrypted_blob_data, cipher, hash_function = appointment_data + + appointment = Appointment(locator, start_time, end_time, dispute_delta, encrypted_blob_data, cipher, hash_function) + + assert (locator == appointment.locator and start_time == appointment.start_time and end_time == appointment.end_time + and EncryptedBlob(encrypted_blob_data) == appointment.encrypted_blob and cipher == appointment.cipher + and dispute_delta == appointment.dispute_delta and hash_function == appointment.hash_function) + + +def test_to_json(appointment_data): + locator, start_time, end_time, dispute_delta, encrypted_blob_data, cipher, hash_function = appointment_data + appointment = Appointment(locator, start_time, end_time, dispute_delta, encrypted_blob_data, cipher, hash_function) + + json_appointment = appointment.to_json() + + assert (locator == json_appointment.get("locator") and start_time == json_appointment.get("start_time") + and end_time == json_appointment.get("end_time") and dispute_delta == json_appointment.get("dispute_delta") + and cipher == json_appointment.get("cipher") and hash_function == json_appointment.get("hash_function") + and encrypted_blob_data == json_appointment.get("encrypted_blob")) From e0d0818f6b5dcebd748a9f73b78ee58d524bed54 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Wed, 9 Oct 2019 15:32:56 +0100 Subject: [PATCH 27/82] Removes unused code --- test/unit/test_api.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/test/unit/test_api.py b/test/unit/test_api.py index 23e6c4e..5742c73 100644 --- a/test/unit/test_api.py +++ b/test/unit/test_api.py @@ -62,9 +62,6 @@ def run_bitcoind(): bitcoind_thread.daemon = True bitcoind_thread.start() - # # It takes a little bit of time to start the simulator (otherwise the requests are sent too early and they fail) - # time.sleep(0.1) - @pytest.fixture def new_appointment(dispute_txid=None): From c67e41d185ec38e07633b0e742faae7eb88ac52a Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Fri, 30 Aug 2019 12:58:05 +0200 Subject: [PATCH 28/82] Includes getbestblockhash so it can simulate db loads --- test/simulator/bitcoind_sim.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/test/simulator/bitcoind_sim.py b/test/simulator/bitcoind_sim.py index 358968a..47248a1 100644 --- a/test/simulator/bitcoind_sim.py +++ b/test/simulator/bitcoind_sim.py @@ -55,6 +55,8 @@ def process_request(): getblockhash: a block hash is only queried by pisad on bootstrapping to check the network bitcoind is running on. + getbestblockhash: returns the hash of the block in the tip of the chain + help: help is only used as a sample command to test if bitcoind is running when bootstrapping pisad. It will return a 200/OK with no data. """ @@ -153,6 +155,9 @@ def process_request(): response["error"] = no_param_err response["error"]["message"] = response["error"]["message"].format("integer") + elif method == "getbestblockhash": + response["result"] = blockchain[-1] + elif method == "help": pass From b69ffdc0aa66fcce6f304852ecca213c7c791132 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Wed, 9 Oct 2019 17:31:21 +0100 Subject: [PATCH 29/82] Adds some BlockProcessor unit tests. 
The last three methods are missing, since they seem not to belong there
(check #36)
---
 pisa/block_processor.py           | 10 +++--
 test/unit/test_block_processor.py | 75 +++++++++++++++++++++++++++++++
 2 files changed, 82 insertions(+), 3 deletions(-)
 create mode 100644 test/unit/test_block_processor.py

diff --git a/pisa/block_processor.py b/pisa/block_processor.py
index d426bda..eb09e45 100644
--- a/pisa/block_processor.py
+++ b/pisa/block_processor.py
@@ -8,40 +8,43 @@ from pisa.utils.auth_proxy import JSONRPCException
 class BlockProcessor:
     @staticmethod
     def get_block(block_hash):
-        block = None

         try:
             block = bitcoin_cli.getblock(block_hash)

         except JSONRPCException as e:
+            block = None
             logging.error("[BlockProcessor] couldn't get block from bitcoind. Error code {}".format(e))

         return block

     @staticmethod
     def get_best_block_hash():
-        block_hash = None

         try:
             block_hash = bitcoin_cli.getbestblockhash()

         except JSONRPCException as e:
+            block_hash = None
             logging.error("[BlockProcessor] couldn't get block hash. Error code {}".format(e))

         return block_hash

     @staticmethod
     def get_block_count():
-        block_count = None

         try:
             block_count = bitcoin_cli.getblockcount()

         except JSONRPCException as e:
+            block_count = None
             logging.error("[BlockProcessor] couldn't get block count. Error code {}".format(e))

         return block_count

+    # FIXME: The following two functions do not seem to belong here. They come from the Watcher, and need to be
+    # separated since they will be reused by the TimeTraveller.
+    # DISCUSS: 36-who-should-check-appointment-trigger
     @staticmethod
     def get_potential_matches(txids, locator_uuid_map):
         potential_locators = {sha256(binascii.unhexlify(txid)).hexdigest(): txid for txid in txids}
@@ -80,6 +83,7 @@ class BlockProcessor:

         return matches

+    # DISCUSS: This method comes from the Responder and seems like it could go back there.
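    # check_confirmations drops the txs confirmed by the new block from unconfirmed_txs and bumps the miss
    # counter of every tracked tx that is still unconfirmed.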
@staticmethod
     def check_confirmations(txs, unconfirmed_txs, tx_job_map, missed_confirmations):

diff --git a/test/unit/test_block_processor.py b/test/unit/test_block_processor.py
new file mode 100644
index 0000000..062e917
--- /dev/null
+++ b/test/unit/test_block_processor.py
@@ -0,0 +1,75 @@
+import pytest
+from time import sleep
+from os import urandom
+from uuid import uuid4
+from hashlib import sha256
+from threading import Thread
+from binascii import unhexlify
+
+from pisa.block_processor import BlockProcessor
+from test.simulator.bitcoind_sim import run_simulator
+
+APPOINTMENT_COUNT = 100
+TEST_SET_SIZE = 200
+
+
+@pytest.fixture(autouse=True, scope='session')
+def run_bitcoind():
+    bitcoind_thread = Thread(target=run_simulator)
+    bitcoind_thread.daemon = True
+    bitcoind_thread.start()
+
+    sleep(0.1)
+
+
+@pytest.fixture(scope='session')
+def txids():
+    return [urandom(32).hex() for _ in range(APPOINTMENT_COUNT)]
+
+
+@pytest.fixture(scope='session')
+def locator_uuid_map(txids):
+    return {sha256(unhexlify(txid)).hexdigest(): uuid4().hex for txid in txids}
+
+
+@pytest.fixture
+def best_block_hash():
+    return BlockProcessor.get_best_block_hash()
+
+
+def test_get_best_block_hash(best_block_hash):
+    # As long as bitcoind is running (or mocked in this case) we should always get a block hash
+    assert best_block_hash is not None and isinstance(best_block_hash, str)
+
+
+def test_get_block(best_block_hash):
+    # Getting a block from a block hash we are aware of should return data
+    block = BlockProcessor.get_block(best_block_hash)
+
+    # Checking that the received block has at least the fields we need
+    # FIXME: We could be more strict here, but we'll need to add those restrictions to bitcoind_sim too
+    assert isinstance(block, dict)
+    assert block.get('hash') == best_block_hash and 'height' in block and 'previousblockhash' in block and 'tx' in block
+
+
+def test_get_block_count():
+    block_count = BlockProcessor.get_block_count()
+    assert isinstance(block_count, int) and block_count >= 0
+
+
+def test_potential_matches(txids, locator_uuid_map):
+    potential_matches = BlockProcessor.get_potential_matches(txids, locator_uuid_map)
+
+    # All the txids must match
+    assert locator_uuid_map.keys() == potential_matches.keys()
+
+
+def test_potential_matches_random_data(locator_uuid_map):
+    # The likelihood of finding a potential match with random data should be negligible
+    txids = [urandom(32).hex() for _ in range(TEST_SET_SIZE)]
+
+    potential_matches = BlockProcessor.get_potential_matches(txids, locator_uuid_map)
+
+    # None of the txids should match
+    assert len(potential_matches) == 0
+

From 1de226374daff48f7e05174df8ddf2570f67a4e7 Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Wed, 9 Oct 2019 17:33:41 +0100
Subject: [PATCH 30/82] Minor simulator updates

---
 test/simulator/bitcoind_sim.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/test/simulator/bitcoind_sim.py b/test/simulator/bitcoind_sim.py
index 47248a1..b481b43 100644
--- a/test/simulator/bitcoind_sim.py
+++ b/test/simulator/bitcoind_sim.py
@@ -15,14 +15,12 @@ app = Flask(__name__)
 HOST = 'localhost'
 PORT = '18443'

-mining_simulator = ZMQPublisher(topic=b'hashblock', feed_protocol=FEED_PROTOCOL, feed_addr=FEED_ADDR,
-                                feed_port=FEED_PORT)
+TIME_BETWEEN_BLOCKS = 10

 mempool = []
 mined_transactions = {}
 blocks = {}
 blockchain = []
-TIME_BETWEEN_BLOCKS = 10


 @app.route('/', methods=['POST'])
@@ -185,6 +183,9 @@ def simulate_mining():
     global mempool, mined_transactions, blocks, blockchain
     prev_block_hash =
None + mining_simulator = ZMQPublisher(topic=b'hashblock', feed_protocol=FEED_PROTOCOL, feed_addr=FEED_ADDR, + feed_port=FEED_PORT) + while True: block_hash = os.urandom(32).hex() coinbase_tx_hash = os.urandom(32).hex() From 8b62ff9e566e1047b79c0f197e6cd1e2fa6e2edf Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Wed, 9 Oct 2019 17:35:11 +0100 Subject: [PATCH 31/82] Update some comments/todos/discuss --- pisa/appointment.py | 2 +- pisa/responder.py | 2 +- pisa/watcher.py | 4 ---- test/unit/test_api.py | 2 +- 4 files changed, 3 insertions(+), 7 deletions(-) diff --git a/pisa/appointment.py b/pisa/appointment.py index ac1d3a2..816fde6 100644 --- a/pisa/appointment.py +++ b/pisa/appointment.py @@ -3,7 +3,7 @@ from pisa.encrypted_blob import EncryptedBlob # Basic appointment structure class Appointment: - # TODO: 35-appointment-checks + # DISCUSS: 35-appointment-checks def __init__(self, locator, start_time, end_time, dispute_delta, encrypted_blob, cipher, hash_function): self.locator = locator self.start_time = start_time # ToDo: #4-standardize-appointment-fields diff --git a/pisa/responder.py b/pisa/responder.py index 7eabd9b..08f082e 100644 --- a/pisa/responder.py +++ b/pisa/responder.py @@ -164,7 +164,7 @@ class Responder: return completed_jobs def rebroadcast(self, jobs_to_rebroadcast): - # ToDO: #22-discuss-confirmations-before-retry + # DISCUSS: #22-discuss-confirmations-before-retry # ToDo: #23-define-behaviour-approaching-end for tx in jobs_to_rebroadcast: diff --git a/pisa/watcher.py b/pisa/watcher.py index f63e5ab..ab8da44 100644 --- a/pisa/watcher.py +++ b/pisa/watcher.py @@ -11,10 +11,6 @@ from pisa.block_processor import BlockProcessor from pisa.utils.zmq_subscriber import ZMQHandler -# WIP: MOVED BLOCKCHAIN RELATED TASKS TO BLOCK PROCESSOR IN AN AIM TO MAKE THE CODE MORE MODULAR. THIS SHOULD HELP -# WITH CODE REUSE WHEN MERGING THE DATA PERSISTENCE PART. 
- - class Watcher: def __init__(self, max_appointments=MAX_APPOINTMENTS): self.appointments = dict() diff --git a/test/unit/test_api.py b/test/unit/test_api.py index 5742c73..8505bfa 100644 --- a/test/unit/test_api.py +++ b/test/unit/test_api.py @@ -124,7 +124,7 @@ def test_request_appointment(new_appointment): def test_add_appointment_multiple_times(new_appointment, n=MULTIPLE_APPOINTMENTS): # Multiple appointments with the same locator should be valid - # TODO: #34-store-identical-appointments + # DISCUSS: #34-store-identical-appointments for _ in range(n): r = add_appointment(new_appointment) assert (r.status_code == 200) From db635ef68814225b3112014d4d518661a7972a7b Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Wed, 9 Oct 2019 19:00:08 +0100 Subject: [PATCH 32/82] Add missing comment --- test/unit/test_block_processor.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/unit/test_block_processor.py b/test/unit/test_block_processor.py index 062e917..b0a2bba 100644 --- a/test/unit/test_block_processor.py +++ b/test/unit/test_block_processor.py @@ -19,6 +19,7 @@ def run_bitcoind(): bitcoind_thread.daemon = True bitcoind_thread.start() + # It takes a little bit of time to start the API (otherwise the requests are sent too early and they fail) sleep(0.1) From 1b229cb44153d70ae07b8c578b10f8f7b2208ae9 Mon Sep 17 00:00:00 2001 From: Salvatore Ingala <6681844+bigspider@users.noreply.github.com> Date: Tue, 8 Oct 2019 14:35:30 +0700 Subject: [PATCH 33/82] Added *.pyc and .cache/ to .gitignore --- .gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitignore b/.gitignore index dfdbf33..6560657 100644 --- a/.gitignore +++ b/.gitignore @@ -10,3 +10,5 @@ bitcoin.conf* apps/cli/*.json appointments/ test.py +*.pyc +.cache From ed0cb4f63291b7b23c5a0bf543095fc9854be9d3 Mon Sep 17 00:00:00 2001 From: Salvatore Ingala <6681844+bigspider@users.noreply.github.com> Date: Tue, 8 Oct 2019 16:32:09 +0700 Subject: [PATCH 34/82] Changed log format to JSON; fixed missing return value in get_potential_matches --- pisa/__init__.py | 18 ++++++++++++++++-- pisa/api.py | 5 +++-- pisa/block_processor.py | 22 +++++++++++----------- pisa/carrier.py | 14 +++++++------- pisa/cleaner.py | 11 +++++------ pisa/encrypted_blob.py | 14 +++++++------- pisa/inspector.py | 23 ++++++++--------------- pisa/pisad.py | 8 ++++---- pisa/responder.py | 31 ++++++++++++++----------------- pisa/utils/zmq_subscriber.py | 5 ++--- pisa/watcher.py | 21 ++++++++++----------- 11 files changed, 87 insertions(+), 85 deletions(-) diff --git a/pisa/__init__.py b/pisa/__init__.py index a279a5c..7fa2901 100644 --- a/pisa/__init__.py +++ b/pisa/__init__.py @@ -1,14 +1,28 @@ import logging +import json +import time from pisa.utils.auth_proxy import AuthServiceProxy import pisa.conf as conf - HOST = 'localhost' PORT = 9814 +class StructuredMessage(object): + def __init__(self, message, **kwargs): + self.message = message + self.time = time.asctime() + self.kwargs = kwargs + + def __str__(self): + return json.dumps({ **self.kwargs, "message": self.message, "time": self.time }) + +M = StructuredMessage # to improve readability + +logging.basicConfig(level=logging.INFO, format='%(message)s') + # Configure logging -logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO, handlers=[ +logging.basicConfig(format='%(message)s', level=logging.INFO, handlers=[ logging.FileHandler(conf.SERVER_LOG_FILE), logging.StreamHandler() ]) diff --git a/pisa/api.py b/pisa/api.py index 44244ee..df98cd0 100644 --- a/pisa/api.py +++ 
b/pisa/api.py @@ -1,6 +1,7 @@ import json from flask import Flask, request, Response, abort, jsonify +from pisa import HOST, PORT, logging, bitcoin_cli, M from pisa.watcher import Watcher from pisa.inspector import Inspector from pisa import HOST, PORT, logging @@ -20,7 +21,7 @@ def add_appointment(): remote_addr = request.environ.get('REMOTE_ADDR') remote_port = request.environ.get('REMOTE_PORT') - logging.info('[API] connection accepted from {}:{}'.format(remote_addr, remote_port)) + logging.info(M('[API] connection accepted', from_addr_port='{}:{}'.format(remote_addr, remote_port))) # Check content type once if properly defined request_data = json.loads(request.get_json()) @@ -46,7 +47,7 @@ def add_appointment(): rcode = HTTP_BAD_REQUEST response = "appointment rejected. Request does not match the standard" - logging.info('[API] sending response and disconnecting: {} --> {}:{}'.format(response, remote_addr, remote_port)) + logging.info(M('[API] sending response and disconnecting', from_addr_port='{}:{}'.format(remote_addr, remote_port), response=response)) return Response(response, status=rcode, mimetype='text/plain') diff --git a/pisa/block_processor.py b/pisa/block_processor.py index eb09e45..4343bb9 100644 --- a/pisa/block_processor.py +++ b/pisa/block_processor.py @@ -1,7 +1,7 @@ import binascii from hashlib import sha256 -from pisa import logging, bitcoin_cli +from pisa import logging, bitcoin_cli, M from pisa.utils.auth_proxy import JSONRPCException @@ -14,7 +14,7 @@ class BlockProcessor: except JSONRPCException as e: block = None - logging.error("[BlockProcessor] couldn't get block from bitcoind. Error code {}".format(e)) + logging.error(M("[BlockProcessor] couldn't get block from bitcoind.", error_code=e)) return block @@ -26,7 +26,7 @@ class BlockProcessor: except JSONRPCException as e: block_hash = None - logging.error("[BlockProcessor] couldn't get block hash. Error code {}".format(e)) + logging.error(M("[BlockProcessor] couldn't get block hash.", error_code=e)) return block_hash @@ -54,10 +54,12 @@ class BlockProcessor: potential_matches = {locator: potential_locators[locator] for locator in intersection} if len(potential_matches) > 0: - logging.info("[BlockProcessor] list of potential matches: {}".format(potential_matches)) + logging.info(M("[BlockProcessor] list of potential matches", potential_matches=potential_matches)) else: - logging.info("[BlockProcessor] no potential matches found") + logging.info(M("[BlockProcessor] no potential matches found")) + + return potential_matches return potential_matches @@ -73,13 +75,12 @@ class BlockProcessor: justice_txid = bitcoin_cli.decoderawtransaction(justice_rawtx).get('txid') matches.append((locator, uuid, dispute_txid, justice_txid, justice_rawtx)) - logging.info("[BlockProcessor] match found for locator {} (uuid: {}): {}".format( - locator, uuid, justice_txid)) + logging.info(M("[BlockProcessor] match found for locator.", locator=locator, uuid=uuid, justice_txid=justice_txid)) except JSONRPCException as e: # Tx decode failed returns error code -22, maybe we should be more strict here. Leaving it simple # for the POC - logging.error("[BlockProcessor] can't build transaction from decoded data. 
Error code {}".format(e)) + logging.error(M("[BlockProcessor] can't build transaction from decoded data.", error_code=e)) return matches @@ -91,7 +92,7 @@ class BlockProcessor: if tx in tx_job_map and tx in unconfirmed_txs: unconfirmed_txs.remove(tx) - logging.info("[Responder] confirmation received for tx {}".format(tx)) + logging.info(M("[Responder] confirmation received for transaction", tx=tx)) elif tx in unconfirmed_txs: if tx in missed_confirmations: @@ -100,8 +101,7 @@ class BlockProcessor: else: missed_confirmations[tx] = 1 - logging.info("[Responder] tx {} missed a confirmation (total missed: {})" - .format(tx, missed_confirmations[tx])) + logging.info(M("[Responder] transaction missed a confirmation", tx=tx, missed_confirmations=missed_confirmations[tx])) return unconfirmed_txs, missed_confirmations diff --git a/pisa/carrier.py b/pisa/carrier.py index eb0319d..946af01 100644 --- a/pisa/carrier.py +++ b/pisa/carrier.py @@ -1,5 +1,5 @@ from pisa.rpc_errors import * -from pisa import logging, bitcoin_cli +from pisa import logging, bitcoin_cli, M from pisa.utils.auth_proxy import JSONRPCException from pisa.errors import UNKNOWN_JSON_RPC_EXCEPTION @@ -13,7 +13,7 @@ class Carrier: def send_transaction(self, rawtx, txid): try: - logging.info("[Carrier] pushing transaction to the network (txid: {})".format(rawtx)) + logging.info(M("[Carrier] pushing transaction to the network", txid=txid, rawtx=rawtx)) bitcoin_cli.sendrawtransaction(rawtx) receipt = self.Receipt(delivered=True) @@ -32,7 +32,7 @@ class Carrier: receipt = self.Receipt(delivered=False, reason=UNKNOWN_JSON_RPC_EXCEPTION) elif errno == RPC_VERIFY_ALREADY_IN_CHAIN: - logging.info("[Carrier] {} is already in the blockchain. Getting confirmation count".format(txid)) + logging.info(M("[Carrier] Transaction is already in the blockchain. Getting confirmation count", txid=txid)) # If the transaction is already in the chain, we get the number of confirmations and watch the job # until the end of the appointment @@ -43,13 +43,13 @@ class Carrier: receipt = self.Receipt(delivered=True, confirmations=confirmations) else: - # There's a really unlike edge case where a transaction can be reorged between receiving the + # There's a really unlikely edge case where a transaction can be reorged between receiving the # notification and querying the data. In such a case we just resend self.send_transaction(rawtx, txid) else: # If something else happens (unlikely but possible) log it so we can treat it in future releases - logging.error("[Responder] JSONRPCException. Error {}".format(e)) + logging.error(M("[Responder] JSONRPCException.", error_code=e)) receipt = self.Receipt(delivered=False, reason=UNKNOWN_JSON_RPC_EXCEPTION) return receipt @@ -66,12 +66,12 @@ class Carrier: # reorged while we were querying bitcoind to get the confirmation count. In such a case we just # restart the job if e.error.get('code') == RPC_INVALID_ADDRESS_OR_KEY: - logging.info("[Carrier] transaction {} got reorged before obtaining information".format(txid)) + logging.info(M("[Carrier] transaction got reorged before obtaining information", txid=txid)) # TODO: Check RPC methods to see possible returns and avoid general else # else: # # If something else happens (unlikely but possible) log it so we can treat it in future releases - # logging.error("[Responder] JSONRPCException. 
Error {}".format(e)) + # logging.error(M("[Responder] JSONRPCException.", error_code=e) return tx_info diff --git a/pisa/cleaner.py b/pisa/cleaner.py index b7c2947..68ad5e2 100644 --- a/pisa/cleaner.py +++ b/pisa/cleaner.py @@ -1,4 +1,4 @@ -from pisa import logging +from pisa import logging, M # Dictionaries in Python are "passed-by-reference", so no return is needed for the Cleaner" # https://docs.python.org/3/faq/programming.html#how-do-i-write-a-function-with-output-parameters-call-by-reference @@ -18,14 +18,13 @@ class Cleaner: else: locator_uuid_map[locator].remove(uuid) - logging.info("[Cleaner] end time reached with no match! Deleting appointment {} (uuid: {})".format(locator, - uuid)) + logging.info(M("[Cleaner] end time reached with no match! Deleting appointment.", locator=locator, uuid=uuid)) @staticmethod def delete_completed_jobs(jobs, tx_job_map, completed_jobs, height): for uuid, confirmations in completed_jobs: - logging.info("[Cleaner] job completed (uuid = {}). Appointment ended at block {} after {} confirmations" - .format(uuid, height, confirmations)) + logging.info(M("[Cleaner] job completed. Appointment ended after reaching enough confirmations.", + uuid=uuid, height=height, confirmations=confirmations)) # ToDo: #9-add-data-persistence justice_txid = jobs[uuid].justice_txid @@ -34,7 +33,7 @@ class Cleaner: if len(tx_job_map[justice_txid]) == 1: tx_job_map.pop(justice_txid) - logging.info("[Cleaner] no more jobs for justice_txid {}".format(justice_txid)) + logging.info(M("[Cleaner] no more jobs for justice transaction.", justice_txid=justice_txid)) else: tx_job_map[justice_txid].remove(uuid) diff --git a/pisa/encrypted_blob.py b/pisa/encrypted_blob.py index ffc3e38..3aeee13 100644 --- a/pisa/encrypted_blob.py +++ b/pisa/encrypted_blob.py @@ -1,8 +1,7 @@ from hashlib import sha256 from binascii import unhexlify, hexlify from cryptography.hazmat.primitives.ciphers.aead import AESGCM - -from pisa import logging +from pisa import logging, M # FIXME: EncryptedBlob is assuming AES-128-GCM. 
A cipher field should be part of the object and the decryption should be @@ -23,11 +22,12 @@ class EncryptedBlob: sk = master_key[:16] nonce = master_key[16:] - logging.info("[Watcher] creating new blob") - logging.info("[Watcher] master key: {}".format(hexlify(master_key).decode())) - logging.info("[Watcher] sk: {}".format(hexlify(sk).decode())) - logging.info("[Watcher] nonce: {}".format(hexlify(nonce).decode())) - logging.info("[Watcher] encrypted_blob: {}".format(self.data)) + logging.info(M("[Watcher] creating new blob.", + master_key=hexlify(master_key).decode(), + sk=hexlify(sk).decode(), + nonce=hexlify(sk).decode(), + encrypted_blob=self.data + )) # Decrypt aesgcm = AESGCM(sk) diff --git a/pisa/inspector.py b/pisa/inspector.py index ae9ca89..945afc1 100644 --- a/pisa/inspector.py +++ b/pisa/inspector.py @@ -2,7 +2,7 @@ import re from pisa import errors import pisa.conf as conf -from pisa import logging +from pisa import logging, bitcoin_cli, M from pisa.appointment import Appointment from pisa.block_processor import BlockProcessor @@ -70,8 +70,7 @@ class Inspector: rcode = errors.APPOINTMENT_WRONG_FIELD_FORMAT message = "wrong locator format ({})".format(locator) - if message is not None: - logging.error("[Inspector] {}".format(message)) + logging.error(M("[Inspector] {}".format(message))) return rcode, message @@ -98,8 +97,7 @@ class Inspector: else: message = "start_time is too close to current height" - if message is not None: - logging.error("[Inspector] {}".format(message)) + logging.error(M("[Inspector] {}".format(message))) return rcode, message @@ -132,8 +130,7 @@ class Inspector: else: message = 'end_time is too close to current height' - if message is not None: - logging.error("[Inspector] {}".format(message)) + logging.error(M("[Inspector] {}".format(message))) return rcode, message @@ -155,8 +152,7 @@ class Inspector: message = "dispute delta too small. The dispute delta should be at least {} (current: {})".format( conf.MIN_DISPUTE_DELTA, dispute_delta) - if message is not None: - logging.error("[Inspector] {}".format(message)) + logging.error(M("[Inspector] {}".format(message))) return rcode, message @@ -178,8 +174,7 @@ class Inspector: rcode = errors.APPOINTMENT_WRONG_FIELD_FORMAT message = "wrong encrypted_blob format ({})".format(encrypted_blob) - if message is not None: - logging.error("[Inspector] {}".format(message)) + logging.error(M("[Inspector] {}".format(message))) return rcode, message @@ -200,8 +195,7 @@ class Inspector: rcode = errors.APPOINTMENT_CIPHER_NOT_SUPPORTED message = "cipher not supported: {}".format(cipher) - if message is not None: - logging.error("[Inspector] {}".format(message)) + logging.error(M("[Inspector] {}".format(message))) return rcode, message @@ -222,7 +216,6 @@ class Inspector: rcode = errors.APPOINTMENT_HASH_FUNCTION_NOT_SUPPORTED message = "hash_function not supported {}".format(hash_function) - if message is not None: - logging.error("[Inspector] {}".format(message)) + logging.error(M("[Inspector] {}".format(message))) return rcode, message diff --git a/pisa/pisad.py b/pisa/pisad.py index 8fbce83..f7b3603 100644 --- a/pisa/pisad.py +++ b/pisa/pisad.py @@ -1,7 +1,7 @@ from sys import argv from getopt import getopt -from pisa import logging +from pisa import logging, M from pisa.api import start_api from pisa.tools import can_connect_to_bitcoind, in_correct_network @@ -19,8 +19,8 @@ if __name__ == '__main__': start_api() else: - logging.error("[Pisad] bitcoind is running on a different network, check conf.py and bitcoin.conf. 
" - "Shutting down") + logging.error(M("[Pisad] bitcoind is running on a different network, check conf.py and bitcoin.conf. " + "Shutting down")) else: - logging.error("[Pisad] can't connect to bitcoind. Shutting down") + logging.error(M("[Pisad] can't connect to bitcoind. Shutting down")) diff --git a/pisa/responder.py b/pisa/responder.py index 08f082e..e44ee61 100644 --- a/pisa/responder.py +++ b/pisa/responder.py @@ -3,9 +3,9 @@ from threading import Thread from hashlib import sha256 from binascii import unhexlify +from pisa import logging, M from pisa.cleaner import Cleaner from pisa.carrier import Carrier -from pisa import logging from pisa.tools import check_tx_in_chain from pisa.block_processor import BlockProcessor from pisa.utils.zmq_subscriber import ZMQHandler @@ -45,7 +45,7 @@ class Responder: def add_response(self, uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, retry=False): if self.asleep: - logging.info("[Responder] waking up!") + logging.info(M("[Responder] waking up!")) carrier = Carrier() receipt = carrier.send_transaction(justice_rawtx, justice_txid) @@ -80,8 +80,7 @@ class Responder: if confirmations == 0: self.unconfirmed_txs.append(justice_txid) - logging.info('[Responder] new job added (dispute txid = {}, justice txid = {}, appointment end = {})' - .format(dispute_txid, justice_txid, appointment_end)) + logging.info(M("[Responder] new job added.", dispute_txid=dispute_txid, justice_txid=justice_txid, appointment_end=appointment_end)) if self.asleep: self.asleep = False @@ -109,9 +108,7 @@ class Responder: txs = block.get('tx') height = block.get('height') - logging.info("[Responder] new block received {}".format(block_hash)) - logging.info("[Responder] prev. block hash {}".format(block.get('previousblockhash'))) - logging.info("[Responder] list of transactions: {}".format(txs)) + logging.info(M("[Responder] new block received", block_hash=block_hash, prev_block_hash=block.get('previousblockhash'), txs=txs)) # ToDo: #9-add-data-persistence # change prev_block_hash condition @@ -125,8 +122,8 @@ class Responder: self.rebroadcast(txs_to_rebroadcast) else: - logging.warning("[Responder] reorg found! local prev. block id = {}, remote prev. block id = {}" - .format(prev_block_hash, block.get('previousblockhash'))) + logging.warning(M("[Responder] reorg found!", + local_prev_block_hash=prev_block_hash, remote_prev_block_hash=block.get('previousblockhash'))) self.handle_reorgs() @@ -136,7 +133,7 @@ class Responder: self.asleep = True self.zmq_subscriber.terminate = True - logging.info("[Responder] no more pending jobs, going back to sleep") + logging.info(M("[Responder] no more pending jobs, going back to sleep")) def get_txs_to_rebroadcast(self, txs): txs_to_rebroadcast = [] @@ -172,8 +169,8 @@ class Responder: self.add_response(uuid, self.jobs[uuid].dispute_txid, self.jobs[uuid].justice_txid, self.jobs[uuid].justice_rawtx, self.jobs[uuid].appointment_end, retry=True) - logging.warning("[Responder] tx {} has missed {} confirmations. Rebroadcasting" - .format(self.jobs[uuid].justice_txid, CONFIRMATIONS_BEFORE_RETRY)) + logging.warning(M("[Responder] Transaction has missed many confirmations. 
Rebroadcasting.", + justice_txid=self.jobs[uuid].justice_txid, confirmations_missed=CONFIRMATIONS_BEFORE_RETRY)) # FIXME: Legacy code, must be checked and updated/fixed def handle_reorgs(self): @@ -189,8 +186,8 @@ class Responder: # If both transactions are there, we only need to update the justice tx confirmation count if justice_in_chain: - logging.info("[Responder] updating confirmation count for {}: prev. {}, current {}".format( - job.justice_txid, job.confirmations, justice_confirmations)) + logging.info(M("[Responder] updating confirmation count for transaction.", + justice_txid=job.justice_txid, prev_count=job.confirmations, curr_count=justice_confirmations)) job.confirmations = justice_confirmations @@ -203,7 +200,7 @@ class Responder: else: # ToDo: #24-properly-handle-reorgs - # FIXME: if the dispute is not on chain (either in mempool or not there al all), we need to call the + # FIXME: if the dispute is not on chain (either in mempool or not there at all), we need to call the # reorg manager - logging.warning("[Responder] dispute and justice transaction missing. Calling the reorg manager") - logging.error("[Responder] reorg manager not yet implemented") + logging.warning(M("[Responder] dispute and justice transaction missing. Calling the reorg manager")) + logging.error(M("[Responder] reorg manager not yet implemented")) diff --git a/pisa/utils/zmq_subscriber.py b/pisa/utils/zmq_subscriber.py index 75e175d..0545d9c 100644 --- a/pisa/utils/zmq_subscriber.py +++ b/pisa/utils/zmq_subscriber.py @@ -1,7 +1,6 @@ import zmq import binascii - -from pisa import logging +from pisa import logging, M from pisa.conf import FEED_PROTOCOL, FEED_ADDR, FEED_PORT @@ -30,4 +29,4 @@ class ZMQHandler: block_hash = binascii.hexlify(body).decode('UTF-8') block_queue.put(block_hash) - logging.info("[ZMQHandler-{}] new block received via ZMQ".format(self.parent, block_hash)) + logging.info(M("[ZMQHandler-{}] new block received via ZMQ".format(self.parent), block_hash=block_hash)) diff --git a/pisa/watcher.py b/pisa/watcher.py index ab8da44..a70bb43 100644 --- a/pisa/watcher.py +++ b/pisa/watcher.py @@ -2,7 +2,7 @@ from uuid import uuid4 from queue import Queue from threading import Thread -from pisa import logging +from pisa import logging, M from pisa.cleaner import Cleaner from pisa.conf import EXPIRY_DELTA from pisa.responder import Responder @@ -52,35 +52,34 @@ class Watcher: zmq_thread.start() watcher.start() - logging.info("[Watcher] waking up!") + logging.info(M("[Watcher] waking up!")) appointment_added = True - logging.info('[Watcher] new appointment accepted (locator = {})'.format(appointment.locator)) + logging.info(M("[Watcher] new appointment accepted.", locator=appointment.locator)) else: appointment_added = False - logging.info('[Watcher] maximum appointments reached, appointment rejected (locator = {})'.format( - appointment.locator)) + logging.info(M("[Watcher] maximum appointments reached, appointment rejected.", locator=appointment.locator)) return appointment_added def do_subscribe(self, block_queue): - self.zmq_subscriber = ZMQHandler(parent='Watcher') + self.zmq_subscriber = ZMQHandler(parent="Watcher") self.zmq_subscriber.handle(block_queue) def do_watch(self): while len(self.appointments) > 0: block_hash = self.block_queue.get() - logging.info("[Watcher] new block received {}".format(block_hash)) + logging.info(M("[Watcher] new block received", block_hash=block_hash)) block = BlockProcessor.get_block(block_hash) if block is not None: txids = block.get('tx') - 
logging.info("[Watcher] list of transactions: {}".format(txids)) + logging.info(M("[Watcher] list of transactions.", txids=txids)) expired_appointments = [uuid for uuid, appointment in self.appointments.items() if block["height"] > appointment.end_time + EXPIRY_DELTA] @@ -91,8 +90,8 @@ class Watcher: matches = BlockProcessor.get_matches(potential_matches, self.locator_uuid_map, self.appointments) for locator, uuid, dispute_txid, justice_txid, justice_rawtx in matches: - logging.info("[Watcher] notifying responder about {} and deleting appointment {} (uuid: {})" - .format(justice_txid, locator, uuid)) + logging.info(M("[Watcher] notifying responder and deleting appointment.", + justice_txid=justice_txid, locator=locator, uuid=uuid)) self.responder.add_response(uuid, dispute_txid, justice_txid, justice_rawtx, self.appointments[uuid].end_time) @@ -113,4 +112,4 @@ class Watcher: self.asleep = True self.zmq_subscriber.terminate = True - logging.error("[Watcher] no more pending appointments, going back to sleep") + logging.error(M("[Watcher] no more pending appointments, going back to sleep")) From 2a5dd48950914daa950f26a07cd538b99a4a532d Mon Sep 17 00:00:00 2001 From: Salvatore Ingala <6681844+bigspider@users.noreply.github.com> Date: Tue, 8 Oct 2019 18:21:52 +0700 Subject: [PATCH 35/82] PEP8 linting --- pisa/__init__.py | 4 ++-- pisa/api.py | 3 ++- pisa/block_processor.py | 6 ++++-- pisa/carrier.py | 3 ++- pisa/cleaner.py | 5 +++-- pisa/encrypted_blob.py | 9 ++++----- pisa/responder.py | 16 +++++++++++----- pisa/utils/zmq_subscriber.py | 3 ++- pisa/watcher.py | 3 ++- 9 files changed, 32 insertions(+), 20 deletions(-) diff --git a/pisa/__init__.py b/pisa/__init__.py index 7fa2901..9ba1a28 100644 --- a/pisa/__init__.py +++ b/pisa/__init__.py @@ -8,6 +8,7 @@ import pisa.conf as conf HOST = 'localhost' PORT = 9814 + class StructuredMessage(object): def __init__(self, message, **kwargs): self.message = message @@ -15,7 +16,7 @@ class StructuredMessage(object): self.kwargs = kwargs def __str__(self): - return json.dumps({ **self.kwargs, "message": self.message, "time": self.time }) + return json.dumps({**self.kwargs, "message": self.message, "time": self.time}) M = StructuredMessage # to improve readability @@ -31,4 +32,3 @@ logging.basicConfig(format='%(message)s', level=logging.INFO, handlers=[ # TODO: Check if a long lived connection like this may create problems (timeouts) bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (conf.BTC_RPC_USER, conf.BTC_RPC_PASSWD, conf.BTC_RPC_HOST, conf.BTC_RPC_PORT)) - diff --git a/pisa/api.py b/pisa/api.py index df98cd0..b437d80 100644 --- a/pisa/api.py +++ b/pisa/api.py @@ -47,7 +47,8 @@ def add_appointment(): rcode = HTTP_BAD_REQUEST response = "appointment rejected. 
Request does not match the standard" - logging.info(M('[API] sending response and disconnecting', from_addr_port='{}:{}'.format(remote_addr, remote_port), response=response)) + logging.info(M('[API] sending response and disconnecting', + from_addr_port='{}:{}'.format(remote_addr, remote_port), response=response)) return Response(response, status=rcode, mimetype='text/plain') diff --git a/pisa/block_processor.py b/pisa/block_processor.py index 4343bb9..08bd1c9 100644 --- a/pisa/block_processor.py +++ b/pisa/block_processor.py @@ -75,7 +75,8 @@ class BlockProcessor: justice_txid = bitcoin_cli.decoderawtransaction(justice_rawtx).get('txid') matches.append((locator, uuid, dispute_txid, justice_txid, justice_rawtx)) - logging.info(M("[BlockProcessor] match found for locator.", locator=locator, uuid=uuid, justice_txid=justice_txid)) + logging.info(M("[BlockProcessor] match found for locator.", + locator=locator, uuid=uuid, justice_txid=justice_txid)) except JSONRPCException as e: # Tx decode failed returns error code -22, maybe we should be more strict here. Leaving it simple @@ -101,7 +102,8 @@ class BlockProcessor: else: missed_confirmations[tx] = 1 - logging.info(M("[Responder] transaction missed a confirmation", tx=tx, missed_confirmations=missed_confirmations[tx])) + logging.info(M("[Responder] transaction missed a confirmation", + tx=tx, missed_confirmations=missed_confirmations[tx])) return unconfirmed_txs, missed_confirmations diff --git a/pisa/carrier.py b/pisa/carrier.py index 946af01..6efa58f 100644 --- a/pisa/carrier.py +++ b/pisa/carrier.py @@ -32,7 +32,8 @@ class Carrier: receipt = self.Receipt(delivered=False, reason=UNKNOWN_JSON_RPC_EXCEPTION) elif errno == RPC_VERIFY_ALREADY_IN_CHAIN: - logging.info(M("[Carrier] Transaction is already in the blockchain. Getting confirmation count", txid=txid)) + logging.info(M("[Carrier] Transaction is already in the blockchain. Getting confirmation count", + txid=txid)) # If the transaction is already in the chain, we get the number of confirmations and watch the job # until the end of the appointment diff --git a/pisa/cleaner.py b/pisa/cleaner.py index 68ad5e2..f9d748b 100644 --- a/pisa/cleaner.py +++ b/pisa/cleaner.py @@ -18,13 +18,14 @@ class Cleaner: else: locator_uuid_map[locator].remove(uuid) - logging.info(M("[Cleaner] end time reached with no match! Deleting appointment.", locator=locator, uuid=uuid)) + logging.info(M("[Cleaner] end time reached with no match! Deleting appointment.", + locator=locator, uuid=uuid)) @staticmethod def delete_completed_jobs(jobs, tx_job_map, completed_jobs, height): for uuid, confirmations in completed_jobs: logging.info(M("[Cleaner] job completed. 
Appointment ended after reaching enough confirmations.", - uuid=uuid, height=height, confirmations=confirmations)) + uuid=uuid, height=height, confirmations=confirmations)) # ToDo: #9-add-data-persistence justice_txid = jobs[uuid].justice_txid diff --git a/pisa/encrypted_blob.py b/pisa/encrypted_blob.py index 3aeee13..1dcf644 100644 --- a/pisa/encrypted_blob.py +++ b/pisa/encrypted_blob.py @@ -23,11 +23,10 @@ class EncryptedBlob: nonce = master_key[16:] logging.info(M("[Watcher] creating new blob.", - master_key=hexlify(master_key).decode(), - sk=hexlify(sk).decode(), - nonce=hexlify(sk).decode(), - encrypted_blob=self.data - )) + master_key=hexlify(master_key).decode(), + sk=hexlify(sk).decode(), + nonce=hexlify(sk).decode(), + encrypted_blob=self.data)) # Decrypt aesgcm = AESGCM(sk) diff --git a/pisa/responder.py b/pisa/responder.py index e44ee61..ae1859e 100644 --- a/pisa/responder.py +++ b/pisa/responder.py @@ -80,7 +80,8 @@ class Responder: if confirmations == 0: self.unconfirmed_txs.append(justice_txid) - logging.info(M("[Responder] new job added.", dispute_txid=dispute_txid, justice_txid=justice_txid, appointment_end=appointment_end)) + logging.info(M("[Responder] new job added.", + dispute_txid=dispute_txid, justice_txid=justice_txid, appointment_end=appointment_end)) if self.asleep: self.asleep = False @@ -108,7 +109,8 @@ class Responder: txs = block.get('tx') height = block.get('height') - logging.info(M("[Responder] new block received", block_hash=block_hash, prev_block_hash=block.get('previousblockhash'), txs=txs)) + logging.info(M("[Responder] new block received", + block_hash=block_hash, prev_block_hash=block.get('previousblockhash'), txs=txs)) # ToDo: #9-add-data-persistence # change prev_block_hash condition @@ -123,7 +125,8 @@ class Responder: else: logging.warning(M("[Responder] reorg found!", - local_prev_block_hash=prev_block_hash, remote_prev_block_hash=block.get('previousblockhash'))) + local_prev_block_hash=prev_block_hash, + remote_prev_block_hash=block.get('previousblockhash'))) self.handle_reorgs() @@ -170,7 +173,8 @@ class Responder: self.jobs[uuid].justice_rawtx, self.jobs[uuid].appointment_end, retry=True) logging.warning(M("[Responder] Transaction has missed many confirmations. 
Rebroadcasting.", - justice_txid=self.jobs[uuid].justice_txid, confirmations_missed=CONFIRMATIONS_BEFORE_RETRY)) + justice_txid=self.jobs[uuid].justice_txid, + confirmations_missed=CONFIRMATIONS_BEFORE_RETRY)) # FIXME: Legacy code, must be checked and updated/fixed def handle_reorgs(self): @@ -187,7 +191,9 @@ class Responder: # If both transactions are there, we only need to update the justice tx confirmation count if justice_in_chain: logging.info(M("[Responder] updating confirmation count for transaction.", - justice_txid=job.justice_txid, prev_count=job.confirmations, curr_count=justice_confirmations)) + justice_txid=job.justice_txid, + prev_count=job.confirmations, + curr_count=justice_confirmations)) job.confirmations = justice_confirmations diff --git a/pisa/utils/zmq_subscriber.py b/pisa/utils/zmq_subscriber.py index 0545d9c..4fbc63b 100644 --- a/pisa/utils/zmq_subscriber.py +++ b/pisa/utils/zmq_subscriber.py @@ -29,4 +29,5 @@ class ZMQHandler: block_hash = binascii.hexlify(body).decode('UTF-8') block_queue.put(block_hash) - logging.info(M("[ZMQHandler-{}] new block received via ZMQ".format(self.parent), block_hash=block_hash)) + logging.info(M("[ZMQHandler-{}] new block received via ZMQ".format(self.parent), + block_hash=block_hash)) diff --git a/pisa/watcher.py b/pisa/watcher.py index a70bb43..3e62f06 100644 --- a/pisa/watcher.py +++ b/pisa/watcher.py @@ -61,7 +61,8 @@ class Watcher: else: appointment_added = False - logging.info(M("[Watcher] maximum appointments reached, appointment rejected.", locator=appointment.locator)) + logging.info(M("[Watcher] maximum appointments reached, appointment rejected.", + locator=appointment.locator)) return appointment_added From 7f9c7d8609d423ac25755d6f898bc0e52707d417 Mon Sep 17 00:00:00 2001 From: Salvatore Ingala <6681844+bigspider@users.noreply.github.com> Date: Tue, 8 Oct 2019 18:23:31 +0700 Subject: [PATCH 36/82] Removed double initialization of logging --- pisa/__init__.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/pisa/__init__.py b/pisa/__init__.py index 9ba1a28..0d58d71 100644 --- a/pisa/__init__.py +++ b/pisa/__init__.py @@ -20,8 +20,6 @@ class StructuredMessage(object): M = StructuredMessage # to improve readability -logging.basicConfig(level=logging.INFO, format='%(message)s') - # Configure logging logging.basicConfig(format='%(message)s', level=logging.INFO, handlers=[ logging.FileHandler(conf.SERVER_LOG_FILE), From bae9b6b9133608eabb15ca033aeb7a83c147ae61 Mon Sep 17 00:00:00 2001 From: Salvatore Ingala <6681844+bigspider@users.noreply.github.com> Date: Tue, 8 Oct 2019 19:08:12 +0700 Subject: [PATCH 37/82] Added Logger class; refactored logging accordingly --- pisa/__init__.py | 17 ++++++++++++++- pisa/api.py | 10 +++++---- pisa/block_processor.py | 25 +++++++++++----------- pisa/carrier.py | 16 +++++++-------- pisa/cleaner.py | 13 ++++++------ pisa/encrypted_blob.py | 14 +++++++------ pisa/inspector.py | 18 ++++++++-------- pisa/pisad.py | 9 ++++---- pisa/responder.py | 40 +++++++++++++++++++----------------- pisa/utils/zmq_subscriber.py | 9 +++++--- pisa/watcher.py | 20 +++++++++--------- 11 files changed, 109 insertions(+), 82 deletions(-) diff --git a/pisa/__init__.py b/pisa/__init__.py index 0d58d71..77ae6b2 100644 --- a/pisa/__init__.py +++ b/pisa/__init__.py @@ -18,7 +18,22 @@ class StructuredMessage(object): def __str__(self): return json.dumps({**self.kwargs, "message": self.message, "time": self.time}) -M = StructuredMessage # to improve readability + +class Logger(object): + def __init__(self, actor=None): + 
self.actor = actor + + def _add_prefix(self, msg): + return msg if self.actor is None else "[{}] {}".format(self.actor, msg) + + def info(msg, **kwargs): + logging.info(StructuredMessage(self._add_prefix(msg), **kwargs)) + + def debug(msg, **kwargs): + logging.debug(StructuredMessage(self._add_prefix(msg), **kwargs)) + + def error(msg, **kwargs): + logging.error(StructuredMessage(self._add_prefix(msg), **kwargs)) # Configure logging logging.basicConfig(format='%(message)s', level=logging.INFO, handlers=[ diff --git a/pisa/api.py b/pisa/api.py index b437d80..301dfa5 100644 --- a/pisa/api.py +++ b/pisa/api.py @@ -1,7 +1,7 @@ import json from flask import Flask, request, Response, abort, jsonify -from pisa import HOST, PORT, logging, bitcoin_cli, M +from pisa import HOST, PORT, logging, bitcoin_cli, Logger from pisa.watcher import Watcher from pisa.inspector import Inspector from pisa import HOST, PORT, logging @@ -15,13 +15,15 @@ HTTP_OK = 200 HTTP_BAD_REQUEST = 400 HTTP_SERVICE_UNAVAILABLE = 503 +logger = Logger("API") + @app.route('/', methods=['POST']) def add_appointment(): remote_addr = request.environ.get('REMOTE_ADDR') remote_port = request.environ.get('REMOTE_PORT') - logging.info(M('[API] connection accepted', from_addr_port='{}:{}'.format(remote_addr, remote_port))) + logger.info('connection accepted', from_addr_port='{}:{}'.format(remote_addr, remote_port)) # Check content type once if properly defined request_data = json.loads(request.get_json()) @@ -47,8 +49,8 @@ def add_appointment(): rcode = HTTP_BAD_REQUEST response = "appointment rejected. Request does not match the standard" - logging.info(M('[API] sending response and disconnecting', - from_addr_port='{}:{}'.format(remote_addr, remote_port), response=response)) + logger.info('sending response and disconnecting', + from_addr_port='{}:{}'.format(remote_addr, remote_port), response=response) return Response(response, status=rcode, mimetype='text/plain') diff --git a/pisa/block_processor.py b/pisa/block_processor.py index 08bd1c9..ae9530b 100644 --- a/pisa/block_processor.py +++ b/pisa/block_processor.py @@ -1,9 +1,11 @@ import binascii from hashlib import sha256 -from pisa import logging, bitcoin_cli, M +from pisa import bitcoin_cli, Logger from pisa.utils.auth_proxy import JSONRPCException +logger = Logger("BlockProcessor") + class BlockProcessor: @staticmethod @@ -14,7 +16,7 @@ class BlockProcessor: except JSONRPCException as e: block = None - logging.error(M("[BlockProcessor] couldn't get block from bitcoind.", error_code=e)) + logger.error("couldn't get block from bitcoind.", error_code=e) return block @@ -26,7 +28,7 @@ class BlockProcessor: except JSONRPCException as e: block_hash = None - logging.error(M("[BlockProcessor] couldn't get block hash.", error_code=e)) + logger.error("couldn't get block hash.", error_code=e) return block_hash @@ -38,7 +40,7 @@ class BlockProcessor: except JSONRPCException as e: block_count = None - logging.error("[BlockProcessor] couldn't get block block count. 
Error code {}".format(e)) + logger.error("couldn't get block block count", error_code=e) return block_count @@ -54,10 +56,10 @@ class BlockProcessor: potential_matches = {locator: potential_locators[locator] for locator in intersection} if len(potential_matches) > 0: - logging.info(M("[BlockProcessor] list of potential matches", potential_matches=potential_matches)) + logger.info("list of potential matches", potential_matches=potential_matches) else: - logging.info(M("[BlockProcessor] no potential matches found")) + logger.info("no potential matches found") return potential_matches @@ -75,13 +77,12 @@ class BlockProcessor: justice_txid = bitcoin_cli.decoderawtransaction(justice_rawtx).get('txid') matches.append((locator, uuid, dispute_txid, justice_txid, justice_rawtx)) - logging.info(M("[BlockProcessor] match found for locator.", - locator=locator, uuid=uuid, justice_txid=justice_txid)) + logger.info("match found for locator.", locator=locator, uuid=uuid, justice_txid=justice_txid) except JSONRPCException as e: # Tx decode failed returns error code -22, maybe we should be more strict here. Leaving it simple # for the POC - logging.error(M("[BlockProcessor] can't build transaction from decoded data.", error_code=e)) + logger.error("can't build transaction from decoded data.", error_code=e) return matches @@ -93,7 +94,7 @@ class BlockProcessor: if tx in tx_job_map and tx in unconfirmed_txs: unconfirmed_txs.remove(tx) - logging.info(M("[Responder] confirmation received for transaction", tx=tx)) + logger.info("confirmation received for transaction", tx=tx) elif tx in unconfirmed_txs: if tx in missed_confirmations: @@ -102,8 +103,6 @@ class BlockProcessor: else: missed_confirmations[tx] = 1 - logging.info(M("[Responder] transaction missed a confirmation", - tx=tx, missed_confirmations=missed_confirmations[tx])) + logger.info("transaction missed a confirmation", tx=tx, missed_confirmations=missed_confirmations[tx]) return unconfirmed_txs, missed_confirmations - diff --git a/pisa/carrier.py b/pisa/carrier.py index 6efa58f..a82fb3f 100644 --- a/pisa/carrier.py +++ b/pisa/carrier.py @@ -1,8 +1,10 @@ from pisa.rpc_errors import * -from pisa import logging, bitcoin_cli, M +from pisa import bitcoin_cli, Logger from pisa.utils.auth_proxy import JSONRPCException from pisa.errors import UNKNOWN_JSON_RPC_EXCEPTION +logger = Logger("Carrier") + class Carrier: class Receipt: @@ -13,7 +15,7 @@ class Carrier: def send_transaction(self, rawtx, txid): try: - logging.info(M("[Carrier] pushing transaction to the network", txid=txid, rawtx=rawtx)) + logger.info("pushing transaction to the network", txid=txid, rawtx=rawtx) bitcoin_cli.sendrawtransaction(rawtx) receipt = self.Receipt(delivered=True) @@ -32,8 +34,7 @@ class Carrier: receipt = self.Receipt(delivered=False, reason=UNKNOWN_JSON_RPC_EXCEPTION) elif errno == RPC_VERIFY_ALREADY_IN_CHAIN: - logging.info(M("[Carrier] Transaction is already in the blockchain. Getting confirmation count", - txid=txid)) + logger.info("Transaction is already in the blockchain. 
Getting confirmation count", txid=txid) # If the transaction is already in the chain, we get the number of confirmations and watch the job # until the end of the appointment @@ -50,7 +51,7 @@ class Carrier: else: # If something else happens (unlikely but possible) log it so we can treat it in future releases - logging.error(M("[Responder] JSONRPCException.", error_code=e)) + logger.error("JSONRPCException.", error_code=e) receipt = self.Receipt(delivered=False, reason=UNKNOWN_JSON_RPC_EXCEPTION) return receipt @@ -67,12 +68,11 @@ class Carrier: # reorged while we were querying bitcoind to get the confirmation count. In such a case we just # restart the job if e.error.get('code') == RPC_INVALID_ADDRESS_OR_KEY: - logging.info(M("[Carrier] transaction got reorged before obtaining information", txid=txid)) + logger.info("transaction got reorged before obtaining information", txid=txid) # TODO: Check RPC methods to see possible returns and avoid general else # else: # # If something else happens (unlikely but possible) log it so we can treat it in future releases - # logging.error(M("[Responder] JSONRPCException.", error_code=e) + # logger.error("JSONRPCException.", error_code=e) return tx_info - diff --git a/pisa/cleaner.py b/pisa/cleaner.py index f9d748b..39af1d5 100644 --- a/pisa/cleaner.py +++ b/pisa/cleaner.py @@ -1,4 +1,6 @@ -from pisa import logging, M +from pisa import Logger + +logger = Logger("Cleaner") # Dictionaries in Python are "passed-by-reference", so no return is needed for the Cleaner" # https://docs.python.org/3/faq/programming.html#how-do-i-write-a-function-with-output-parameters-call-by-reference @@ -18,14 +20,13 @@ class Cleaner: else: locator_uuid_map[locator].remove(uuid) - logging.info(M("[Cleaner] end time reached with no match! Deleting appointment.", - locator=locator, uuid=uuid)) + logger.info("end time reached with no match! Deleting appointment.", locator=locator, uuid=uuid) @staticmethod def delete_completed_jobs(jobs, tx_job_map, completed_jobs, height): for uuid, confirmations in completed_jobs: - logging.info(M("[Cleaner] job completed. Appointment ended after reaching enough confirmations.", - uuid=uuid, height=height, confirmations=confirmations)) + logger.info("job completed. Appointment ended after reaching enough confirmations.", + uuid=uuid, height=height, confirmations=confirmations) # ToDo: #9-add-data-persistence justice_txid = jobs[uuid].justice_txid @@ -34,7 +35,7 @@ class Cleaner: if len(tx_job_map[justice_txid]) == 1: tx_job_map.pop(justice_txid) - logging.info(M("[Cleaner] no more jobs for justice transaction.", justice_txid=justice_txid)) + logger.info("no more jobs for justice transaction.", justice_txid=justice_txid) else: tx_job_map[justice_txid].remove(uuid) diff --git a/pisa/encrypted_blob.py b/pisa/encrypted_blob.py index 1dcf644..a772241 100644 --- a/pisa/encrypted_blob.py +++ b/pisa/encrypted_blob.py @@ -1,7 +1,9 @@ from hashlib import sha256 from binascii import unhexlify, hexlify from cryptography.hazmat.primitives.ciphers.aead import AESGCM -from pisa import logging, M +from pisa import Logger + +logger = Logger("Watcher") # FIXME: EncryptedBlob is assuming AES-128-GCM. 
A cipher field should be part of the object and the decryption should be
@@ -22,11 +24,11 @@ class EncryptedBlob:
         sk = master_key[:16]
         nonce = master_key[16:]
 
-        logging.info(M("[Watcher] creating new blob.",
-                       master_key=hexlify(master_key).decode(),
-                       sk=hexlify(sk).decode(),
-                       nonce=hexlify(sk).decode(),
-                       encrypted_blob=self.data))
+        logger.info("creating new blob.",
+                    master_key=hexlify(master_key).decode(),
+                    sk=hexlify(sk).decode(),
+                    nonce=hexlify(nonce).decode(),
+                    encrypted_blob=self.data)
 
         # Decrypt
         aesgcm = AESGCM(sk)
diff --git a/pisa/inspector.py
index 945afc1..0bffa75 100644
--- a/pisa/inspector.py
+++ b/pisa/inspector.py
@@ -2,10 +2,12 @@ import re
 
 from pisa import errors
 import pisa.conf as conf
-from pisa import logging, bitcoin_cli, M
+from pisa import bitcoin_cli, Logger
 from pisa.appointment import Appointment
 from pisa.block_processor import BlockProcessor
 
+logger = Logger("Inspector")
+
 # FIXME: The inspector logs the wrong messages sent from the users. A possible attack surface would be to send a really
 #        long field that, even if not accepted by PISA, would be stored in the logs. This is a possible DoS surface
 #        since pisa would store any kind of message (no matter the length). Solution: truncate the length of the fields
@@ -70,7 +72,7 @@ class Inspector:
                 rcode = errors.APPOINTMENT_WRONG_FIELD_FORMAT
                 message = "wrong locator format ({})".format(locator)
 
-            logging.error(M("[Inspector] {}".format(message)))
+            logger.error(message)
 
         return rcode, message
 
@@ -97,7 +99,7 @@ class Inspector:
             else:
                 message = "start_time is too close to current height"
 
-            logging.error(M("[Inspector] {}".format(message)))
+            logger.error(message)
 
         return rcode, message
 
@@ -130,7 +132,7 @@ class Inspector:
             else:
                 message = 'end_time is too close to current height'
 
-            logging.error(M("[Inspector] {}".format(message)))
+            logger.error(message)
 
         return rcode, message
 
@@ -152,7 +154,7 @@ class Inspector:
                 message = "dispute delta too small. The dispute delta should be at least {} (current: {})".format(
                     conf.MIN_DISPUTE_DELTA, dispute_delta)
 
-            logging.error(M("[Inspector] {}".format(message)))
+            logger.error(message)
 
         return rcode, message
 
@@ -174,7 +176,7 @@ class Inspector:
                 rcode = errors.APPOINTMENT_WRONG_FIELD_FORMAT
                 message = "wrong encrypted_blob format ({})".format(encrypted_blob)
 
-            logging.error(M("[Inspector] {}".format(message)))
+            logger.error(message)
 
         return rcode, message
 
@@ -195,7 +197,7 @@ class Inspector:
                 rcode = errors.APPOINTMENT_CIPHER_NOT_SUPPORTED
                 message = "cipher not supported: {}".format(cipher)
 
-            logging.error(M("[Inspector] {}".format(message)))
+            logger.error(message)
 
         return rcode, message
 
@@ -216,6 +218,6 @@ class Inspector:
             rcode = errors.APPOINTMENT_HASH_FUNCTION_NOT_SUPPORTED
             message = "hash_function not supported {}".format(hash_function)
 
-            logging.error(M("[Inspector] {}".format(message)))
+            logger.error(message)
 
         return rcode, message
diff --git a/pisa/pisad.py
index f7b3603..c769201 100644
--- a/pisa/pisad.py
+++ b/pisa/pisad.py
@@ -1,10 +1,11 @@
 from sys import argv
 from getopt import getopt
 
-from pisa import logging, M
+from pisa import logging, Logger
 from pisa.api import start_api
 from pisa.tools import can_connect_to_bitcoind, in_correct_network
 
+logger = Logger("Pisad")
 
 if __name__ == '__main__':
     debug = False
@@ -19,8 +20,8 @@ if __name__ == '__main__':
             start_api()
 
         else:
-            logging.error(M("[Pisad] bitcoind is running on a different network, check conf.py and bitcoin.conf. "
-                            "Shutting down"))
+            logger.error("[Pisad] bitcoind is running on a different network, check conf.py and bitcoin.conf. 
" - "Shutting down")) + logger.error("[Pisad] bitcoind is running on a different network, check conf.py and bitcoin.conf. " + "Shutting down") else: - logging.error(M("[Pisad] can't connect to bitcoind. Shutting down")) + logging.error("[Pisad] can't connect to bitcoind. Shutting down") diff --git a/pisa/responder.py b/pisa/responder.py index ae1859e..6e76904 100644 --- a/pisa/responder.py +++ b/pisa/responder.py @@ -3,7 +3,7 @@ from threading import Thread from hashlib import sha256 from binascii import unhexlify -from pisa import logging, M +from pisa import Logger from pisa.cleaner import Cleaner from pisa.carrier import Carrier from pisa.tools import check_tx_in_chain @@ -13,6 +13,8 @@ from pisa.utils.zmq_subscriber import ZMQHandler CONFIRMATIONS_BEFORE_RETRY = 6 MIN_CONFIRMATIONS = 6 +logger = Logger("Responder") + class Job: def __init__(self, dispute_txid, justice_txid, justice_rawtx, appointment_end, retry_counter=0): @@ -45,7 +47,7 @@ class Responder: def add_response(self, uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, retry=False): if self.asleep: - logging.info(M("[Responder] waking up!")) + logger.info("waking up!") carrier = Carrier() receipt = carrier.send_transaction(justice_rawtx, justice_txid) @@ -80,8 +82,8 @@ class Responder: if confirmations == 0: self.unconfirmed_txs.append(justice_txid) - logging.info(M("[Responder] new job added.", - dispute_txid=dispute_txid, justice_txid=justice_txid, appointment_end=appointment_end)) + logger.info("new job added.", + dispute_txid=dispute_txid, justice_txid=justice_txid, appointment_end=appointment_end) if self.asleep: self.asleep = False @@ -109,8 +111,8 @@ class Responder: txs = block.get('tx') height = block.get('height') - logging.info(M("[Responder] new block received", - block_hash=block_hash, prev_block_hash=block.get('previousblockhash'), txs=txs)) + logger.info("new block received", + block_hash=block_hash, prev_block_hash=block.get('previousblockhash'), txs=txs) # ToDo: #9-add-data-persistence # change prev_block_hash condition @@ -124,9 +126,9 @@ class Responder: self.rebroadcast(txs_to_rebroadcast) else: - logging.warning(M("[Responder] reorg found!", - local_prev_block_hash=prev_block_hash, - remote_prev_block_hash=block.get('previousblockhash'))) + logger.warning("reorg found!", + local_prev_block_hash=prev_block_hash, + remote_prev_block_hash=block.get('previousblockhash')) self.handle_reorgs() @@ -136,7 +138,7 @@ class Responder: self.asleep = True self.zmq_subscriber.terminate = True - logging.info(M("[Responder] no more pending jobs, going back to sleep")) + logger.info("no more pending jobs, going back to sleep") def get_txs_to_rebroadcast(self, txs): txs_to_rebroadcast = [] @@ -172,9 +174,9 @@ class Responder: self.add_response(uuid, self.jobs[uuid].dispute_txid, self.jobs[uuid].justice_txid, self.jobs[uuid].justice_rawtx, self.jobs[uuid].appointment_end, retry=True) - logging.warning(M("[Responder] Transaction has missed many confirmations. Rebroadcasting.", - justice_txid=self.jobs[uuid].justice_txid, - confirmations_missed=CONFIRMATIONS_BEFORE_RETRY)) + logger.warning("Transaction has missed many confirmations. 
Rebroadcasting.", + justice_txid=self.jobs[uuid].justice_txid, + confirmations_missed=CONFIRMATIONS_BEFORE_RETRY) # FIXME: Legacy code, must be checked and updated/fixed def handle_reorgs(self): @@ -190,10 +192,10 @@ class Responder: # If both transactions are there, we only need to update the justice tx confirmation count if justice_in_chain: - logging.info(M("[Responder] updating confirmation count for transaction.", - justice_txid=job.justice_txid, - prev_count=job.confirmations, - curr_count=justice_confirmations)) + logger.info("updating confirmation count for transaction.", + justice_txid=job.justice_txid, + prev_count=job.confirmations, + curr_count=justice_confirmations) job.confirmations = justice_confirmations @@ -208,5 +210,5 @@ class Responder: # ToDo: #24-properly-handle-reorgs # FIXME: if the dispute is not on chain (either in mempool or not there at all), we need to call the # reorg manager - logging.warning(M("[Responder] dispute and justice transaction missing. Calling the reorg manager")) - logging.error(M("[Responder] reorg manager not yet implemented")) + logger.warning("dispute and justice transaction missing. Calling the reorg manager") + logger.error("reorg manager not yet implemented") diff --git a/pisa/utils/zmq_subscriber.py b/pisa/utils/zmq_subscriber.py index 4fbc63b..d28ae64 100644 --- a/pisa/utils/zmq_subscriber.py +++ b/pisa/utils/zmq_subscriber.py @@ -1,8 +1,10 @@ import zmq import binascii -from pisa import logging, M +from pisa import Logger from pisa.conf import FEED_PROTOCOL, FEED_ADDR, FEED_PORT +logger = Logger("ZMQHandler") + # ToDo: #7-add-async-back-to-zmq class ZMQHandler: @@ -29,5 +31,6 @@ class ZMQHandler: block_hash = binascii.hexlify(body).decode('UTF-8') block_queue.put(block_hash) - logging.info(M("[ZMQHandler-{}] new block received via ZMQ".format(self.parent), - block_hash=block_hash)) + logger.info("new block received via ZMQ", + parent=self.parent, + block_hash=block_hash) diff --git a/pisa/watcher.py b/pisa/watcher.py index 3e62f06..8dbaa1c 100644 --- a/pisa/watcher.py +++ b/pisa/watcher.py @@ -2,7 +2,7 @@ from uuid import uuid4 from queue import Queue from threading import Thread -from pisa import logging, M +from pisa import Logger from pisa.cleaner import Cleaner from pisa.conf import EXPIRY_DELTA from pisa.responder import Responder @@ -10,6 +10,7 @@ from pisa.conf import MAX_APPOINTMENTS from pisa.block_processor import BlockProcessor from pisa.utils.zmq_subscriber import ZMQHandler +logging = Logger("Watcher") class Watcher: def __init__(self, max_appointments=MAX_APPOINTMENTS): @@ -52,17 +53,16 @@ class Watcher: zmq_thread.start() watcher.start() - logging.info(M("[Watcher] waking up!")) + logger.info("waking up!") appointment_added = True - logging.info(M("[Watcher] new appointment accepted.", locator=appointment.locator)) + logger.info("new appointment accepted.", locator=appointment.locator) else: appointment_added = False - logging.info(M("[Watcher] maximum appointments reached, appointment rejected.", - locator=appointment.locator)) + logger.info("maximum appointments reached, appointment rejected.", locator=appointment.locator) return appointment_added @@ -73,14 +73,14 @@ class Watcher: def do_watch(self): while len(self.appointments) > 0: block_hash = self.block_queue.get() - logging.info(M("[Watcher] new block received", block_hash=block_hash)) + logger.info("new block received", block_hash=block_hash) block = BlockProcessor.get_block(block_hash) if block is not None: txids = block.get('tx') - logging.info(M("[Watcher] 
list of transactions.", txids=txids)) + logger.info("list of transactions.", txids=txids) expired_appointments = [uuid for uuid, appointment in self.appointments.items() if block["height"] > appointment.end_time + EXPIRY_DELTA] @@ -91,8 +91,8 @@ class Watcher: matches = BlockProcessor.get_matches(potential_matches, self.locator_uuid_map, self.appointments) for locator, uuid, dispute_txid, justice_txid, justice_rawtx in matches: - logging.info(M("[Watcher] notifying responder and deleting appointment.", - justice_txid=justice_txid, locator=locator, uuid=uuid)) + logger.info("notifying responder and deleting appointment.", + justice_txid=justice_txid, locator=locator, uuid=uuid) self.responder.add_response(uuid, dispute_txid, justice_txid, justice_rawtx, self.appointments[uuid].end_time) @@ -113,4 +113,4 @@ class Watcher: self.asleep = True self.zmq_subscriber.terminate = True - logging.error(M("[Watcher] no more pending appointments, going back to sleep")) + logger.error("no more pending appointments, going back to sleep") From 4bcc8e20a0c0af38030fae153e2554e881bfa6d9 Mon Sep 17 00:00:00 2001 From: Salvatore Ingala <6681844+bigspider@users.noreply.github.com> Date: Wed, 9 Oct 2019 08:29:47 +0700 Subject: [PATCH 38/82] Refactored check_tx_in_chain with new log format --- pisa/pisad.py | 5 ++--- pisa/responder.py | 4 ++-- pisa/tools.py | 12 ++++++------ 3 files changed, 10 insertions(+), 11 deletions(-) diff --git a/pisa/pisad.py b/pisa/pisad.py index c769201..406df70 100644 --- a/pisa/pisad.py +++ b/pisa/pisad.py @@ -20,8 +20,7 @@ if __name__ == '__main__': start_api() else: - logger.error("[Pisad] bitcoind is running on a different network, check conf.py and bitcoin.conf. " - "Shutting down") + logger.error("bitcoind is running on a different network, check conf.py and bitcoin.conf. Shutting down") else: - logging.error("[Pisad] can't connect to bitcoind. Shutting down") + logging.error("can't connect to bitcoind. Shutting down") diff --git a/pisa/responder.py b/pisa/responder.py index 6e76904..6855e5c 100644 --- a/pisa/responder.py +++ b/pisa/responder.py @@ -183,11 +183,11 @@ class Responder: for uuid, job in self.jobs.items(): # First we check if the dispute transaction is still in the blockchain. 
If not, the justice can not be # there either, so we'll need to call the reorg manager straight away - dispute_in_chain, _ = check_tx_in_chain(job.dispute_txid, parent='Responder', tx_label='dispute tx') + dispute_in_chain, _ = check_tx_in_chain(job.dispute_txid, logger=logger, tx_label='dispute tx') # If the dispute is there, we can check the justice tx if dispute_in_chain: - justice_in_chain, justice_confirmations = check_tx_in_chain(job.justice_txid, parent='Responder', + justice_in_chain, justice_confirmations = check_tx_in_chain(job.justice_txid, logger=logger, tx_label='justice tx') # If both transactions are there, we only need to update the justice tx confirmation count diff --git a/pisa/tools.py b/pisa/tools.py index 909c64c..a5fbe9c 100644 --- a/pisa/tools.py +++ b/pisa/tools.py @@ -2,12 +2,12 @@ import re from http.client import HTTPException import pisa.conf as conf -from pisa import logging, bitcoin_cli +from pisa import bitcoin_cli, Logger from pisa.utils.auth_proxy import JSONRPCException from pisa.rpc_errors import RPC_INVALID_ADDRESS_OR_KEY -def check_tx_in_chain(tx_id, parent='', tx_label='transaction'): +def check_tx_in_chain(tx_id, logger=Logger(), tx_label='transaction'): tx_in_chain = False confirmations = 0 @@ -17,18 +17,18 @@ def check_tx_in_chain(tx_id, parent='', tx_label='transaction'): if tx_info.get("confirmations"): confirmations = int(tx_info.get("confirmations")) tx_in_chain = True - logging.error("[{}] {} found in the blockchain (txid: {}) ".format(parent, tx_label, tx_id)) + logger.error("{} found in the blockchain (txid: {}) ".format(tx_label), txid=tx_id) else: - logging.error("[{}] {} found in mempool (txid: {}) ".format(parent, tx_label, tx_id)) + logger.error("{} found in mempool (txid: {}) ".format(tx_label), txid=tx_id) except JSONRPCException as e: if e.error.get('code') == RPC_INVALID_ADDRESS_OR_KEY: - logging.error("[{}] {} not found in mempool nor blockchain (txid: {}) ".format(parent, tx_label, tx_id)) + logger.error("{} not found in mempool nor blockchain (txid: {}) ".format(tx_label), txid=tx_id) else: # ToDO: Unhandled errors, check this properly - logging.error("[{}] JSONRPCException. 
Error code {}".format(parent, e)) + logger.error("JSONRPCException.", error_code=e) return tx_in_chain, confirmations From a53e7a82e38b3a2288f8daccc960022c727e4ef9 Mon Sep 17 00:00:00 2001 From: Salvatore Ingala <6681844+bigspider@users.noreply.github.com> Date: Wed, 9 Oct 2019 09:16:35 +0700 Subject: [PATCH 39/82] Added 'actor' among the fields of the structured log messages; minor other fixes --- pisa/__init__.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pisa/__init__.py b/pisa/__init__.py index 77ae6b2..2dcde22 100644 --- a/pisa/__init__.py +++ b/pisa/__init__.py @@ -26,14 +26,14 @@ class Logger(object): def _add_prefix(self, msg): return msg if self.actor is None else "[{}] {}".format(self.actor, msg) - def info(msg, **kwargs): - logging.info(StructuredMessage(self._add_prefix(msg), **kwargs)) + def info(self, msg, **kwargs): + logging.info(StructuredMessage(self._add_prefix(msg), actor=self.actor, **kwargs)) - def debug(msg, **kwargs): - logging.debug(StructuredMessage(self._add_prefix(msg), **kwargs)) + def debug(self, msg, **kwargs): + logging.debug(StructuredMessage(self._add_prefix(msg), actor=self.actor, **kwargs)) - def error(msg, **kwargs): - logging.error(StructuredMessage(self._add_prefix(msg), **kwargs)) + def error(self, msg, **kwargs): + logging.error(StructuredMessage(self._add_prefix(msg), actor=self.actor, **kwargs)) # Configure logging logging.basicConfig(format='%(message)s', level=logging.INFO, handlers=[ From c524319027d5ae2b2df99f4c90a2f76aaf8f39aa Mon Sep 17 00:00:00 2001 From: Salvatore Ingala <6681844+bigspider@users.noreply.github.com> Date: Wed, 9 Oct 2019 09:30:32 +0700 Subject: [PATCH 40/82] Several fixes and improvements --- pisa/block_processor.py | 2 -- pisa/inspector.py | 21 ++++++++++++++------- pisa/logger.py | 30 ++++++++++++++++++++++++++++++ pisa/pisad.py | 2 +- pisa/tools.py | 6 +++--- pisa/utils/zmq_subscriber.py | 9 +++------ pisa/watcher.py | 2 +- 7 files changed, 52 insertions(+), 20 deletions(-) create mode 100644 pisa/logger.py diff --git a/pisa/block_processor.py b/pisa/block_processor.py index ae9530b..7c544ea 100644 --- a/pisa/block_processor.py +++ b/pisa/block_processor.py @@ -63,8 +63,6 @@ class BlockProcessor: return potential_matches - return potential_matches - @staticmethod def get_matches(potential_matches, locator_uuid_map, appointments): matches = [] diff --git a/pisa/inspector.py b/pisa/inspector.py index 0bffa75..ecbf688 100644 --- a/pisa/inspector.py +++ b/pisa/inspector.py @@ -72,7 +72,8 @@ class Inspector: rcode = errors.APPOINTMENT_WRONG_FIELD_FORMAT message = "wrong locator format ({})".format(locator) - logger.error(message) + if message is not None: + logger.error(message) return rcode, message @@ -99,7 +100,8 @@ class Inspector: else: message = "start_time is too close to current height" - logger.error(message) + if message is not None: + logger.error(message) return rcode, message @@ -132,7 +134,8 @@ class Inspector: else: message = 'end_time is too close to current height' - logger.error(message) + if message is not None: + logger.error(message) return rcode, message @@ -154,7 +157,8 @@ class Inspector: message = "dispute delta too small. 
The dispute delta should be at least {} (current: {})".format( conf.MIN_DISPUTE_DELTA, dispute_delta) - logger.error(message) + if message is not None: + logger.error(message) return rcode, message @@ -176,7 +180,8 @@ class Inspector: rcode = errors.APPOINTMENT_WRONG_FIELD_FORMAT message = "wrong encrypted_blob format ({})".format(encrypted_blob) - logger.error(message) + if message is not None: + logger.error(message) return rcode, message @@ -197,7 +202,8 @@ class Inspector: rcode = errors.APPOINTMENT_CIPHER_NOT_SUPPORTED message = "cipher not supported: {}".format(cipher) - logger.error(message) + if message is not None: + logger.error(message) return rcode, message @@ -218,6 +224,7 @@ class Inspector: rcode = errors.APPOINTMENT_HASH_FUNCTION_NOT_SUPPORTED message = "hash_function not supported {}".format(hash_function) - logger.error(message) + if message is not None: + logger.error(message) return rcode, message diff --git a/pisa/logger.py b/pisa/logger.py new file mode 100644 index 0000000..3dad0e0 --- /dev/null +++ b/pisa/logger.py @@ -0,0 +1,30 @@ +import logging +import time +import json + + +class StructuredMessage(object): + def __init__(self, message, **kwargs): + self.message = message + self.time = time.asctime() + self.kwargs = kwargs + + def __str__(self): + return json.dumps({**self.kwargs, "message": self.message, "time": self.time}) + + +class Logger(object): + def __init__(self, actor=None): + self.actor = actor + + def _add_prefix(self, msg): + return msg if self.actor is None else "[{}] {}".format(self.actor, msg) + + def info(self, msg, **kwargs): + logging.info(StructuredMessage(self._add_prefix(msg), actor=self.actor, **kwargs)) + + def debug(self, msg, **kwargs): + logging.debug(StructuredMessage(self._add_prefix(msg), actor=self.actor, **kwargs)) + + def error(self, msg, **kwargs): + logging.error(StructuredMessage(self._add_prefix(msg), actor=self.actor, **kwargs)) \ No newline at end of file diff --git a/pisa/pisad.py b/pisa/pisad.py index 406df70..305f156 100644 --- a/pisa/pisad.py +++ b/pisa/pisad.py @@ -23,4 +23,4 @@ if __name__ == '__main__': logger.error("bitcoind is running on a different network, check conf.py and bitcoin.conf. Shutting down") else: - logging.error("can't connect to bitcoind. Shutting down") + logger.error("can't connect to bitcoind. 
Shutting down") diff --git a/pisa/tools.py b/pisa/tools.py index a5fbe9c..6e24716 100644 --- a/pisa/tools.py +++ b/pisa/tools.py @@ -17,14 +17,14 @@ def check_tx_in_chain(tx_id, logger=Logger(), tx_label='transaction'): if tx_info.get("confirmations"): confirmations = int(tx_info.get("confirmations")) tx_in_chain = True - logger.error("{} found in the blockchain (txid: {}) ".format(tx_label), txid=tx_id) + logger.error("{} found in the blockchain".format(tx_label), txid=tx_id) else: - logger.error("{} found in mempool (txid: {}) ".format(tx_label), txid=tx_id) + logger.error("{} found in mempool".format(tx_label), txid=tx_id) except JSONRPCException as e: if e.error.get('code') == RPC_INVALID_ADDRESS_OR_KEY: - logger.error("{} not found in mempool nor blockchain (txid: {}) ".format(tx_label), txid=tx_id) + logger.error("{} not found in mempool nor blockchain".format(tx_label), txid=tx_id) else: # ToDO: Unhandled errors, check this properly diff --git a/pisa/utils/zmq_subscriber.py b/pisa/utils/zmq_subscriber.py index d28ae64..ded5922 100644 --- a/pisa/utils/zmq_subscriber.py +++ b/pisa/utils/zmq_subscriber.py @@ -3,8 +3,6 @@ import binascii from pisa import Logger from pisa.conf import FEED_PROTOCOL, FEED_ADDR, FEED_PORT -logger = Logger("ZMQHandler") - # ToDo: #7-add-async-back-to-zmq class ZMQHandler: @@ -15,7 +13,8 @@ class ZMQHandler: self.zmqSubSocket.setsockopt(zmq.RCVHWM, 0) self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashblock") self.zmqSubSocket.connect("%s://%s:%s" % (FEED_PROTOCOL, FEED_ADDR, FEED_PORT)) - self.parent = parent + self.logger = Logger("ZMQHandler-{}".format(parent)) + self.terminate = False def handle(self, block_queue): @@ -31,6 +30,4 @@ class ZMQHandler: block_hash = binascii.hexlify(body).decode('UTF-8') block_queue.put(block_hash) - logger.info("new block received via ZMQ", - parent=self.parent, - block_hash=block_hash) + self.logger.info("new block received via ZMQ", block_hash=block_hash) diff --git a/pisa/watcher.py b/pisa/watcher.py index 8dbaa1c..4184e58 100644 --- a/pisa/watcher.py +++ b/pisa/watcher.py @@ -10,7 +10,7 @@ from pisa.conf import MAX_APPOINTMENTS from pisa.block_processor import BlockProcessor from pisa.utils.zmq_subscriber import ZMQHandler -logging = Logger("Watcher") +logger = Logger("Watcher") class Watcher: def __init__(self, max_appointments=MAX_APPOINTMENTS): From dee93e5c62d8e871e6e2b0c91a319d0bfd052569 Mon Sep 17 00:00:00 2001 From: Salvatore Ingala <6681844+bigspider@users.noreply.github.com> Date: Wed, 9 Oct 2019 10:20:39 +0700 Subject: [PATCH 41/82] Moved StructuredMessage and Logger to separate file; refactored pisa-cli logging using new format --- apps/cli/__init__.py | 2 +- apps/cli/blob.py | 13 ++++++++----- pisa/__init__.py | 29 ----------------------------- pisa/api.py | 3 ++- pisa/block_processor.py | 3 ++- pisa/carrier.py | 3 ++- pisa/cleaner.py | 2 +- pisa/encrypted_blob.py | 2 +- pisa/inspector.py | 3 ++- pisa/pisad.py | 3 ++- pisa/responder.py | 2 +- pisa/tools.py | 3 ++- pisa/utils/zmq_subscriber.py | 2 +- pisa/watcher.py | 2 +- 14 files changed, 26 insertions(+), 46 deletions(-) diff --git a/apps/cli/__init__.py b/apps/cli/__init__.py index 40e495c..20bb9fb 100644 --- a/apps/cli/__init__.py +++ b/apps/cli/__init__.py @@ -12,7 +12,7 @@ SUPPORTED_HASH_FUNCTIONS = ["SHA256"] SUPPORTED_CIPHERS = ["AES-GCM-128"] # Configure logging -logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO, handlers=[ +logging.basicConfig(format='%(message)s', level=logging.INFO, handlers=[ 
logging.FileHandler(CLIENT_LOG_FILE), logging.StreamHandler() ]) diff --git a/apps/cli/blob.py b/apps/cli/blob.py index 6041050..204ccd7 100644 --- a/apps/cli/blob.py +++ b/apps/cli/blob.py @@ -5,6 +5,9 @@ from cryptography.hazmat.primitives.ciphers.aead import AESGCM from apps.cli import logging from apps.cli import SUPPORTED_HASH_FUNCTIONS, SUPPORTED_CIPHERS +from pisa.logger import Logger + +logger = Logger("Client") class Blob: @@ -50,10 +53,10 @@ class Blob: encrypted_blob = aesgcm.encrypt(nonce=nonce, data=tx, associated_data=None) encrypted_blob = hexlify(encrypted_blob).decode() - logging.info("[Client] creating new blob") - logging.info("[Client] master key: {}".format(hexlify(master_key).decode())) - logging.info("[Client] sk: {}".format(hexlify(sk).decode())) - logging.info("[Client] nonce: {}".format(hexlify(nonce).decode())) - logging.info("[Client] encrypted_blob: {}".format(encrypted_blob)) + logger.info("creating new blob", + master_key=hexlify(master_key).decode(), + sk=hexlify(sk).decode(), + nonce=hexlify(nonce).decode(), + encrypted_blob=encrypted_blob) return encrypted_blob diff --git a/pisa/__init__.py b/pisa/__init__.py index 2dcde22..5d03345 100644 --- a/pisa/__init__.py +++ b/pisa/__init__.py @@ -1,6 +1,4 @@ import logging -import json -import time from pisa.utils.auth_proxy import AuthServiceProxy import pisa.conf as conf @@ -8,33 +6,6 @@ import pisa.conf as conf HOST = 'localhost' PORT = 9814 - -class StructuredMessage(object): - def __init__(self, message, **kwargs): - self.message = message - self.time = time.asctime() - self.kwargs = kwargs - - def __str__(self): - return json.dumps({**self.kwargs, "message": self.message, "time": self.time}) - - -class Logger(object): - def __init__(self, actor=None): - self.actor = actor - - def _add_prefix(self, msg): - return msg if self.actor is None else "[{}] {}".format(self.actor, msg) - - def info(self, msg, **kwargs): - logging.info(StructuredMessage(self._add_prefix(msg), actor=self.actor, **kwargs)) - - def debug(self, msg, **kwargs): - logging.debug(StructuredMessage(self._add_prefix(msg), actor=self.actor, **kwargs)) - - def error(self, msg, **kwargs): - logging.error(StructuredMessage(self._add_prefix(msg), actor=self.actor, **kwargs)) - # Configure logging logging.basicConfig(format='%(message)s', level=logging.INFO, handlers=[ logging.FileHandler(conf.SERVER_LOG_FILE), diff --git a/pisa/api.py b/pisa/api.py index 301dfa5..1ab768d 100644 --- a/pisa/api.py +++ b/pisa/api.py @@ -1,7 +1,8 @@ import json from flask import Flask, request, Response, abort, jsonify -from pisa import HOST, PORT, logging, bitcoin_cli, Logger +from pisa import HOST, PORT, logging, bitcoin_cli +from pisa.logger import Logger from pisa.watcher import Watcher from pisa.inspector import Inspector from pisa import HOST, PORT, logging diff --git a/pisa/block_processor.py b/pisa/block_processor.py index 7c544ea..c139f86 100644 --- a/pisa/block_processor.py +++ b/pisa/block_processor.py @@ -1,7 +1,8 @@ import binascii from hashlib import sha256 -from pisa import bitcoin_cli, Logger +from pisa import bitcoin_cli +from pisa.logger import Logger from pisa.utils.auth_proxy import JSONRPCException logger = Logger("BlockProcessor") diff --git a/pisa/carrier.py b/pisa/carrier.py index a82fb3f..3e11028 100644 --- a/pisa/carrier.py +++ b/pisa/carrier.py @@ -1,5 +1,6 @@ from pisa.rpc_errors import * -from pisa import bitcoin_cli, Logger +from pisa import bitcoin_cli +from pisa.logger import Logger from pisa.utils.auth_proxy import JSONRPCException from 
pisa.errors import UNKNOWN_JSON_RPC_EXCEPTION diff --git a/pisa/cleaner.py b/pisa/cleaner.py index 39af1d5..0dad0da 100644 --- a/pisa/cleaner.py +++ b/pisa/cleaner.py @@ -1,4 +1,4 @@ -from pisa import Logger +from pisa.logger import Logger logger = Logger("Cleaner") diff --git a/pisa/encrypted_blob.py b/pisa/encrypted_blob.py index a772241..d9a02ba 100644 --- a/pisa/encrypted_blob.py +++ b/pisa/encrypted_blob.py @@ -1,7 +1,7 @@ from hashlib import sha256 from binascii import unhexlify, hexlify from cryptography.hazmat.primitives.ciphers.aead import AESGCM -from pisa import Logger +from pisa.logger import Logger logger = Logger("Watcher") diff --git a/pisa/inspector.py b/pisa/inspector.py index ecbf688..53055b8 100644 --- a/pisa/inspector.py +++ b/pisa/inspector.py @@ -2,7 +2,8 @@ import re from pisa import errors import pisa.conf as conf -from pisa import bitcoin_cli, Logger +from pisa import bitcoin_cli +from pisa.logger import Logger from pisa.appointment import Appointment from pisa.block_processor import BlockProcessor diff --git a/pisa/pisad.py b/pisa/pisad.py index 305f156..c990a2e 100644 --- a/pisa/pisad.py +++ b/pisa/pisad.py @@ -1,7 +1,8 @@ from sys import argv from getopt import getopt -from pisa import logging, Logger +from pisa import logging +from pisa.logger import Logger from pisa.api import start_api from pisa.tools import can_connect_to_bitcoind, in_correct_network diff --git a/pisa/responder.py b/pisa/responder.py index 6855e5c..9b2b1f8 100644 --- a/pisa/responder.py +++ b/pisa/responder.py @@ -3,7 +3,7 @@ from threading import Thread from hashlib import sha256 from binascii import unhexlify -from pisa import Logger +from pisa.logger import Logger from pisa.cleaner import Cleaner from pisa.carrier import Carrier from pisa.tools import check_tx_in_chain diff --git a/pisa/tools.py b/pisa/tools.py index 6e24716..ddc4ab5 100644 --- a/pisa/tools.py +++ b/pisa/tools.py @@ -2,7 +2,8 @@ import re from http.client import HTTPException import pisa.conf as conf -from pisa import bitcoin_cli, Logger +from pisa import bitcoin_cli +from pisa.logger import Logger from pisa.utils.auth_proxy import JSONRPCException from pisa.rpc_errors import RPC_INVALID_ADDRESS_OR_KEY diff --git a/pisa/utils/zmq_subscriber.py b/pisa/utils/zmq_subscriber.py index ded5922..f0ac469 100644 --- a/pisa/utils/zmq_subscriber.py +++ b/pisa/utils/zmq_subscriber.py @@ -1,6 +1,6 @@ import zmq import binascii -from pisa import Logger +from pisa.logger import Logger from pisa.conf import FEED_PROTOCOL, FEED_ADDR, FEED_PORT diff --git a/pisa/watcher.py b/pisa/watcher.py index 4184e58..6ec6d30 100644 --- a/pisa/watcher.py +++ b/pisa/watcher.py @@ -2,7 +2,7 @@ from uuid import uuid4 from queue import Queue from threading import Thread -from pisa import Logger +from pisa.logger import Logger from pisa.cleaner import Cleaner from pisa.conf import EXPIRY_DELTA from pisa.responder import Responder From 831545ef2c0b15527ccc4974eb20178ae4d65f05 Mon Sep 17 00:00:00 2001 From: Salvatore Ingala <6681844+bigspider@users.noreply.github.com> Date: Thu, 10 Oct 2019 09:54:50 +0700 Subject: [PATCH 42/82] Added simple tests for check_txid_format --- test/unit/test_tools.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 test/unit/test_tools.py diff --git a/test/unit/test_tools.py b/test/unit/test_tools.py new file mode 100644 index 0000000..ba5b834 --- /dev/null +++ b/test/unit/test_tools.py @@ -0,0 +1,16 @@ +from pisa.tools import check_txid_format +from pisa import logging + +logging.getLogger().disabled = 
True + + +def test_check_txid_format(): + assert(check_txid_format(None) is False) + assert(check_txid_format("") is False) + assert(check_txid_format(0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef) is False) # wrong type + assert(check_txid_format("0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef") is True) # lowercase + assert(check_txid_format("0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF") is True) # uppercase + assert(check_txid_format("0123456789abcdef0123456789ABCDEF0123456789abcdef0123456789ABCDEF") is True) # mixed case + assert(check_txid_format("0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdf") is False) # too short + assert(check_txid_format("0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0") is False) # too long + assert(check_txid_format("g123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef") is False) # non-hex From aea1d1f1e02dc03ca4675f37c188fb772aa43c48 Mon Sep 17 00:00:00 2001 From: Salvatore Ingala <6681844+bigspider@users.noreply.github.com> Date: Thu, 10 Oct 2019 10:34:03 +0700 Subject: [PATCH 43/82] Fixes from PR review --- apps/cli/blob.py | 3 +-- pisa/api.py | 4 ++-- pisa/block_processor.py | 18 +++++++++--------- pisa/carrier.py | 4 ++-- pisa/cleaner.py | 6 +++--- pisa/encrypted_blob.py | 2 +- pisa/pisad.py | 1 - pisa/responder.py | 30 +++++++++++++++--------------- pisa/tools.py | 3 ++- pisa/utils/zmq_subscriber.py | 2 +- pisa/watcher.py | 14 +++++++------- 11 files changed, 43 insertions(+), 44 deletions(-) diff --git a/apps/cli/blob.py b/apps/cli/blob.py index 204ccd7..5e6f9da 100644 --- a/apps/cli/blob.py +++ b/apps/cli/blob.py @@ -3,7 +3,6 @@ from hashlib import sha256 from binascii import hexlify, unhexlify from cryptography.hazmat.primitives.ciphers.aead import AESGCM -from apps.cli import logging from apps.cli import SUPPORTED_HASH_FUNCTIONS, SUPPORTED_CIPHERS from pisa.logger import Logger @@ -53,7 +52,7 @@ class Blob: encrypted_blob = aesgcm.encrypt(nonce=nonce, data=tx, associated_data=None) encrypted_blob = hexlify(encrypted_blob).decode() - logger.info("creating new blob", + logger.info("Creating new blob", master_key=hexlify(master_key).decode(), sk=hexlify(sk).decode(), nonce=hexlify(nonce).decode(), diff --git a/pisa/api.py b/pisa/api.py index 1ab768d..48b6c46 100644 --- a/pisa/api.py +++ b/pisa/api.py @@ -24,7 +24,7 @@ def add_appointment(): remote_addr = request.environ.get('REMOTE_ADDR') remote_port = request.environ.get('REMOTE_PORT') - logger.info('connection accepted', from_addr_port='{}:{}'.format(remote_addr, remote_port)) + logger.info('Connection accepted', from_addr_port='{}:{}'.format(remote_addr, remote_port)) # Check content type once if properly defined request_data = json.loads(request.get_json()) @@ -50,7 +50,7 @@ def add_appointment(): rcode = HTTP_BAD_REQUEST response = "appointment rejected. 
Request does not match the standard" - logger.info('sending response and disconnecting', + logger.info('Sending response and disconnecting', from_addr_port='{}:{}'.format(remote_addr, remote_port), response=response) return Response(response, status=rcode, mimetype='text/plain') diff --git a/pisa/block_processor.py b/pisa/block_processor.py index c139f86..fdd71a8 100644 --- a/pisa/block_processor.py +++ b/pisa/block_processor.py @@ -17,7 +17,7 @@ class BlockProcessor: except JSONRPCException as e: block = None - logger.error("couldn't get block from bitcoind.", error_code=e) + logger.error("Couldn't get block from bitcoind.", error_code=e) return block @@ -29,7 +29,7 @@ class BlockProcessor: except JSONRPCException as e: block_hash = None - logger.error("couldn't get block hash.", error_code=e) + logger.error("Couldn't get block hash.", error_code=e) return block_hash @@ -41,7 +41,7 @@ class BlockProcessor: except JSONRPCException as e: block_count = None - logger.error("couldn't get block block count", error_code=e) + logger.error("Couldn't get block count", error_code=e) return block_count @@ -57,10 +57,10 @@ class BlockProcessor: potential_matches = {locator: potential_locators[locator] for locator in intersection} if len(potential_matches) > 0: - logger.info("list of potential matches", potential_matches=potential_matches) + logger.info("List of potential matches", potential_matches=potential_matches) else: - logger.info("no potential matches found") + logger.info("No potential matches found") return potential_matches @@ -76,12 +76,12 @@ class BlockProcessor: justice_txid = bitcoin_cli.decoderawtransaction(justice_rawtx).get('txid') matches.append((locator, uuid, dispute_txid, justice_txid, justice_rawtx)) - logger.info("match found for locator.", locator=locator, uuid=uuid, justice_txid=justice_txid) + logger.info("Match found for locator.", locator=locator, uuid=uuid, justice_txid=justice_txid) except JSONRPCException as e: # Tx decode failed returns error code -22, maybe we should be more strict here. Leaving it simple # for the POC - logger.error("can't build transaction from decoded data.", error_code=e) + logger.error("Can't build transaction from decoded data.", error_code=e) return matches @@ -93,7 +93,7 @@ class BlockProcessor: if tx in tx_job_map and tx in unconfirmed_txs: unconfirmed_txs.remove(tx) - logger.info("confirmation received for transaction", tx=tx) + logger.info("Confirmation received for transaction", tx=tx) elif tx in unconfirmed_txs: if tx in missed_confirmations: @@ -102,6 +102,6 @@ class BlockProcessor: else: missed_confirmations[tx] = 1 - logger.info("transaction missed a confirmation", tx=tx, missed_confirmations=missed_confirmations[tx]) + logger.info("Transaction missed a confirmation", tx=tx, missed_confirmations=missed_confirmations[tx]) return unconfirmed_txs, missed_confirmations diff --git a/pisa/carrier.py b/pisa/carrier.py index 3e11028..5658151 100644 --- a/pisa/carrier.py +++ b/pisa/carrier.py @@ -16,7 +16,7 @@ class Carrier: def send_transaction(self, rawtx, txid): try: - logger.info("pushing transaction to the network", txid=txid, rawtx=rawtx) + logger.info("Pushing transaction to the network", txid=txid, rawtx=rawtx) bitcoin_cli.sendrawtransaction(rawtx) receipt = self.Receipt(delivered=True) @@ -69,7 +69,7 @@ class Carrier: # reorged while we were querying bitcoind to get the confirmation count. 
In such a case we just # restart the job if e.error.get('code') == RPC_INVALID_ADDRESS_OR_KEY: - logger.info("transaction got reorged before obtaining information", txid=txid) + logger.info("Transaction got reorged before obtaining information", txid=txid) # TODO: Check RPC methods to see possible returns and avoid general else # else: diff --git a/pisa/cleaner.py b/pisa/cleaner.py index 0dad0da..00076b4 100644 --- a/pisa/cleaner.py +++ b/pisa/cleaner.py @@ -20,12 +20,12 @@ class Cleaner: else: locator_uuid_map[locator].remove(uuid) - logger.info("end time reached with no match! Deleting appointment.", locator=locator, uuid=uuid) + logger.info("End time reached with no match. Deleting appointment.", locator=locator, uuid=uuid) @staticmethod def delete_completed_jobs(jobs, tx_job_map, completed_jobs, height): for uuid, confirmations in completed_jobs: - logger.info("job completed. Appointment ended after reaching enough confirmations.", + logger.info("Job completed. Appointment ended after reaching enough confirmations.", uuid=uuid, height=height, confirmations=confirmations) # ToDo: #9-add-data-persistence @@ -35,7 +35,7 @@ class Cleaner: if len(tx_job_map[justice_txid]) == 1: tx_job_map.pop(justice_txid) - logger.info("no more jobs for justice transaction.", justice_txid=justice_txid) + logger.info("No more jobs for justice transaction.", justice_txid=justice_txid) else: tx_job_map[justice_txid].remove(uuid) diff --git a/pisa/encrypted_blob.py b/pisa/encrypted_blob.py index d9a02ba..68c1fe5 100644 --- a/pisa/encrypted_blob.py +++ b/pisa/encrypted_blob.py @@ -24,7 +24,7 @@ class EncryptedBlob: sk = master_key[:16] nonce = master_key[16:] - logger.info("[Watcher] creating new blob.", + logger.info("Creating new blob.", master_key=hexlify(master_key).decode(), sk=hexlify(sk).decode(), nonce=hexlify(sk).decode(), diff --git a/pisa/pisad.py b/pisa/pisad.py index c990a2e..918ec76 100644 --- a/pisa/pisad.py +++ b/pisa/pisad.py @@ -1,7 +1,6 @@ from sys import argv from getopt import getopt -from pisa import logging from pisa.logger import Logger from pisa.api import start_api from pisa.tools import can_connect_to_bitcoind, in_correct_network diff --git a/pisa/responder.py b/pisa/responder.py index 9b2b1f8..bbb4d71 100644 --- a/pisa/responder.py +++ b/pisa/responder.py @@ -47,7 +47,7 @@ class Responder: def add_response(self, uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, retry=False): if self.asleep: - logger.info("waking up!") + logger.info("Waking up") carrier = Carrier() receipt = carrier.send_transaction(justice_rawtx, justice_txid) @@ -82,7 +82,7 @@ class Responder: if confirmations == 0: self.unconfirmed_txs.append(justice_txid) - logger.info("new job added.", + logger.info("New job added.", dispute_txid=dispute_txid, justice_txid=justice_txid, appointment_end=appointment_end) if self.asleep: @@ -111,7 +111,7 @@ class Responder: txs = block.get('tx') height = block.get('height') - logger.info("new block received", + logger.info("New block received", block_hash=block_hash, prev_block_hash=block.get('previousblockhash'), txs=txs) # ToDo: #9-add-data-persistence @@ -126,9 +126,9 @@ class Responder: self.rebroadcast(txs_to_rebroadcast) else: - logger.warning("reorg found!", - local_prev_block_hash=prev_block_hash, - remote_prev_block_hash=block.get('previousblockhash')) + logger.warn("Reorg found", + local_prev_block_hash=prev_block_hash, + remote_prev_block_hash=block.get('previousblockhash')) self.handle_reorgs() @@ -138,7 +138,7 @@ class Responder: self.asleep = True 
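        # Flipping `terminate` on the next line is presumably what makes
        # ZMQHandler.handle() drop out of its receive loop (the flag is
        # initialised to False in zmq_subscriber.py above); the Responder then
        # stays asleep until add_response() is called again ("Waking up").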
self.zmq_subscriber.terminate = True - logger.info("no more pending jobs, going back to sleep") + logger.info("No more pending jobs, going back to sleep") def get_txs_to_rebroadcast(self, txs): txs_to_rebroadcast = [] @@ -174,25 +174,25 @@ class Responder: self.add_response(uuid, self.jobs[uuid].dispute_txid, self.jobs[uuid].justice_txid, self.jobs[uuid].justice_rawtx, self.jobs[uuid].appointment_end, retry=True) - logger.warning("Transaction has missed many confirmations. Rebroadcasting.", - justice_txid=self.jobs[uuid].justice_txid, - confirmations_missed=CONFIRMATIONS_BEFORE_RETRY) + logger.warn("Transaction has missed many confirmations. Rebroadcasting.", + justice_txid=self.jobs[uuid].justice_txid, + confirmations_missed=CONFIRMATIONS_BEFORE_RETRY) # FIXME: Legacy code, must be checked and updated/fixed def handle_reorgs(self): for uuid, job in self.jobs.items(): # First we check if the dispute transaction is still in the blockchain. If not, the justice can not be # there either, so we'll need to call the reorg manager straight away - dispute_in_chain, _ = check_tx_in_chain(job.dispute_txid, logger=logger, tx_label='dispute tx') + dispute_in_chain, _ = check_tx_in_chain(job.dispute_txid, logger=logger, tx_label='Dispute tx') # If the dispute is there, we can check the justice tx if dispute_in_chain: justice_in_chain, justice_confirmations = check_tx_in_chain(job.justice_txid, logger=logger, - tx_label='justice tx') + tx_label='Justice tx') # If both transactions are there, we only need to update the justice tx confirmation count if justice_in_chain: - logger.info("updating confirmation count for transaction.", + logger.info("Updating confirmation count for transaction.", justice_txid=job.justice_txid, prev_count=job.confirmations, curr_count=justice_confirmations) @@ -210,5 +210,5 @@ class Responder: # ToDo: #24-properly-handle-reorgs # FIXME: if the dispute is not on chain (either in mempool or not there at all), we need to call the # reorg manager - logger.warning("dispute and justice transaction missing. Calling the reorg manager") - logger.error("reorg manager not yet implemented") + logger.warn("Dispute and justice transaction missing. 
Calling the reorg manager") + logger.error("Reorg manager not yet implemented") diff --git a/pisa/tools.py b/pisa/tools.py index ddc4ab5..db6c33d 100644 --- a/pisa/tools.py +++ b/pisa/tools.py @@ -8,7 +8,8 @@ from pisa.utils.auth_proxy import JSONRPCException from pisa.rpc_errors import RPC_INVALID_ADDRESS_OR_KEY -def check_tx_in_chain(tx_id, logger=Logger(), tx_label='transaction'): +# TODO: currently only used in the Responder; might move there or in the BlockProcessor +def check_tx_in_chain(tx_id, logger=Logger(), tx_label='Transaction'): tx_in_chain = False confirmations = 0 diff --git a/pisa/utils/zmq_subscriber.py b/pisa/utils/zmq_subscriber.py index f0ac469..76f0150 100644 --- a/pisa/utils/zmq_subscriber.py +++ b/pisa/utils/zmq_subscriber.py @@ -30,4 +30,4 @@ class ZMQHandler: block_hash = binascii.hexlify(body).decode('UTF-8') block_queue.put(block_hash) - self.logger.info("new block received via ZMQ", block_hash=block_hash) + self.logger.info("New block received via ZMQ", block_hash=block_hash) diff --git a/pisa/watcher.py b/pisa/watcher.py index 6ec6d30..45ec563 100644 --- a/pisa/watcher.py +++ b/pisa/watcher.py @@ -53,16 +53,16 @@ class Watcher: zmq_thread.start() watcher.start() - logger.info("waking up!") + logger.info("Waking up") appointment_added = True - logger.info("new appointment accepted.", locator=appointment.locator) + logger.info("New appointment accepted.", locator=appointment.locator) else: appointment_added = False - logger.info("maximum appointments reached, appointment rejected.", locator=appointment.locator) + logger.info("Maximum appointments reached, appointment rejected.", locator=appointment.locator) return appointment_added @@ -73,14 +73,14 @@ class Watcher: def do_watch(self): while len(self.appointments) > 0: block_hash = self.block_queue.get() - logger.info("new block received", block_hash=block_hash) + logger.info("New block received", block_hash=block_hash) block = BlockProcessor.get_block(block_hash) if block is not None: txids = block.get('tx') - logger.info("list of transactions.", txids=txids) + logger.info("List of transactions.", txids=txids) expired_appointments = [uuid for uuid, appointment in self.appointments.items() if block["height"] > appointment.end_time + EXPIRY_DELTA] @@ -91,7 +91,7 @@ class Watcher: matches = BlockProcessor.get_matches(potential_matches, self.locator_uuid_map, self.appointments) for locator, uuid, dispute_txid, justice_txid, justice_rawtx in matches: - logger.info("notifying responder and deleting appointment.", + logger.info("Notifying responder and deleting appointment.", justice_txid=justice_txid, locator=locator, uuid=uuid) self.responder.add_response(uuid, dispute_txid, justice_txid, justice_rawtx, @@ -113,4 +113,4 @@ class Watcher: self.asleep = True self.zmq_subscriber.terminate = True - logger.error("no more pending appointments, going back to sleep") + logger.error("No more pending appointments, going back to sleep") From 9f25ef8603790871c142b34efd5eabf0b5c2098d Mon Sep 17 00:00:00 2001 From: Salvatore Ingala <6681844+bigspider@users.noreply.github.com> Date: Thu, 10 Oct 2019 18:22:33 +0700 Subject: [PATCH 44/82] Fixes from PR review --- apps/cli/pisa-cli.py | 41 +++++++++++++++++-------------- pisa/api.py | 3 +-- pisa/logger.py | 5 +++- pisa/pisad.py | 4 +-- pisa/watcher.py | 1 + test/unit/test_api.py | 5 ---- test/unit/test_blob.py | 2 -- test/unit/test_block_processor.py | 1 - test/unit/test_cleaner.py | 1 - test/unit/test_encrypted_blob.py | 3 --- test/unit/test_inspector.py | 1 - test/unit/test_tools.py 
| 5 ++-- 12 files changed, 33 insertions(+), 39 deletions(-) diff --git a/apps/cli/pisa-cli.py b/apps/cli/pisa-cli.py index 6327214..8b181a1 100644 --- a/apps/cli/pisa-cli.py +++ b/apps/cli/pisa-cli.py @@ -2,7 +2,6 @@ import re import os import sys import json -import logging import requests from sys import argv from hashlib import sha256 @@ -10,11 +9,15 @@ from binascii import unhexlify from getopt import getopt, GetoptError from requests import ConnectTimeout, ConnectionError +from pisa.logger import Logger from apps.cli.blob import Blob from apps.cli.help import help_add_appointment, help_get_appointment from apps.cli import DEFAULT_PISA_API_SERVER, DEFAULT_PISA_API_PORT +logger = Logger("Client") + + # FIXME: TESTING ENDPOINT, WON'T BE THERE IN PRODUCTION def generate_dummy_appointment(): get_block_count_end_point = "http://{}:{}/get_block_count".format(pisa_api_server, pisa_api_port) @@ -49,14 +52,14 @@ def add_appointment(args): if os.path.isfile(fin): appointment_data = json.load(open(fin)) else: - logging.error("[Client] can't find file " + fin) + logger.error("Can't find file " + fin) else: - logging.error("[Client] no file provided as appointment. " + use_help) + logger.error("No file provided as appointment. " + use_help) else: appointment_data = json.loads(arg_opt) except json.JSONDecodeError: - logging.error("[Client] non-JSON encoded data provided as appointment. " + use_help) + logger.error("Non-JSON encoded data provided as appointment. " + use_help) if appointment_data: valid_locator = check_txid_format(appointment_data.get('tx_id')) @@ -67,22 +70,22 @@ def add_appointment(args): appointment_data.get('start_time'), appointment_data.get('end_time'), appointment_data.get('dispute_delta')) - logging.info("[Client] sending appointment to PISA") + logger.info("Sending appointment to PISA") try: r = requests.post(url=add_appointment_endpoint, json=json.dumps(appointment), timeout=5) - logging.info("[Client] {} (code: {}).".format(r.text, r.status_code)) + logger.info("{} (code: {}).".format(r.text, r.status_code)) except ConnectTimeout: - logging.error("[Client] can't connect to pisa API. Connection timeout.") + logger.error("Can't connect to pisa API. Connection timeout.") except ConnectionError: - logging.error("[Client] can't connect to pisa API. Server cannot be reached.") + logger.error("Can't connect to pisa API. Server cannot be reached.") else: - logging.error("[Client] the provided locator is not valid.") + logger.error("The provided locator is not valid.") else: - logging.error("[Client] no appointment data provided. " + use_help) + logger.error("No appointment data provided. " + use_help) def get_appointment(args): @@ -104,16 +107,16 @@ def get_appointment(args): print(json.dumps(r.json(), indent=4, sort_keys=True)) except ConnectTimeout: - logging.error("[Client] can't connect to pisa API. Connection timeout.") + logger.error("Can't connect to pisa API. Connection timeout.") except ConnectionError: - logging.error("[Client] can't connect to pisa API. Server cannot be reached.") + logger.error("Can't connect to pisa API. 
Server cannot be reached.") else: - logging.error("[Client] the provided locator is not valid.") + logger.error("The provided locator is not valid.") else: - logging.error("[Client] the provided locator is not valid.") + logger.error("The provided locator is not valid.") def build_appointment(tx, tx_id, start_block, end_block, dispute_delta): @@ -199,7 +202,7 @@ if __name__ == '__main__': sys.exit(help_get_appointment()) else: - logging.error("[Client] unknown command. Use help to check the list of available commands") + logger.error("Unknown command. Use help to check the list of available commands") else: sys.exit(show_usage()) @@ -210,14 +213,14 @@ if __name__ == '__main__': generate_dummy_appointment() else: - logging.error("[Client] unknown command. Use help to check the list of available commands") + logger.error("Unknown command. Use help to check the list of available commands") else: - logging.error("[Client] no command provided. Use help to check the list of available commands.") + logger.error("No command provided. Use help to check the list of available commands.") except GetoptError as e: - logging.error("[Client] {}".format(e)) + logger.error("{}".format(e)) except json.JSONDecodeError as e: - logging.error("[Client] non-JSON encoded appointment passed as parameter.") + logger.error("Non-JSON encoded appointment passed as parameter.") diff --git a/pisa/api.py b/pisa/api.py index 48b6c46..cd05e02 100644 --- a/pisa/api.py +++ b/pisa/api.py @@ -1,11 +1,10 @@ import json from flask import Flask, request, Response, abort, jsonify -from pisa import HOST, PORT, logging, bitcoin_cli +from pisa import HOST, PORT, logging from pisa.logger import Logger from pisa.watcher import Watcher from pisa.inspector import Inspector -from pisa import HOST, PORT, logging from pisa.appointment import Appointment from pisa.block_processor import BlockProcessor diff --git a/pisa/logger.py b/pisa/logger.py index 3dad0e0..95c4733 100644 --- a/pisa/logger.py +++ b/pisa/logger.py @@ -27,4 +27,7 @@ class Logger(object): logging.debug(StructuredMessage(self._add_prefix(msg), actor=self.actor, **kwargs)) def error(self, msg, **kwargs): - logging.error(StructuredMessage(self._add_prefix(msg), actor=self.actor, **kwargs)) \ No newline at end of file + logging.error(StructuredMessage(self._add_prefix(msg), actor=self.actor, **kwargs)) + + def warn(self, msg, **kwargs): + logging.warn(StructuredMessage(self._add_prefix(msg), actor=self.actor, **kwargs)) diff --git a/pisa/pisad.py b/pisa/pisad.py index 918ec76..152d7c9 100644 --- a/pisa/pisad.py +++ b/pisa/pisad.py @@ -5,7 +5,7 @@ from pisa.logger import Logger from pisa.api import start_api from pisa.tools import can_connect_to_bitcoind, in_correct_network -logger = Logger("Pisad") +logger = Logger("Daemon") if __name__ == '__main__': debug = False @@ -23,4 +23,4 @@ if __name__ == '__main__': logger.error("bitcoind is running on a different network, check conf.py and bitcoin.conf. Shutting down") else: - logger.error("can't connect to bitcoind. Shutting down") + logger.error("Can't connect to bitcoind. 
Shutting down") diff --git a/pisa/watcher.py b/pisa/watcher.py index 45ec563..5248b77 100644 --- a/pisa/watcher.py +++ b/pisa/watcher.py @@ -12,6 +12,7 @@ from pisa.utils.zmq_subscriber import ZMQHandler logger = Logger("Watcher") + class Watcher: def __init__(self, max_appointments=MAX_APPOINTMENTS): self.appointments = dict() diff --git a/test/unit/test_api.py b/test/unit/test_api.py index 8505bfa..5e4a6ee 100644 --- a/test/unit/test_api.py +++ b/test/unit/test_api.py @@ -183,8 +183,3 @@ def test_get_all_appointments_responder(): assert (set(responder_jobs) == set(local_locators)) assert (len(received_appointments["watcher_appointments"]) == 0) - - - - - diff --git a/test/unit/test_blob.py b/test/unit/test_blob.py index efd9e1a..7eb3418 100644 --- a/test/unit/test_blob.py +++ b/test/unit/test_blob.py @@ -87,5 +87,3 @@ def test_encrypt(): encrypted_blob2 = blob.encrypt(key) assert(encrypted_blob == encrypted_blob2 and id(encrypted_blob) != id(encrypted_blob2)) - - diff --git a/test/unit/test_block_processor.py b/test/unit/test_block_processor.py index b0a2bba..a57fbb0 100644 --- a/test/unit/test_block_processor.py +++ b/test/unit/test_block_processor.py @@ -73,4 +73,3 @@ def test_potential_matches_random_data(locator_uuid_map): # None of the txids should match assert len(potential_matches) == 0 - diff --git a/test/unit/test_cleaner.py b/test/unit/test_cleaner.py index 5206118..237a2e3 100644 --- a/test/unit/test_cleaner.py +++ b/test/unit/test_cleaner.py @@ -78,4 +78,3 @@ def test_delete_completed_jobs(): Cleaner.delete_completed_jobs(jobs, tx_job_map, completed_jobs, 0) assert not set(completed_jobs).issubset(jobs.keys()) - diff --git a/test/unit/test_encrypted_blob.py b/test/unit/test_encrypted_blob.py index 096c316..67270c2 100644 --- a/test/unit/test_encrypted_blob.py +++ b/test/unit/test_encrypted_blob.py @@ -34,6 +34,3 @@ def test_decrypt(): encrypted_blob = EncryptedBlob(encrypted_data) assert(encrypted_blob.decrypt(key) == data) - - - diff --git a/test/unit/test_inspector.py b/test/unit/test_inspector.py index 3aa68f6..551cfe3 100644 --- a/test/unit/test_inspector.py +++ b/test/unit/test_inspector.py @@ -230,4 +230,3 @@ def test_inspect(): and appointment.end_time == end_time and appointment.dispute_delta == dispute_delta and appointment.encrypted_blob.data == encrypted_blob and appointment.cipher == cipher and appointment.hash_function == hash_function) - diff --git a/test/unit/test_tools.py b/test/unit/test_tools.py index ba5b834..0e96b7c 100644 --- a/test/unit/test_tools.py +++ b/test/unit/test_tools.py @@ -8,9 +8,10 @@ def test_check_txid_format(): assert(check_txid_format(None) is False) assert(check_txid_format("") is False) assert(check_txid_format(0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef) is False) # wrong type - assert(check_txid_format("0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef") is True) # lowercase - assert(check_txid_format("0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF") is True) # uppercase + assert(check_txid_format("abcdefabcdefabcdefabcdefabcdefabcdefabcdefabcdefabcdefabcdefabcd") is True) # lowercase + assert(check_txid_format("ABCDEFABCDEFABCDEFABCDEFABCDEFABCDEFABCDEFABCDEFABCDEFABCDEFABCD") is True) # uppercase assert(check_txid_format("0123456789abcdef0123456789ABCDEF0123456789abcdef0123456789ABCDEF") is True) # mixed case + assert(check_txid_format("0123456789012345678901234567890123456789012345678901234567890123") is True) # only nums 
assert(check_txid_format("0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdf") is False) # too short assert(check_txid_format("0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0") is False) # too long assert(check_txid_format("g123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef") is False) # non-hex From eb2734ccfe5e51983e72aa8c17cec89575d6c114 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Thu, 10 Oct 2019 12:34:18 +0100 Subject: [PATCH 45/82] Improves carrier - Takes receipt out of Carrier - Adds missing cases (implementation missing) - Adds notes on what to check --- pisa/carrier.py | 41 ++++++++++++++++++++++++++++------------- 1 file changed, 28 insertions(+), 13 deletions(-) diff --git a/pisa/carrier.py b/pisa/carrier.py index eb0319d..2286070 100644 --- a/pisa/carrier.py +++ b/pisa/carrier.py @@ -4,19 +4,20 @@ from pisa.utils.auth_proxy import JSONRPCException from pisa.errors import UNKNOWN_JSON_RPC_EXCEPTION -class Carrier: - class Receipt: - def __init__(self, delivered, confirmations=0, reason=None): - self.delivered = delivered - self.confirmations = confirmations - self.reason = reason +class Receipt: + def __init__(self, delivered, confirmations=0, reason=None): + self.delivered = delivered + self.confirmations = confirmations + self.reason = reason + +class Carrier: def send_transaction(self, rawtx, txid): try: logging.info("[Carrier] pushing transaction to the network (txid: {})".format(rawtx)) bitcoin_cli.sendrawtransaction(rawtx) - receipt = self.Receipt(delivered=True) + receipt = Receipt(delivered=True) except JSONRPCException as e: errno = e.error.get('code') @@ -26,10 +27,18 @@ class Carrier: if errno == RPC_VERIFY_REJECTED: # DISCUSS: what to do in this case # DISCUSS: invalid transactions (properly formatted but invalid, like unsigned) fit here too. - # DISCUSS: RPC_VERIFY_ERROR could also be a possible case. # DISCUSS: check errors -9 and -10 # TODO: UNKNOWN_JSON_RPC_EXCEPTION is not the proper exception here. This is long due. - receipt = self.Receipt(delivered=False, reason=UNKNOWN_JSON_RPC_EXCEPTION) + receipt = Receipt(delivered=False, reason=UNKNOWN_JSON_RPC_EXCEPTION) + + elif errno == RPC_VERIFY_ERROR: + # DISCUSS: The only reason for it seems to bea non-existing or spent input. + # https://github.com/bitcoin/bitcoin/blob/master/src/rpc/rawtransaction.cpp#L660 + # However RPC_TRANSACTION_ERROR aliases RPC_VERIFY_ERROR and it's the default return for + # RPCErrorFromTransactionError + # https://github.com/bitcoin/bitcoin/blob/master/src/rpc/util.cpp#L276 + # TODO: UNKNOWN_JSON_RPC_EXCEPTION is not the proper exception here. This is long due. + receipt = Receipt(delivered=False, reason=UNKNOWN_JSON_RPC_EXCEPTION) elif errno == RPC_VERIFY_ALREADY_IN_CHAIN: logging.info("[Carrier] {} is already in the blockchain. Getting confirmation count".format(txid)) @@ -40,28 +49,34 @@ class Carrier: if tx_info is not None: confirmations = int(tx_info.get("confirmations")) - receipt = self.Receipt(delivered=True, confirmations=confirmations) + receipt = Receipt(delivered=True, confirmations=confirmations, reason=RPC_VERIFY_ALREADY_IN_CHAIN) else: # There's a really unlike edge case where a transaction can be reorged between receiving the # notification and querying the data. In such a case we just resend self.send_transaction(rawtx, txid) + elif errno == RPC_DESERIALIZATION_ERROR: + # Adding this here just for completeness. We should never end up here. 
The Carrier only sends txs + # handed by the Responder, who receives them from the Watcher, who checks that the tx can be properly + # deserialized + logging.info("[Carrier] tx {} cannot be deserialized".format(txid)) + receipt = Receipt(delivered=False, reason=RPC_DESERIALIZATION_ERROR) + else: # If something else happens (unlikely but possible) log it so we can treat it in future releases logging.error("[Responder] JSONRPCException. Error {}".format(e)) - receipt = self.Receipt(delivered=False, reason=UNKNOWN_JSON_RPC_EXCEPTION) + receipt = Receipt(delivered=False, reason=UNKNOWN_JSON_RPC_EXCEPTION) return receipt @staticmethod def get_transaction(txid): - tx_info = None - try: tx_info = bitcoin_cli.getrawtransaction(txid, 1) except JSONRPCException as e: + tx_info = None # While it's quite unlikely, the transaction that was already in the blockchain could have been # reorged while we were querying bitcoind to get the confirmation count. In such a case we just # restart the job From d05c2c21b785af3a72f5ccf38a76563f058e6f6f Mon Sep 17 00:00:00 2001 From: Salvatore Ingala <6681844+bigspider@users.noreply.github.com> Date: Thu, 10 Oct 2019 18:35:31 +0700 Subject: [PATCH 46/82] warn ==> warning --- pisa/logger.py | 4 ++-- pisa/responder.py | 14 +++++++------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/pisa/logger.py b/pisa/logger.py index 95c4733..e6a1f4e 100644 --- a/pisa/logger.py +++ b/pisa/logger.py @@ -29,5 +29,5 @@ class Logger(object): def error(self, msg, **kwargs): logging.error(StructuredMessage(self._add_prefix(msg), actor=self.actor, **kwargs)) - def warn(self, msg, **kwargs): - logging.warn(StructuredMessage(self._add_prefix(msg), actor=self.actor, **kwargs)) + def warning(self, msg, **kwargs): + logging.warning(StructuredMessage(self._add_prefix(msg), actor=self.actor, **kwargs)) diff --git a/pisa/responder.py b/pisa/responder.py index bbb4d71..87f64ec 100644 --- a/pisa/responder.py +++ b/pisa/responder.py @@ -126,9 +126,9 @@ class Responder: self.rebroadcast(txs_to_rebroadcast) else: - logger.warn("Reorg found", - local_prev_block_hash=prev_block_hash, - remote_prev_block_hash=block.get('previousblockhash')) + logger.warning("Reorg found", + local_prev_block_hash=prev_block_hash, + remote_prev_block_hash=block.get('previousblockhash')) self.handle_reorgs() @@ -174,9 +174,9 @@ class Responder: self.add_response(uuid, self.jobs[uuid].dispute_txid, self.jobs[uuid].justice_txid, self.jobs[uuid].justice_rawtx, self.jobs[uuid].appointment_end, retry=True) - logger.warn("Transaction has missed many confirmations. Rebroadcasting.", - justice_txid=self.jobs[uuid].justice_txid, - confirmations_missed=CONFIRMATIONS_BEFORE_RETRY) + logger.warning("Transaction has missed many confirmations. Rebroadcasting.", + justice_txid=self.jobs[uuid].justice_txid, + confirmations_missed=CONFIRMATIONS_BEFORE_RETRY) # FIXME: Legacy code, must be checked and updated/fixed def handle_reorgs(self): @@ -210,5 +210,5 @@ class Responder: # ToDo: #24-properly-handle-reorgs # FIXME: if the dispute is not on chain (either in mempool or not there at all), we need to call the # reorg manager - logger.warn("Dispute and justice transaction missing. Calling the reorg manager") + logger.warning("Dispute and justice transaction missing. 
Calling the reorg manager") logger.error("Reorg manager not yet implemented") From 6b058dfaf9189b34d043d49f8579ca0e810fe591 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Thu, 10 Oct 2019 13:01:21 +0100 Subject: [PATCH 47/82] Fixes a couple of bugs from the last merge --- pisa/carrier.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pisa/carrier.py b/pisa/carrier.py index 20990fa..79c1972 100644 --- a/pisa/carrier.py +++ b/pisa/carrier.py @@ -63,13 +63,13 @@ class Carrier: # Adding this here just for completeness. We should never end up here. The Carrier only sends txs # handed by the Responder, who receives them from the Watcher, who checks that the tx can be properly # deserialized - logging.info("[Carrier] tx {} cannot be deserialized".format(txid)) + logger.info("[Carrier] tx {} cannot be deserialized".format(txid)) receipt = Receipt(delivered=False, reason=RPC_DESERIALIZATION_ERROR) else: # If something else happens (unlikely but possible) log it so we can treat it in future releases logger.error("JSONRPCException.", error_code=e) - receipt = self.Receipt(delivered=False, reason=UNKNOWN_JSON_RPC_EXCEPTION) + receipt = Receipt(delivered=False, reason=UNKNOWN_JSON_RPC_EXCEPTION) return receipt From 15c78072c983971c9f7cd8bad42278fc97b1ce8c Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Thu, 10 Oct 2019 16:22:46 +0100 Subject: [PATCH 48/82] Cleans comments + added method in generic exceptions tx rejection comments moved to #37 --- pisa/carrier.py | 25 ++++++++----------------- pisa/tools.py | 2 +- 2 files changed, 9 insertions(+), 18 deletions(-) diff --git a/pisa/carrier.py b/pisa/carrier.py index 79c1972..6d357cb 100644 --- a/pisa/carrier.py +++ b/pisa/carrier.py @@ -24,22 +24,14 @@ class Carrier: except JSONRPCException as e: errno = e.error.get('code') - # Since we're pushing a raw transaction to the network we can get two kind of rejections: - # RPC_VERIFY_REJECTED and RPC_VERIFY_ALREADY_IN_CHAIN. The former implies that the transaction is rejected - # due to network rules, whereas the later implies that the transaction is already in the blockchain. + # Since we're pushing a raw transaction to the network we can face several rejections if errno == RPC_VERIFY_REJECTED: - # DISCUSS: what to do in this case - # DISCUSS: invalid transactions (properly formatted but invalid, like unsigned) fit here too. - # DISCUSS: check errors -9 and -10 + # DISCUSS: 37-transaction-rejection # TODO: UNKNOWN_JSON_RPC_EXCEPTION is not the proper exception here. This is long due. receipt = Receipt(delivered=False, reason=UNKNOWN_JSON_RPC_EXCEPTION) elif errno == RPC_VERIFY_ERROR: - # DISCUSS: The only reason for it seems to bea non-existing or spent input. - # https://github.com/bitcoin/bitcoin/blob/master/src/rpc/rawtransaction.cpp#L660 - # However RPC_TRANSACTION_ERROR aliases RPC_VERIFY_ERROR and it's the default return for - # RPCErrorFromTransactionError - # https://github.com/bitcoin/bitcoin/blob/master/src/rpc/util.cpp#L276 + # DISCUSS: 37-transaction-rejection # TODO: UNKNOWN_JSON_RPC_EXCEPTION is not the proper exception here. This is long due. receipt = Receipt(delivered=False, reason=UNKNOWN_JSON_RPC_EXCEPTION) @@ -63,12 +55,12 @@ class Carrier: # Adding this here just for completeness. We should never end up here. 
The Carrier only sends txs # handed by the Responder, who receives them from the Watcher, who checks that the tx can be properly # deserialized - logger.info("[Carrier] tx {} cannot be deserialized".format(txid)) + logger.info("Transaction cannot be deserialized".format(txid)) receipt = Receipt(delivered=False, reason=RPC_DESERIALIZATION_ERROR) else: # If something else happens (unlikely but possible) log it so we can treat it in future releases - logger.error("JSONRPCException.", error_code=e) + logger.error("JSONRPCException.", method='Carrier.send_transaction', error_code=e) receipt = Receipt(delivered=False, reason=UNKNOWN_JSON_RPC_EXCEPTION) return receipt @@ -86,9 +78,8 @@ class Carrier: if e.error.get('code') == RPC_INVALID_ADDRESS_OR_KEY: logger.info("Transaction got reorged before obtaining information", txid=txid) - # TODO: Check RPC methods to see possible returns and avoid general else - # else: - # # If something else happens (unlikely but possible) log it so we can treat it in future releases - # logger.error("JSONRPCException.", error_code=e) + else: + # If something else happens (unlikely but possible) log it so we can treat it in future releases + logger.error("JSONRPCException.", method='Carrier.get_transaction', error_code=e) return tx_info diff --git a/pisa/tools.py b/pisa/tools.py index db6c33d..e06a62d 100644 --- a/pisa/tools.py +++ b/pisa/tools.py @@ -30,7 +30,7 @@ def check_tx_in_chain(tx_id, logger=Logger(), tx_label='Transaction'): else: # ToDO: Unhandled errors, check this properly - logger.error("JSONRPCException.", error_code=e) + logger.error("JSONRPCException.", method='tools.check_tx_in_chain', error_code=e) return tx_in_chain, confirmations From 960313877792aca88773a987f400cfab4546a665 Mon Sep 17 00:00:00 2001 From: Salvatore Ingala <6681844+bigspider@users.noreply.github.com> Date: Thu, 10 Oct 2019 12:09:11 +0700 Subject: [PATCH 49/82] Added SIGINT signal handler --- pisa/pisad.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/pisa/pisad.py b/pisa/pisad.py index 152d7c9..e1864c6 100644 --- a/pisa/pisad.py +++ b/pisa/pisad.py @@ -1,5 +1,6 @@ -from sys import argv +from sys import argv, exit from getopt import getopt +from signal import signal, SIGINT from pisa.logger import Logger from pisa.api import start_api @@ -7,7 +8,17 @@ from pisa.tools import can_connect_to_bitcoind, in_correct_network logger = Logger("Daemon") + +def handle_sigint(signal_received, frame): + print("Shutting down PISA...") + # TODO: add code to close the db, free any resources, etc. 
+ + exit(0) + + if __name__ == '__main__': + signal(SIGINT, handle_sigint) + debug = False opts, _ = getopt(argv[1:], 'd', ['debug']) for opt, arg in opts: From 99f3400d638d0f938ae1939c18d74660223226f6 Mon Sep 17 00:00:00 2001 From: Salvatore Ingala <6681844+bigspider@users.noreply.github.com> Date: Thu, 10 Oct 2019 22:37:06 +0700 Subject: [PATCH 50/82] Added SIGTERM and SIGQUIT --- pisa/pisad.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/pisa/pisad.py b/pisa/pisad.py index e1864c6..593c19b 100644 --- a/pisa/pisad.py +++ b/pisa/pisad.py @@ -1,6 +1,6 @@ from sys import argv, exit from getopt import getopt -from signal import signal, SIGINT +from signal import signal, SIGINT, SIGQUIT, SIGTERM from pisa.logger import Logger from pisa.api import start_api @@ -9,7 +9,7 @@ from pisa.tools import can_connect_to_bitcoind, in_correct_network logger = Logger("Daemon") -def handle_sigint(signal_received, frame): +def handle_signals(signal_received, frame): print("Shutting down PISA...") # TODO: add code to close the db, free any resources, etc. @@ -17,7 +17,9 @@ def handle_sigint(signal_received, frame): if __name__ == '__main__': - signal(SIGINT, handle_sigint) + signal(SIGINT, handle_signals) + signal(SIGTERM, handle_signals) + signal(SIGQUIT, handle_signals) debug = False opts, _ = getopt(argv[1:], 'd', ['debug']) From 6348319dfb3be2b9ce633d2ca332ccbb32a17a78 Mon Sep 17 00:00:00 2001 From: Salvatore Ingala <6681844+bigspider@users.noreply.github.com> Date: Fri, 11 Oct 2019 09:26:29 +0700 Subject: [PATCH 51/82] Using logging instead of print; added startup log message --- pisa/pisad.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pisa/pisad.py b/pisa/pisad.py index 593c19b..751aa53 100644 --- a/pisa/pisad.py +++ b/pisa/pisad.py @@ -10,13 +10,15 @@ logger = Logger("Daemon") def handle_signals(signal_received, frame): - print("Shutting down PISA...") + logger.info("Shutting down PISA") # TODO: add code to close the db, free any resources, etc. exit(0) if __name__ == '__main__': + logger.info("Starting PISA") + signal(SIGINT, handle_signals) signal(SIGTERM, handle_signals) signal(SIGQUIT, handle_signals) From 8acff789fe2985bfe0e863f8dc96a9a9e39fb69c Mon Sep 17 00:00:00 2001 From: Salvatore Ingala <6681844+bigspider@users.noreply.github.com> Date: Fri, 11 Oct 2019 11:27:03 +0700 Subject: [PATCH 52/82] Add reference to github issue for the Todo --- pisa/pisad.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pisa/pisad.py b/pisa/pisad.py index 751aa53..ac5bf86 100644 --- a/pisa/pisad.py +++ b/pisa/pisad.py @@ -11,7 +11,7 @@ logger = Logger("Daemon") def handle_signals(signal_received, frame): logger.info("Shutting down PISA") - # TODO: add code to close the db, free any resources, etc. + # TODO: #11-add-graceful-shutdown: add code to close the db, free any resources, etc. 
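# For reference, the registration pattern pisad converges on across patches
# 49-52, as a minimal self-contained sketch (standard library only; the loop
# is an editorial condensation, not code from the patches):
from signal import signal, SIGINT, SIGQUIT, SIGTERM
from sys import exit

def handle_signals(signal_received, frame):
    # cleanup (db, sockets, ...) goes here once #11-add-graceful-shutdown lands
    exit(0)

for sig in (SIGINT, SIGTERM, SIGQUIT):
    signal(sig, handle_signals)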
exit(0) From b78c6ebfe78ae1ff3cdf8368b32022f9ec0f4082 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Fri, 11 Oct 2019 19:07:46 +0100 Subject: [PATCH 53/82] Removes debug flags from old code --- pisa/pisad.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/pisa/pisad.py b/pisa/pisad.py index ac5bf86..44d9adc 100644 --- a/pisa/pisad.py +++ b/pisa/pisad.py @@ -1,5 +1,5 @@ -from sys import argv, exit from getopt import getopt +from sys import argv, exit from signal import signal, SIGINT, SIGQUIT, SIGTERM from pisa.logger import Logger @@ -23,8 +23,7 @@ if __name__ == '__main__': signal(SIGTERM, handle_signals) signal(SIGQUIT, handle_signals) - debug = False - opts, _ = getopt(argv[1:], 'd', ['debug']) + opts, _ = getopt(argv[1:], '', ['']) for opt, arg in opts: # FIXME: Leaving this here for future option/arguments pass From a916c67cc5ba50566c7f0ea6d25cb929b5372c8d Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Fri, 11 Oct 2019 19:32:44 +0100 Subject: [PATCH 54/82] Fixes error logging --- pisa/block_processor.py | 8 ++++---- pisa/carrier.py | 8 ++++---- pisa/tools.py | 2 +- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/pisa/block_processor.py b/pisa/block_processor.py index fdd71a8..11478b5 100644 --- a/pisa/block_processor.py +++ b/pisa/block_processor.py @@ -17,7 +17,7 @@ class BlockProcessor: except JSONRPCException as e: block = None - logger.error("Couldn't get block from bitcoind.", error_code=e) + logger.error("Couldn't get block from bitcoind.", error=e.error) return block @@ -29,7 +29,7 @@ class BlockProcessor: except JSONRPCException as e: block_hash = None - logger.error("Couldn't get block hash.", error_code=e) + logger.error("Couldn't get block hash.", error=e.error) return block_hash @@ -41,7 +41,7 @@ class BlockProcessor: except JSONRPCException as e: block_count = None - logger.error("Couldn't get block count", error_code=e) + logger.error("Couldn't get block count", error=e.error) return block_count @@ -81,7 +81,7 @@ class BlockProcessor: except JSONRPCException as e: # Tx decode failed returns error code -22, maybe we should be more strict here. Leaving it simple # for the POC - logger.error("Can't build transaction from decoded data.", error_code=e) + logger.error("Can't build transaction from decoded data.", error=e.error) return matches diff --git a/pisa/carrier.py b/pisa/carrier.py index 6d357cb..545e427 100644 --- a/pisa/carrier.py +++ b/pisa/carrier.py @@ -32,8 +32,8 @@ class Carrier: elif errno == RPC_VERIFY_ERROR: # DISCUSS: 37-transaction-rejection - # TODO: UNKNOWN_JSON_RPC_EXCEPTION is not the proper exception here. This is long due. - receipt = Receipt(delivered=False, reason=UNKNOWN_JSON_RPC_EXCEPTION) + receipt = Receipt(delivered=False, reason=RPC_VERIFY_ERROR) + logger.error("Transaction couldn't be broadcast", error=e.error) elif errno == RPC_VERIFY_ALREADY_IN_CHAIN: logger.info("Transaction is already in the blockchain. 
Getting confirmation count", txid=txid) @@ -60,7 +60,7 @@ class Carrier: else: # If something else happens (unlikely but possible) log it so we can treat it in future releases - logger.error("JSONRPCException.", method='Carrier.send_transaction', error_code=e) + logger.error("JSONRPCException.", method='Carrier.send_transaction', error=e.error) receipt = Receipt(delivered=False, reason=UNKNOWN_JSON_RPC_EXCEPTION) return receipt @@ -80,6 +80,6 @@ class Carrier: else: # If something else happens (unlikely but possible) log it so we can treat it in future releases - logger.error("JSONRPCException.", method='Carrier.get_transaction', error_code=e) + logger.error("JSONRPCException.", method='Carrier.get_transaction', error=e.error) return tx_info diff --git a/pisa/tools.py b/pisa/tools.py index e06a62d..c10f2a5 100644 --- a/pisa/tools.py +++ b/pisa/tools.py @@ -30,7 +30,7 @@ def check_tx_in_chain(tx_id, logger=Logger(), tx_label='Transaction'): else: # ToDO: Unhandled errors, check this properly - logger.error("JSONRPCException.", method='tools.check_tx_in_chain', error_code=e) + logger.error("JSONRPCException.", method='tools.check_tx_in_chain', error=e.error) return tx_in_chain, confirmations From 98c27f601378e2fa558e6835069a2d98176b5e22 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Fri, 11 Oct 2019 19:51:50 +0100 Subject: [PATCH 55/82] Adds basic unit tests for the Carrier Further testing is required for this one. --- pisa/carrier.py | 2 + test/unit/test_carrier.py | 81 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 83 insertions(+) create mode 100644 test/unit/test_carrier.py diff --git a/pisa/carrier.py b/pisa/carrier.py index 545e427..d4cfbed 100644 --- a/pisa/carrier.py +++ b/pisa/carrier.py @@ -6,6 +6,8 @@ from pisa.errors import UNKNOWN_JSON_RPC_EXCEPTION logger = Logger("Carrier") +# FIXME: This class is not fully covered by unit tests + class Receipt: def __init__(self, delivered, confirmations=0, reason=None): diff --git a/test/unit/test_carrier.py b/test/unit/test_carrier.py new file mode 100644 index 0000000..9dc0f44 --- /dev/null +++ b/test/unit/test_carrier.py @@ -0,0 +1,81 @@ +import pytest +from os import urandom +from time import sleep +from threading import Thread + +from pisa.carrier import Carrier +from pisa.rpc_errors import RPC_VERIFY_ALREADY_IN_CHAIN, RPC_DESERIALIZATION_ERROR +from test.simulator.bitcoind_sim import run_simulator, TIME_BETWEEN_BLOCKS + +# FIXME: This test do not fully cover the carrier since the simulator does not support every single error bitcoind may +# return for RPC_VERIFY_REJECTED and RPC_VERIFY_ERROR. 
Further development of the simulator / mocks or simulation +# with bitcoind is required + + +sent_txs = [] + + +@pytest.fixture(autouse=True, scope='session') +def run_bitcoind(): + bitcoind_thread = Thread(target=run_simulator) + bitcoind_thread.daemon = True + bitcoind_thread.start() + + # It takes a little bit of time to start the API (otherwise the requests are sent too early and they fail) + sleep(0.1) + + +@pytest.fixture(scope='session') +def carrier(): + return Carrier() + + +def test_send_transaction(carrier): + # We are mocking bitcoind and in our simulator txid == tx + tx = urandom(32).hex() + receipt = carrier.send_transaction(tx, tx) + + assert(receipt.delivered is True) + + +def test_send_double_spending_transaction(carrier): + # We can test what happens if the same transaction is sent twice + tx = urandom(32).hex() + receipt = carrier.send_transaction(tx, tx) + sent_txs.append(tx) + + # Wait for a block to be mined + sleep(TIME_BETWEEN_BLOCKS) + + # Try to send it again + receipt2 = carrier.send_transaction(tx, tx) + + # The carrier should report delivered True for both, but in the second case the transaction was already delivered + # (either by himself or someone else) + assert(receipt.delivered is True) + assert (receipt2.delivered is True and receipt2.confirmations == 1 + and receipt2.reason == RPC_VERIFY_ALREADY_IN_CHAIN) + + +def test_send_transaction_invalid_format(carrier): + # Test sending a transaction that does not fits the format + tx = urandom(31).hex() + receipt = carrier.send_transaction(tx, tx) + + assert (receipt.delivered is False and receipt.reason == RPC_DESERIALIZATION_ERROR) + + +def test_get_transaction(): + # We should be able to get back every transaction we've sent + for tx in sent_txs: + tx_info = Carrier.get_transaction(tx) + + assert tx_info is not None + + +def test_get_non_existing_transaction(): + tx_info = Carrier.get_transaction(urandom(32).hex()) + + assert tx_info is None + + From 6baa059431b4cfdad9496bea4b20cea60850055b Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Fri, 11 Oct 2019 20:54:31 +0100 Subject: [PATCH 56/82] Adds additional unit tests to tools --- pisa/pisad.py | 3 ++- pisa/tools.py | 9 ++++----- test/unit/test_tools.py | 37 ++++++++++++++++++++++++++++++++++++- 3 files changed, 42 insertions(+), 7 deletions(-) diff --git a/pisa/pisad.py b/pisa/pisad.py index 44d9adc..3bbf476 100644 --- a/pisa/pisad.py +++ b/pisa/pisad.py @@ -4,6 +4,7 @@ from signal import signal, SIGINT, SIGQUIT, SIGTERM from pisa.logger import Logger from pisa.api import start_api +from pisa.conf import BTC_NETWORK from pisa.tools import can_connect_to_bitcoind, in_correct_network logger = Logger("Daemon") @@ -29,7 +30,7 @@ if __name__ == '__main__': pass if can_connect_to_bitcoind(): - if in_correct_network(): + if in_correct_network(BTC_NETWORK): # Fire the api start_api() diff --git a/pisa/tools.py b/pisa/tools.py index c10f2a5..aeaa310 100644 --- a/pisa/tools.py +++ b/pisa/tools.py @@ -1,7 +1,6 @@ import re from http.client import HTTPException -import pisa.conf as conf from pisa import bitcoin_cli from pisa.logger import Logger from pisa.utils.auth_proxy import JSONRPCException @@ -46,18 +45,18 @@ def can_connect_to_bitcoind(): return can_connect -def in_correct_network(): +def in_correct_network(network): mainnet_genesis_block_hash = "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f" testnet3_genesis_block_hash = "000000000933ea01ad0ee984209779baaec3ced90fa3f408719526f8d77f4943" correct_network = False genesis_block_hash = 
bitcoin_cli.getblockhash(0) - if conf.BTC_NETWORK == 'mainnet' and genesis_block_hash == mainnet_genesis_block_hash: + if network == 'mainnet' and genesis_block_hash == mainnet_genesis_block_hash: correct_network = True - elif conf.BTC_NETWORK == 'testnet' and genesis_block_hash == testnet3_genesis_block_hash: + elif network == 'testnet' and genesis_block_hash == testnet3_genesis_block_hash: correct_network = True - elif conf.BTC_NETWORK == 'regtest' and genesis_block_hash not in [mainnet_genesis_block_hash, testnet3_genesis_block_hash]: + elif network == 'regtest' and genesis_block_hash not in [mainnet_genesis_block_hash, testnet3_genesis_block_hash]: correct_network = True return correct_network diff --git a/test/unit/test_tools.py b/test/unit/test_tools.py index 0e96b7c..fd95e4f 100644 --- a/test/unit/test_tools.py +++ b/test/unit/test_tools.py @@ -1,9 +1,44 @@ -from pisa.tools import check_txid_format +import pytest +from time import sleep +from multiprocessing import Process + + from pisa import logging +from pisa.tools import check_txid_format +from test.simulator.bitcoind_sim import run_simulator +from pisa.tools import can_connect_to_bitcoind, in_correct_network logging.getLogger().disabled = True +@pytest.fixture(autouse=True, scope='session') +def run_bitcoind(): + bitcoind_process = Process(target=run_simulator) + bitcoind_process.start() + + # It takes a little bit of time to start the API (otherwise the requests are sent too early and they fail) + sleep(0.1) + + return bitcoind_process + + +def test_in_correct_network(): + # The simulator runs as if it was regtest, so every other network should fail + assert in_correct_network('mainnet') is False + assert in_correct_network('testnet') is False + assert in_correct_network('regtest') is True + + +def test_can_connect_to_bitcoind(): + assert can_connect_to_bitcoind() is True + + +def test_can_connect_to_bitcoind_bitcoin_not_running(run_bitcoind): + # Kill the simulator thread and test the check fails + run_bitcoind.kill() + assert can_connect_to_bitcoind() is False + + def test_check_txid_format(): assert(check_txid_format(None) is False) assert(check_txid_format("") is False) From e5ab943f8ccf8f4abcac018b88c8f151da7a12bb Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Mon, 14 Oct 2019 12:59:22 +0100 Subject: [PATCH 57/82] Adds conftest and defines session fixtures --- test/unit/conftest.py | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 test/unit/conftest.py diff --git a/test/unit/conftest.py b/test/unit/conftest.py new file mode 100644 index 0000000..361e99f --- /dev/null +++ b/test/unit/conftest.py @@ -0,0 +1,30 @@ +import pytest +from time import sleep +from threading import Thread +from multiprocessing import Process + +from pisa.api import start_api +from test.simulator.bitcoind_sim import run_simulator + +bitcoind_process = Process(target=run_simulator) + + +@pytest.fixture(autouse=True, scope='session') +def run_bitcoind(): + global bitcoind_process + + bitcoind_process.daemon = True + bitcoind_process.start() + + # It takes a little bit of time to start the API (otherwise the requests are sent too early and they fail) + sleep(0.1) + + +@pytest.fixture(autouse=True, scope='session') +def run_api(): + api_thread = Thread(target=start_api) + api_thread.daemon = True + api_thread.start() + + # It takes a little bit of time to start the API (otherwise the requests are sent too early and they fail) + sleep(0.1) From abe359f7d1f93a8489714b81d6cc34d04a2978b8 Mon Sep 17 00:00:00 
2001 From: Sergi Delgado Segura Date: Mon, 14 Oct 2019 13:00:21 +0100 Subject: [PATCH 58/82] Fixes tests to work with session fixtures Tests were running fine standalone but failing / having address reuse issues when running all together. Fixing that. --- test/unit/test_api.py | 21 +-------------------- test/unit/test_block_processor.py | 17 ++--------------- test/unit/test_carrier.py | 19 ++++--------------- test/unit/test_inspector.py | 14 -------------- test/unit/test_tools.py | 24 ++++-------------------- 5 files changed, 11 insertions(+), 84 deletions(-) diff --git a/test/unit/test_api.py b/test/unit/test_api.py index 5e4a6ee..6027a55 100644 --- a/test/unit/test_api.py +++ b/test/unit/test_api.py @@ -4,14 +4,12 @@ import pytest import time import requests from hashlib import sha256 -from threading import Thread from binascii import unhexlify from apps.cli.blob import Blob -from pisa.api import start_api from pisa import HOST, PORT, logging from pisa.utils.auth_proxy import AuthServiceProxy -from test.simulator.bitcoind_sim import run_simulator, TIME_BETWEEN_BLOCKS +from test.simulator.bitcoind_sim import TIME_BETWEEN_BLOCKS from pisa.conf import BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT, MAX_APPOINTMENTS logging.getLogger().disabled = True @@ -46,23 +44,6 @@ def generate_dummy_appointment(dispute_txid): return appointment -@pytest.fixture(autouse=True, scope='session') -def run_api(): - api_thread = Thread(target=start_api) - api_thread.daemon = True - api_thread.start() - - # It takes a little bit of time to start the API (otherwise the requests are sent too early and they fail) - time.sleep(0.1) - - -@pytest.fixture(autouse=True, scope='session') -def run_bitcoind(): - bitcoind_thread = Thread(target=run_simulator) - bitcoind_thread.daemon = True - bitcoind_thread.start() - - @pytest.fixture def new_appointment(dispute_txid=None): appointment = create_appointment(dispute_txid) diff --git a/test/unit/test_block_processor.py b/test/unit/test_block_processor.py index a57fbb0..facf6b7 100644 --- a/test/unit/test_block_processor.py +++ b/test/unit/test_block_processor.py @@ -1,34 +1,21 @@ import pytest -from time import sleep from os import urandom from uuid import uuid4 from hashlib import sha256 -from threading import Thread from binascii import unhexlify from pisa.block_processor import BlockProcessor -from test.simulator.bitcoind_sim import run_simulator APPOINTMENT_COUNT = 100 TEST_SET_SIZE = 200 -@pytest.fixture(autouse=True, scope='session') -def run_bitcoind(): - bitcoind_thread = Thread(target=run_simulator) - bitcoind_thread.daemon = True - bitcoind_thread.start() - - # It takes a little bit of time to start the API (otherwise the requests are sent too early and they fail) - sleep(0.1) - - -@pytest.fixture(scope='session') +@pytest.fixture(scope='module') def txids(): return [urandom(32).hex() for _ in range(APPOINTMENT_COUNT)] -@pytest.fixture(scope='session') +@pytest.fixture(scope='module') def locator_uuid_map(txids): return {sha256(unhexlify(txid)).hexdigest(): uuid4().hex for txid in txids} diff --git a/test/unit/test_carrier.py b/test/unit/test_carrier.py index 9dc0f44..8c71e67 100644 --- a/test/unit/test_carrier.py +++ b/test/unit/test_carrier.py @@ -1,11 +1,10 @@ import pytest from os import urandom from time import sleep -from threading import Thread from pisa.carrier import Carrier from pisa.rpc_errors import RPC_VERIFY_ALREADY_IN_CHAIN, RPC_DESERIALIZATION_ERROR -from
test.simulator.bitcoind_sim import TIME_BETWEEN_BLOCKS # FIXME: This test do not fully cover the carrier since the simulator does not support every single error bitcoind may # return for RPC_VERIFY_REJECTED and RPC_VERIFY_ERROR. Further development of the simulator / mocks or simulation @@ -15,17 +14,7 @@ from test.simulator.bitcoind_sim import run_simulator, TIME_BETWEEN_BLOCKS sent_txs = [] -@pytest.fixture(autouse=True, scope='session') -def run_bitcoind(): - bitcoind_thread = Thread(target=run_simulator) - bitcoind_thread.daemon = True - bitcoind_thread.start() - - # It takes a little bit of time to start the API (otherwise the requests are sent too early and they fail) - sleep(0.1) - - -@pytest.fixture(scope='session') +@pytest.fixture(scope='module') def carrier(): return Carrier() @@ -45,7 +34,7 @@ def test_send_double_spending_transaction(carrier): sent_txs.append(tx) # Wait for a block to be mined - sleep(TIME_BETWEEN_BLOCKS) + sleep(2*TIME_BETWEEN_BLOCKS) # Try to send it again receipt2 = carrier.send_transaction(tx, tx) @@ -53,7 +42,7 @@ def test_send_double_spending_transaction(carrier): # The carrier should report delivered True for both, but in the second case the transaction was already delivered # (either by himself or someone else) assert(receipt.delivered is True) - assert (receipt2.delivered is True and receipt2.confirmations == 1 + assert (receipt2.delivered is True and receipt2.confirmations >= 1 and receipt2.reason == RPC_VERIFY_ALREADY_IN_CHAIN) diff --git a/test/unit/test_inspector.py b/test/unit/test_inspector.py index 551cfe3..4bed6c5 100644 --- a/test/unit/test_inspector.py +++ b/test/unit/test_inspector.py @@ -1,14 +1,10 @@ -import time -import pytest from os import urandom -from threading import Thread from pisa import logging from pisa.errors import * from pisa.inspector import Inspector from pisa.appointment import Appointment from pisa.block_processor import BlockProcessor -from test.simulator.bitcoind_sim import run_simulator from pisa.conf import MIN_DISPUTE_DELTA, SUPPORTED_CIPHERS, SUPPORTED_HASH_FUNCTIONS inspector = Inspector() @@ -21,16 +17,6 @@ WRONG_TYPES_NO_STR = [[], urandom(32), 3.2, 2.0, (), object, {}, object()] logging.getLogger().disabled = True -@pytest.fixture(autouse=True, scope='session') -def run_bitcoind(): - bitcoind_thread = Thread(target=run_simulator) - bitcoind_thread.daemon = True - bitcoind_thread.start() - - # It takes a little bit of time to start the simulator (otherwise the requests are sent too early and they fail) - time.sleep(0.1) - - def test_check_locator(): # Right appointment type, size and format locator = urandom(32).hex() diff --git a/test/unit/test_tools.py b/test/unit/test_tools.py index fd95e4f..25516b3 100644 --- a/test/unit/test_tools.py +++ b/test/unit/test_tools.py @@ -1,27 +1,11 @@ -import pytest -from time import sleep -from multiprocessing import Process - - -from pisa import logging +from pisa import logging, bitcoin_cli from pisa.tools import check_txid_format -from test.simulator.bitcoind_sim import run_simulator +from test.unit.conftest import bitcoind_process from pisa.tools import can_connect_to_bitcoind, in_correct_network logging.getLogger().disabled = True -@pytest.fixture(autouse=True, scope='session') -def run_bitcoind(): - bitcoind_process = Process(target=run_simulator) - bitcoind_process.start() - - # It takes a little bit of time to start the API (otherwise the requests are sent too early and they fail) - sleep(0.1) - - return bitcoind_process - - def test_in_correct_network(): # The 
simulator runs as if it was regtest, so every other network should fail assert in_correct_network('mainnet') is False @@ -33,9 +17,9 @@ def test_can_connect_to_bitcoind(): assert can_connect_to_bitcoind() is True -def test_can_connect_to_bitcoind_bitcoin_not_running(run_bitcoind): +def test_can_connect_to_bitcoind_bitcoin_not_running(): # Kill the simulator thread and test the check fails - run_bitcoind.kill() + bitcoind_process.kill() assert can_connect_to_bitcoind() is False From d35b9c13c463c26d3319a42117d405c2458ebbda Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Mon, 14 Oct 2019 13:00:49 +0100 Subject: [PATCH 59/82] Fixes responder missing_confirmation dict access --- pisa/responder.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pisa/responder.py b/pisa/responder.py index 87f64ec..0b948e0 100644 --- a/pisa/responder.py +++ b/pisa/responder.py @@ -68,7 +68,7 @@ class Responder: # ToDo: #23-define-behaviour-approaching-end if retry: self.jobs[uuid].retry_counter += 1 - self.jobs[uuid].missed_confirmations = 0 + self.missed_confirmations[justice_txid] = 0 else: self.jobs[uuid] = Job(dispute_txid, justice_txid, justice_rawtx, appointment_end, confirmations) @@ -144,7 +144,7 @@ class Responder: txs_to_rebroadcast = [] for tx in txs: - if self.missed_confirmations[tx] >= CONFIRMATIONS_BEFORE_RETRY: + if tx in self.missed_confirmations and self.missed_confirmations[tx] >= CONFIRMATIONS_BEFORE_RETRY: # If a transactions has missed too many confirmations we add it to the rebroadcast list txs_to_rebroadcast.append(tx) @@ -153,7 +153,7 @@ class Responder: def get_completed_jobs(self, height): completed_jobs = [] - for uuid, job in self.jobs: + for uuid, job in self.jobs.items(): if job.appointment_end <= height: tx = Carrier.get_transaction(job.dispute_txid) From d43ab76220828e8ee1427dd9552d8fccc57bed48 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Mon, 14 Oct 2019 13:19:54 +0100 Subject: [PATCH 60/82] Updates conftest fixtures so they do not autorun The modules that need to run either bitcoind or the api now reference the fixture in the first test that needs it. Since the fixtures are defined session-wise, the rest of the modules will have access to them from that point on.
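For illustration only (this sketch is not part of the patch; all names in it are hypothetical), the pattern relies on pytest instantiating a session-scoped fixture the first time any test requests it:

# conftest.py (sketch): the shared backend is started at most once per session
import pytest
from time import sleep
from threading import Thread

@pytest.fixture(scope='session')
def backend():
    # Stands in for run_simulator / start_api; a daemon thread dies with pytest
    backend_thread = Thread(target=sleep, args=[3600])
    backend_thread.daemon = True
    backend_thread.start()

# test_example.py (sketch): only the first test requests the fixture explicitly
def test_first(backend):
    assert True  # the backend is started here, once for the whole session

def test_second():
    assert True  # runs later in the session; the backend is still up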
--- test/unit/conftest.py | 6 +++--- test/unit/test_api.py | 2 +- test/unit/test_block_processor.py | 5 ++++- test/unit/test_carrier.py | 5 ++++- test/unit/test_inspector.py | 2 +- test/unit/test_tools.py | 2 +- 6 files changed, 14 insertions(+), 8 deletions(-) diff --git a/test/unit/conftest.py b/test/unit/conftest.py index 361e99f..61f9567 100644 --- a/test/unit/conftest.py +++ b/test/unit/conftest.py @@ -9,10 +9,10 @@ from test.simulator.bitcoind_sim import run_simulator bitcoind_process = Process(target=run_simulator) -@pytest.fixture(autouse=True, scope='session') +@pytest.fixture(scope='session') def run_bitcoind(): global bitcoind_process - + bitcoind_process.daemon = True bitcoind_process.start() @@ -20,7 +20,7 @@ def run_bitcoind(): sleep(0.1) -@pytest.fixture(autouse=True, scope='session') +@pytest.fixture(scope='session') def run_api(): api_thread = Thread(target=start_api) api_thread.daemon = True diff --git a/test/unit/test_api.py b/test/unit/test_api.py index 6027a55..8a5661d 100644 --- a/test/unit/test_api.py +++ b/test/unit/test_api.py @@ -70,7 +70,7 @@ def add_appointment(appointment): return r -def test_add_appointment(new_appointment): +def test_add_appointment(run_api, run_bitcoind, new_appointment): # Properly formatted appointment r = add_appointment(new_appointment) assert (r.status_code == 200) diff --git a/test/unit/test_block_processor.py b/test/unit/test_block_processor.py index facf6b7..a0ec37f 100644 --- a/test/unit/test_block_processor.py +++ b/test/unit/test_block_processor.py @@ -1,4 +1,5 @@ import pytest +import logging from os import urandom from uuid import uuid4 from hashlib import sha256 @@ -6,6 +7,8 @@ from binascii import unhexlify from pisa.block_processor import BlockProcessor +logging.getLogger().disabled = True + APPOINTMENT_COUNT = 100 TEST_SET_SIZE = 200 @@ -25,7 +28,7 @@ def best_block_hash(): return BlockProcessor.get_best_block_hash() -def test_get_best_block_hash(best_block_hash): +def test_get_best_block_hash(run_bitcoind, best_block_hash): # As long as bitcoind is running (or mocked in this case) we should always a block hash assert best_block_hash is not None and isinstance(best_block_hash, str) diff --git a/test/unit/test_carrier.py b/test/unit/test_carrier.py index 8c71e67..374dd24 100644 --- a/test/unit/test_carrier.py +++ b/test/unit/test_carrier.py @@ -1,4 +1,5 @@ import pytest +import logging from os import urandom from time import sleep @@ -6,6 +7,8 @@ from pisa.carrier import Carrier from pisa.rpc_errors import RPC_VERIFY_ALREADY_IN_CHAIN, RPC_DESERIALIZATION_ERROR from test.simulator.bitcoind_sim import TIME_BETWEEN_BLOCKS +logging.getLogger().disabled = True + # FIXME: This test do not fully cover the carrier since the simulator does not support every single error bitcoind may # return for RPC_VERIFY_REJECTED and RPC_VERIFY_ERROR. 
Further development of the simulator / mocks or simulation # with bitcoind is required @@ -19,7 +22,7 @@ def carrier(): return Carrier() -def test_send_transaction(carrier): +def test_send_transaction(run_bitcoind, carrier): # We are mocking bitcoind and in our simulator txid == tx tx = urandom(32).hex() receipt = carrier.send_transaction(tx, tx) diff --git a/test/unit/test_inspector.py b/test/unit/test_inspector.py index 4bed6c5..bed6a9a 100644 --- a/test/unit/test_inspector.py +++ b/test/unit/test_inspector.py @@ -188,7 +188,7 @@ def test_check_hash_function(): assert (Inspector.check_hash_function(hash_function)[0] == APPOINTMENT_EMPTY_FIELD) -def test_inspect(): +def test_inspect(run_bitcoind): # At this point every single check function has been already tested, let's test inspect with an invalid and a valid # appointments. diff --git a/test/unit/test_tools.py b/test/unit/test_tools.py index 25516b3..9953889 100644 --- a/test/unit/test_tools.py +++ b/test/unit/test_tools.py @@ -6,7 +6,7 @@ from pisa.tools import can_connect_to_bitcoind, in_correct_network logging.getLogger().disabled = True -def test_in_correct_network(): +def test_in_correct_network(run_bitcoind): # The simulator runs as if it was regtest, so every other network should fail assert in_correct_network('mainnet') is False assert in_correct_network('testnet') is False From d7c89ddc9177ce37f0840a6318ac0b7c87dc73c9 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Mon, 14 Oct 2019 16:34:41 +0100 Subject: [PATCH 61/82] Modifies behaviour towards a failed EncryptedBlob decryption The decryption of the `EncryptedBlob` using AES-GCM-128 (the only cipher available atm) raises an `InvalidTag` exception. This was not properly captured by the watcher, making it crash. This behaviour was already discovered during the `EncryptedBlob` unit testing and left to be fixed in the `Watcher` unit testing. However, making the `EncryptedBlob` raise such an exception may not be a good practice, since other ciphers may run into different exceptions. Therefore, the `EncryptedBlob` has been modified to return None upon facing a decryption issue; the `BlockProcessor` will detect that and return a None justice_txid and justice_rawtx. Upon receiving a None `justice_txid`, the `Watcher` will delete the appointment without notifying the `Responder`. --- pisa/block_processor.py | 6 ++++-- pisa/encrypted_blob.py | 11 +++++++++-- pisa/watcher.py | 16 +++++++++------- test/unit/test_encrypted_blob.py | 11 ++++------- 4 files changed, 26 insertions(+), 18 deletions(-) diff --git a/pisa/block_processor.py b/pisa/block_processor.py index 11478b5..a5f30e3 100644 --- a/pisa/block_processor.py +++ b/pisa/block_processor.py @@ -74,15 +74,17 @@ class BlockProcessor: # ToDo: #20-test-tx-decrypting-edge-cases justice_rawtx = appointments[uuid].encrypted_blob.decrypt(dispute_txid) justice_txid = bitcoin_cli.decoderawtransaction(justice_rawtx).get('txid') - matches.append((locator, uuid, dispute_txid, justice_txid, justice_rawtx)) - logger.info("Match found for locator.", locator=locator, uuid=uuid, justice_txid=justice_txid) except JSONRPCException as e: # Tx decode failed returns error code -22, maybe we should be more strict here. Leaving it simple # for the POC + justice_txid = None + justice_rawtx = None logger.error("Can't build transaction from decoded data.", error=e.error) + matches.append((locator, uuid, dispute_txid, justice_txid, justice_rawtx)) + return matches # DISCUSS: This method comes from the Responder and seems like it could go back there.
diff --git a/pisa/encrypted_blob.py b/pisa/encrypted_blob.py index 68c1fe5..2582c36 100644 --- a/pisa/encrypted_blob.py +++ b/pisa/encrypted_blob.py @@ -1,6 +1,8 @@ from hashlib import sha256 from binascii import unhexlify, hexlify +from cryptography.exceptions import InvalidTag from cryptography.hazmat.primitives.ciphers.aead import AESGCM + from pisa.logger import Logger logger = Logger("Watcher") @@ -33,7 +35,12 @@ class EncryptedBlob: # Decrypt aesgcm = AESGCM(sk) data = unhexlify(self.data.encode()) - raw_tx = aesgcm.decrypt(nonce=nonce, data=data, associated_data=None) - hex_raw_tx = hexlify(raw_tx).decode('utf8') + + try: + raw_tx = aesgcm.decrypt(nonce=nonce, data=data, associated_data=None) + hex_raw_tx = hexlify(raw_tx).decode('utf8') + + except InvalidTag: + hex_raw_tx = None return hex_raw_tx diff --git a/pisa/watcher.py b/pisa/watcher.py index 5248b77..3deda68 100644 --- a/pisa/watcher.py +++ b/pisa/watcher.py @@ -49,7 +49,7 @@ class Watcher: if self.asleep: self.asleep = False self.block_queue = Queue() - zmq_thread = Thread(target=self.do_subscribe, args=[self.block_queue]) + zmq_thread = Thread(target=self.do_subscribe) watcher = Thread(target=self.do_watch) zmq_thread.start() watcher.start() @@ -67,9 +67,9 @@ class Watcher: return appointment_added - def do_subscribe(self, block_queue): + def do_subscribe(self): self.zmq_subscriber = ZMQHandler(parent="Watcher") - self.zmq_subscriber.handle(block_queue) + self.zmq_subscriber.handle(self.block_queue) def do_watch(self): while len(self.appointments) > 0: @@ -92,11 +92,13 @@ class Watcher: matches = BlockProcessor.get_matches(potential_matches, self.locator_uuid_map, self.appointments) for locator, uuid, dispute_txid, justice_txid, justice_rawtx in matches: - logger.info("Notifying responder and deleting appointment.", - justice_txid=justice_txid, locator=locator, uuid=uuid) + # Errors decrypting the Blob will result in a None justice_txid + if justice_txid is not None: + logger.info("Notifying responder and deleting appointment.", justice_txid=justice_txid, + locator=locator, uuid=uuid) - self.responder.add_response(uuid, dispute_txid, justice_txid, justice_rawtx, - self.appointments[uuid].end_time) + self.responder.add_response(uuid, dispute_txid, justice_txid, justice_rawtx, + self.appointments[uuid].end_time) # Delete the appointment self.appointments.pop(uuid) diff --git a/test/unit/test_encrypted_blob.py b/test/unit/test_encrypted_blob.py index 67270c2..26e1d9e 100644 --- a/test/unit/test_encrypted_blob.py +++ b/test/unit/test_encrypted_blob.py @@ -19,13 +19,10 @@ def test_decrypt(): encrypted_data = urandom(64).hex() encrypted_blob = EncryptedBlob(encrypted_data) - # Trying to decrypt random data (in AES_GCM-128) should result in an InvalidTag exception - try: - encrypted_blob.decrypt(key) - assert False, "Able to decrypt random data with random key" - - except InvalidTag: - assert True + # Trying to decrypt random data (in AES_GCM-128) should result in an InvalidTag exception. 
Our decrypt function + # returns None + hex_tx = encrypted_blob.decrypt(key) + assert hex_tx is None # Valid data should run with no InvalidTag and verify data = "6097cdf52309b1b2124efeed36bd34f46dc1c25ad23ac86f28380f746254f777" From 5f87705d26db55e95f3e5c87dd6181fbd2ec7a1d Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Mon, 14 Oct 2019 16:41:43 +0100 Subject: [PATCH 62/82] Disables can_connect_to_bitcoind with a non-running backend Until there is a better way of handling the stop of bitcoind, the test is disabled; it created issues with other tests. --- test/unit/conftest.py | 10 +++------- test/unit/test_tools.py | 9 ++++----- 2 files changed, 7 insertions(+), 12 deletions(-) diff --git a/test/unit/conftest.py b/test/unit/conftest.py index 61f9567..ab6af2b 100644 --- a/test/unit/conftest.py +++ b/test/unit/conftest.py @@ -1,20 +1,16 @@ import pytest from time import sleep from threading import Thread -from multiprocessing import Process from pisa.api import start_api from test.simulator.bitcoind_sim import run_simulator -bitcoind_process = Process(target=run_simulator) - @pytest.fixture(scope='session') def run_bitcoind(): - global bitcoind_process - - bitcoind_process.daemon = True - bitcoind_process.start() + bitcoind_thread = Thread(target=run_simulator) + bitcoind_thread.daemon = True + bitcoind_thread.start() # It takes a little bit of time to start the API (otherwise the requests are sent too early and they fail) sleep(0.1) diff --git a/test/unit/test_tools.py b/test/unit/test_tools.py index 9953889..e3bce92 100644 --- a/test/unit/test_tools.py +++ b/test/unit/test_tools.py @@ -1,6 +1,5 @@ from pisa import logging, bitcoin_cli from pisa.tools import check_txid_format -from test.unit.conftest import bitcoind_process from pisa.tools import can_connect_to_bitcoind, in_correct_network logging.getLogger().disabled = True @@ -17,10 +16,10 @@ def test_can_connect_to_bitcoind(): assert can_connect_to_bitcoind() is True -def test_can_connect_to_bitcoind_bitcoin_not_running(): - # Kill the simulator thread and test the check fails - bitcoind_process.kill() - assert can_connect_to_bitcoind() is False +# def test_can_connect_to_bitcoind_bitcoin_not_running(): +# # Kill the simulator thread and test the check fails +# bitcoind_process.kill() +# assert can_connect_to_bitcoind() is False def test_check_txid_format(): From 409f8fb5fb33b1aa8006ddf82ef82380e1924d16 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Mon, 14 Oct 2019 16:58:31 +0100 Subject: [PATCH 63/82] Adds Watcher unit tests --- test/unit/test_watcher.py | 132 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 132 insertions(+) create mode 100644 test/unit/test_watcher.py diff --git a/test/unit/test_watcher.py b/test/unit/test_watcher.py new file mode 100644 index 0000000..5b1e308 --- /dev/null +++ b/test/unit/test_watcher.py @@ -0,0 +1,132 @@ +import pytest +import logging +from os import urandom +from time import sleep +from uuid import uuid4 +from hashlib import sha256 +from threading import Thread +from queue import Queue, Empty + +from pisa import bitcoin_cli +from pisa.watcher import Watcher +from pisa.conf import EXPIRY_DELTA +from pisa.responder import Responder +from pisa.conf import MAX_APPOINTMENTS +from pisa.appointment import Appointment +from pisa.tools import check_txid_format +from test.simulator.bitcoind_sim import TIME_BETWEEN_BLOCKS +logging.getLogger().disabled = True APPOINTMENTS = 5 @pytest.fixture(scope="module") def watcher(): return Watcher() def
create_appointment(locator=None): + if locator is None: + locator = urandom(32).hex() + start_time = bitcoin_cli.getblockcount() + 1 + end_time = start_time + 1 + dispute_delta = 20 + encrypted_blob_data = urandom(100).hex() + cipher = "AES-GCM-128" + hash_function = "SHA256" + + return Appointment(locator, start_time, end_time, dispute_delta, encrypted_blob_data, cipher, hash_function) + + +def create_appointments(n): + locator_uuid_map = dict() + appointments = dict() + txids = [] + + for i in range(n): + txid = urandom(32) + uuid = uuid4().hex + locator = sha256(txid).hexdigest() + + appointments[uuid] = create_appointment(locator) + locator_uuid_map[locator] = [uuid] + txids.append(txid.hex()) + + return appointments, locator_uuid_map, txids + + +def test_init(watcher): + assert type(watcher.appointments) is dict and len(watcher.appointments) == 0 + assert type(watcher.locator_uuid_map) is dict and len(watcher.locator_uuid_map) == 0 + assert watcher.block_queue is None + assert watcher.asleep is True + assert watcher.max_appointments == MAX_APPOINTMENTS + assert watcher.zmq_subscriber is None + assert type(watcher.responder) is Responder + + +def test_add_appointment(run_bitcoind, watcher): + # The watcher automatically fire do_watch and do_subscribe on adding an appointment if it is asleep (initial state). + # Avoid this by setting the state to awake. + watcher.asleep = False + + # We should be able to add appointments up to the limit + for _ in range(10): + added_appointment = watcher.add_appointment(create_appointment()) + + assert added_appointment is True + + +def test_add_too_many_appointments(watcher): + # Any appointment on top of those should fail + watcher.appointments = dict() + + for _ in range(MAX_APPOINTMENTS): + added_appointment = watcher.add_appointment(create_appointment()) + + assert added_appointment is True + + added_appointment = watcher.add_appointment(create_appointment()) + + assert added_appointment is False + + +def test_do_subscribe(watcher): + watcher.block_queue = Queue() + + zmq_thread = Thread(target=watcher.do_subscribe) + zmq_thread.daemon = True + zmq_thread.start() + + try: + block_hash = watcher.block_queue.get(timeout=MAX_APPOINTMENTS) + assert check_txid_format(block_hash) + + except Empty: + assert False + + +def test_do_watch(watcher): + # We will wipe all the previous data and add 5 appointments + watcher.appointments, watcher.locator_uuid_map, txids = create_appointments(APPOINTMENTS) + + watch_thread = Thread(target=watcher.do_watch) + watch_thread.daemon = True + watch_thread.start() + + # Broadcast the first two + for txid in txids[:2]: + bitcoin_cli.sendrawtransaction(txid) + + # After leaving some time for the block to be mined and processed, the number of appointments should have reduced + # by two + sleep(TIME_BETWEEN_BLOCKS*2) + assert len(watcher.appointments) == APPOINTMENTS - 2 + + # The rest of appointments will timeout after the end (2) + EXPIRY_DELTA + # Wait for an additional block to be safe + + sleep((EXPIRY_DELTA + 2 + 1) * TIME_BETWEEN_BLOCKS) + + assert len(watcher.appointments) == 0 + assert watcher.asleep is True From 50d892b197c1a67206696c08277f5df085d82d26 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Mon, 14 Oct 2019 17:40:49 +0100 Subject: [PATCH 64/82] Remove wrong parenthesis --- test/unit/test_blob.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/unit/test_blob.py b/test/unit/test_blob.py index 7eb3418..de9403f 100644 --- a/test/unit/test_blob.py +++ b/test/unit/test_blob.py 
@@ -59,7 +59,7 @@ def test_init_blob(): try: Blob(data, cipher, hash_function) - assert(False, "Able to create blob with wrong data") + assert False, "Able to create blob with wrong data" except ValueError: assert True @@ -78,7 +78,7 @@ def test_encrypt(): try: blob.encrypt(invalid_key) - assert (False, "Able to create encrypt with invalid key") + assert False, "Able to create encrypt with invalid key" except ValueError: assert True From b74df42a2baf1cf6ca64f8f0bb9b947a155c4157 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Mon, 14 Oct 2019 17:41:09 +0100 Subject: [PATCH 65/82] Runs bitcoin-cli locally --- test/unit/test_watcher.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/test/unit/test_watcher.py b/test/unit/test_watcher.py index 5b1e308..a0b7ac5 100644 --- a/test/unit/test_watcher.py +++ b/test/unit/test_watcher.py @@ -7,14 +7,14 @@ import logging from hashlib import sha256 from threading import Thread from queue import Queue, Empty -from pisa import bitcoin_cli from pisa.watcher import Watcher -from pisa.conf import EXPIRY_DELTA from pisa.responder import Responder from pisa.conf import MAX_APPOINTMENTS from pisa.appointment import Appointment from pisa.tools import check_txid_format +from pisa.utils.auth_proxy import AuthServiceProxy from test.simulator.bitcoind_sim import TIME_BETWEEN_BLOCKS +from pisa.conf import EXPIRY_DELTA, BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT logging.getLogger().disabled = True APPOINTMENTS = 5 @@ -26,8 +26,11 @@ def watcher(): def create_appointment(locator=None): + bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT)) + if locator is None: locator = urandom(32).hex() + start_time = bitcoin_cli.getblockcount() + 1 end_time = start_time + 1 dispute_delta = 20 @@ -107,6 +110,8 @@ def test_do_subscribe(watcher): def test_do_watch(watcher): + bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT)) + # We will wipe all the previous data and add 5 appointments watcher.appointments, watcher.locator_uuid_map, txids = create_appointments(APPOINTMENTS) From 3e62cb4b7002c38ccb92f66f96fc2d7e41909c29 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Wed, 16 Oct 2019 12:42:50 +0100 Subject: [PATCH 66/82] Parametrize start and end appointment offsets Also waits an additional block for transactions to be cleaned, to be safe --- test/unit/test_watcher.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test/unit/test_watcher.py b/test/unit/test_watcher.py index a0b7ac5..9ddcbb5 100644 --- a/test/unit/test_watcher.py +++ b/test/unit/test_watcher.py @@ -18,6 +18,8 @@ from pisa.conf import EXPIRY_DELTA, BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, logging.getLogger().disabled = True APPOINTMENTS = 5 +START_TIME_OFFSET = 1 +END_TIME_OFFSET = 1 @pytest.fixture(scope="module") @@ -125,7 +127,7 @@ def test_do_watch(watcher): # After leaving some time for the block to be mined and processed, the number of appointments should have reduced # by two - sleep(TIME_BETWEEN_BLOCKS*2) + sleep(TIME_BETWEEN_BLOCKS*(START_TIME_OFFSET+END_TIME_OFFSET + 1)) assert len(watcher.appointments) == APPOINTMENTS - 2 # The rest of appointments will timeout after the end (2) + EXPIRY_DELTA From 95759793bae60fbee9fbd4342bd6fe9130b4bb08 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Thu, 17 Oct 2019 17:17:52 +0100 Subject: [PATCH 67/82] Fixes bitcoin_cli bitcoin_cli as a global variable in the main __init__.py was
creating issues related to http.client.CannotSendRequest: Request-sent and connection re-usage. Define a new connection per request. --- pisa/__init__.py | 5 -- pisa/block_processor.py | 10 ++-- pisa/carrier.py | 6 +-- pisa/inspector.py | 1 - pisa/tools.py | 16 ++++-- test/simulator/bitcoind_sim.py | 92 ++++++++++++++++++++++++---------- test/unit/conftest.py | 2 +- test/unit/test_api.py | 4 +- test/unit/test_carrier.py | 2 +- test/unit/test_watcher.py | 2 +- 10 files changed, 90 insertions(+), 50 deletions(-) diff --git a/pisa/__init__.py b/pisa/__init__.py index 5d03345..8aa5441 100644 --- a/pisa/__init__.py +++ b/pisa/__init__.py @@ -11,8 +11,3 @@ logging.basicConfig(format='%(message)s', level=logging.INFO, handlers=[ logging.FileHandler(conf.SERVER_LOG_FILE), logging.StreamHandler() ]) - -# Create RPC connection with bitcoind -# TODO: Check if a long lived connection like this may create problems (timeouts) -bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (conf.BTC_RPC_USER, conf.BTC_RPC_PASSWD, conf.BTC_RPC_HOST, - conf.BTC_RPC_PORT)) diff --git a/pisa/block_processor.py b/pisa/block_processor.py index a5f30e3..83b0441 100644 --- a/pisa/block_processor.py +++ b/pisa/block_processor.py @@ -1,8 +1,8 @@ import binascii from hashlib import sha256 -from pisa import bitcoin_cli from pisa.logger import Logger +from pisa.tools import bitcoin_cli from pisa.utils.auth_proxy import JSONRPCException logger = Logger("BlockProcessor") @@ -13,7 +13,7 @@ class BlockProcessor: def get_block(block_hash): try: - block = bitcoin_cli.getblock(block_hash) + block = bitcoin_cli().getblock(block_hash) except JSONRPCException as e: block = None @@ -25,7 +25,7 @@ class BlockProcessor: def get_best_block_hash(): try: - block_hash = bitcoin_cli.getbestblockhash() + block_hash = bitcoin_cli().getbestblockhash() except JSONRPCException as e: block_hash = None @@ -37,7 +37,7 @@ class BlockProcessor: def get_block_count(): try: - block_count = bitcoin_cli.getblockcount() + block_count = bitcoin_cli().getblockcount() except JSONRPCException as e: block_count = None @@ -73,7 +73,7 @@ class BlockProcessor: try: # ToDo: #20-test-tx-decrypting-edge-cases justice_rawtx = appointments[uuid].encrypted_blob.decrypt(dispute_txid) - justice_txid = bitcoin_cli.decoderawtransaction(justice_rawtx).get('txid') + justice_txid = bitcoin_cli().decoderawtransaction(justice_rawtx).get('txid') logger.info("Match found for locator.", locator=locator, uuid=uuid, justice_txid=justice_txid) except JSONRPCException as e: diff --git a/pisa/carrier.py b/pisa/carrier.py index d4cfbed..130cfca 100644 --- a/pisa/carrier.py +++ b/pisa/carrier.py @@ -1,6 +1,6 @@ from pisa.rpc_errors import * -from pisa import bitcoin_cli from pisa.logger import Logger +from pisa.tools import bitcoin_cli from pisa.utils.auth_proxy import JSONRPCException from pisa.errors import UNKNOWN_JSON_RPC_EXCEPTION @@ -20,7 +20,7 @@ class Carrier: def send_transaction(self, rawtx, txid): try: logger.info("Pushing transaction to the network", txid=txid, rawtx=rawtx) - bitcoin_cli.sendrawtransaction(rawtx) + bitcoin_cli().sendrawtransaction(rawtx) receipt = Receipt(delivered=True) @@ -70,7 +70,7 @@ class Carrier: @staticmethod def get_transaction(txid): try: - tx_info = bitcoin_cli.getrawtransaction(txid, 1) + tx_info = bitcoin_cli().getrawtransaction(txid, 1) except JSONRPCException as e: tx_info = None diff --git a/pisa/inspector.py b/pisa/inspector.py index 53055b8..1ae4547 100644 --- a/pisa/inspector.py +++ b/pisa/inspector.py @@ -2,7 +2,6 @@ import re from pisa 
import errors import pisa.conf as conf -from pisa import bitcoin_cli from pisa.logger import Logger from pisa.appointment import Appointment from pisa.block_processor import BlockProcessor diff --git a/pisa/tools.py b/pisa/tools.py index aeaa310..adba5e5 100644 --- a/pisa/tools.py +++ b/pisa/tools.py @@ -1,10 +1,15 @@ import re from http.client import HTTPException -from pisa import bitcoin_cli +import pisa.conf as conf from pisa.logger import Logger -from pisa.utils.auth_proxy import JSONRPCException from pisa.rpc_errors import RPC_INVALID_ADDRESS_OR_KEY +from pisa.utils.auth_proxy import AuthServiceProxy, JSONRPCException + + +def bitcoin_cli(): + return AuthServiceProxy("http://%s:%s@%s:%d" % (conf.BTC_RPC_USER, conf.BTC_RPC_PASSWD, conf.BTC_RPC_HOST, + conf.BTC_RPC_PORT)) # TODO: currently only used in the Responder; might move there or in the BlockProcessor @@ -13,7 +18,7 @@ def check_tx_in_chain(tx_id, logger=Logger(), tx_label='Transaction'): confirmations = 0 try: - tx_info = bitcoin_cli.getrawtransaction(tx_id, 1) + tx_info = bitcoin_cli().getrawtransaction(tx_id, 1) if tx_info.get("confirmations"): confirmations = int(tx_info.get("confirmations")) @@ -38,7 +43,7 @@ def can_connect_to_bitcoind(): can_connect = True try: - bitcoin_cli.help() + bitcoin_cli().help() except (ConnectionRefusedError, JSONRPCException, HTTPException): can_connect = False @@ -50,7 +55,7 @@ def in_correct_network(network): testnet3_genesis_block_hash = "000000000933ea01ad0ee984209779baaec3ced90fa3f408719526f8d77f4943" correct_network = False - genesis_block_hash = bitcoin_cli.getblockhash(0) + genesis_block_hash = bitcoin_cli().getblockhash(0) if network == 'mainnet' and genesis_block_hash == mainnet_genesis_block_hash: correct_network = True @@ -65,3 +70,4 @@ def in_correct_network(network): def check_txid_format(txid): # TODO: #12-check-txid-regexp return isinstance(txid, str) and re.search(r'^[0-9A-Fa-f]{64}$', txid) is not None + diff --git a/test/simulator/bitcoind_sim.py b/test/simulator/bitcoind_sim.py index b481b43..3098717 100644 --- a/test/simulator/bitcoind_sim.py +++ b/test/simulator/bitcoind_sim.py @@ -1,21 +1,24 @@ -from pisa.conf import FEED_PROTOCOL, FEED_ADDR, FEED_PORT -from flask import Flask, request, Response, abort -from test.simulator.zmq_publisher import ZMQPublisher -from threading import Thread -from pisa.rpc_errors import * -from pisa.tools import check_txid_format -import logging -import binascii -import json +import re import os import time +import json +import logging +import binascii +from threading import Thread +from flask import Flask, request, Response, abort +from pisa.rpc_errors import * +from test2.simulator.utils import sha256d +from pisa.tools import check_txid_format +from test2.simulator.transaction import TX +from test2.simulator.zmq_publisher import ZMQPublisher +from pisa.conf import FEED_PROTOCOL, FEED_ADDR, FEED_PORT app = Flask(__name__) HOST = 'localhost' PORT = '18443' -TIME_BETWEEN_BLOCKS = 10 +TIME_BETWEEN_BLOCKS = 5 mempool = [] mined_transactions = {} @@ -67,11 +70,11 @@ def process_request(): no_param_err = {"code": RPC_MISC_ERROR, "message": "JSON value is not a {} as expected"} if method == "decoderawtransaction": - txid = get_param(request_data) + rawtx = get_param(request_data) - if isinstance(txid, str): - if check_txid_format(txid): - response["result"] = {"txid": txid} + if isinstance(rawtx, str): + if TX.deserialize(rawtx) is not None: + response["result"] = {"txid": rawtx} else: response["error"] = {"code": RPC_DESERIALIZATION_ERROR, 
"message": "TX decode failed"} @@ -82,12 +85,12 @@ def process_request(): elif method == "sendrawtransaction": # TODO: A way of rejecting transactions should be added to test edge cases. - txid = get_param(request_data) + rawtx = get_param(request_data) - if isinstance(txid, str): - if check_txid_format(txid): - if txid not in list(mined_transactions.keys()): - mempool.append(txid) + if isinstance(rawtx, str): + if TX.deserialize(rawtx) is not None: + if rawtx not in list(mined_transactions.keys()): + mempool.append(rawtx) else: response["error"] = {"code": RPC_VERIFY_ALREADY_IN_CHAIN, @@ -120,6 +123,8 @@ def process_request(): response["error"] = no_param_err response["error"]["message"] = response["error"]["message"].format("string") + print(response) + elif method == "getblockcount": response["result"] = len(blockchain) @@ -169,6 +174,7 @@ def get_param(request_data): param = None params = request_data.get("params") + if isinstance(params, list) and len(params) > 0: param = params[0] @@ -179,6 +185,33 @@ def load_data(): pass +def create_dummy_transaction(prev_tx_id=None, prev_out_index=None): + tx = TX() + + if prev_tx_id is None: + prev_tx_id = os.urandom(32).hex() + + if prev_out_index is None: + prev_out_index = 0 + + tx.version = 1 + tx.inputs = 1 + tx.outputs = 1 + tx.prev_tx_id = [prev_tx_id] + tx.prev_out_index = [prev_out_index] + tx.nLockTime = 0 + tx.scriptSig = ['47304402204e45e16932b8af514961a1d3a1a25fdf3f4f7732e9d624c6c61548ab5fb8cd410220181522ec8eca07de4860' + 'a4acdd12909d831cc56cbbac4622082221a8768d1d0901'] + tx.scriptSig_len = [77] + tx.nSequence = [4294967295] + tx.value = [5000000000] + tx.scriptPubKey = ['4104ae1a62fe09c5f51b13905f07f06b99a2f7159b2225f374cd378d71302fa28414e7aab37397f554a7df5f142c21c' + '1b7303b8a0626f1baded5c72a704f7e6cd84cac'] + tx.scriptPubKey_len = [67] + + return tx.serialize() + + def simulate_mining(): global mempool, mined_transactions, blocks, blockchain prev_block_hash = None @@ -188,25 +221,32 @@ def simulate_mining(): while True: block_hash = os.urandom(32).hex() - coinbase_tx_hash = os.urandom(32).hex() - txs_to_mine = [coinbase_tx_hash] + coinbase_tx = create_dummy_transaction() + coinbase_tx_hash = sha256d(coinbase_tx) + + txs_to_mine = dict({coinbase_tx_hash: coinbase_tx}) if len(mempool) != 0: # We'll mine up to 100 txs per block - txs_to_mine += mempool[:99] + for rawtx in mempool[:99]: + txid = sha256d(rawtx) + txs_to_mine[txid] = rawtx + mempool = mempool[99:] # Keep track of the mined transaction (to respond to getrawtransaction) - for tx in txs_to_mine: - mined_transactions[tx] = block_hash + for txid, tx in txs_to_mine.items(): + mined_transactions[txid] = {"tx": tx, "block": block_hash} + + blocks[block_hash] = {"tx": list(txs_to_mine.keys()), "height": len(blockchain), + "previousblockhash": prev_block_hash} - blocks[block_hash] = {"tx": txs_to_mine, "height": len(blockchain), "previousblockhash": prev_block_hash} mining_simulator.publish_data(binascii.unhexlify(block_hash)) blockchain.append(block_hash) prev_block_hash = block_hash print("New block mined: {}".format(block_hash)) - print("\tTransactions: {}".format(txs_to_mine)) + print("\tTransactions: {}".format(list(txs_to_mine.keys()))) time.sleep(TIME_BETWEEN_BLOCKS) diff --git a/test/unit/conftest.py b/test/unit/conftest.py index ab6af2b..a8a764c 100644 --- a/test/unit/conftest.py +++ b/test/unit/conftest.py @@ -3,7 +3,7 @@ from time import sleep from threading import Thread from pisa.api import start_api -from test.simulator.bitcoind_sim import run_simulator +from 
test2.simulator.bitcoind_sim import run_simulator @pytest.fixture(scope='session') diff --git a/test/unit/test_api.py b/test/unit/test_api.py index 8a5661d..fd293de 100644 --- a/test/unit/test_api.py +++ b/test/unit/test_api.py @@ -9,7 +9,7 @@ from binascii import unhexlify from apps.cli.blob import Blob from pisa import HOST, PORT, logging from pisa.utils.auth_proxy import AuthServiceProxy -from test.simulator.bitcoind_sim import TIME_BETWEEN_BLOCKS +from test2.simulator.bitcoind_sim import TIME_BETWEEN_BLOCKS, create_dummy_transaction from pisa.conf import BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT, MAX_APPOINTMENTS logging.getLogger().disabled = True @@ -25,7 +25,7 @@ def generate_dummy_appointment(dispute_txid): current_height = r.json().get("block_count") - dummy_appointment_data = {"tx": os.urandom(32).hex(), "tx_id": dispute_txid, "start_time": current_height + 5, + dummy_appointment_data = {"tx": create_dummy_transaction(), "tx_id": dispute_txid, "start_time": current_height + 5, "end_time": current_height + 30, "dispute_delta": 20} cipher = "AES-GCM-128" diff --git a/test/unit/test_carrier.py b/test/unit/test_carrier.py index 374dd24..92db193 100644 --- a/test/unit/test_carrier.py +++ b/test/unit/test_carrier.py @@ -5,7 +5,7 @@ from time import sleep from pisa.carrier import Carrier from pisa.rpc_errors import RPC_VERIFY_ALREADY_IN_CHAIN, RPC_DESERIALIZATION_ERROR -from test.simulator.bitcoind_sim import TIME_BETWEEN_BLOCKS +from test2.simulator.bitcoind_sim import TIME_BETWEEN_BLOCKS logging.getLogger().disabled = True diff --git a/test/unit/test_watcher.py b/test/unit/test_watcher.py index 9ddcbb5..bc0f5e7 100644 --- a/test/unit/test_watcher.py +++ b/test/unit/test_watcher.py @@ -13,7 +13,7 @@ from pisa.conf import MAX_APPOINTMENTS from pisa.appointment import Appointment from pisa.tools import check_txid_format from pisa.utils.auth_proxy import AuthServiceProxy -from test.simulator.bitcoind_sim import TIME_BETWEEN_BLOCKS +from test2.simulator.bitcoind_sim import TIME_BETWEEN_BLOCKS from pisa.conf import EXPIRY_DELTA, BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT logging.getLogger().disabled = True From 1643a7b8873ddda02952ad316a00f374b3231a4a Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Thu, 17 Oct 2019 19:00:15 +0100 Subject: [PATCH 68/82] Multiple simulator improvements The simulator has been updated to work with real transaction structures instead of transaction hashes. It now supports: - Non-SegWit transaction format - Generation of blocks event-wise and time-wise Some small issues have also been fixed. With the new approach, the simulator can be used in a broader range of tests. Moreover tests can run faster since they do not have to wait for blocks. Instead, the generation of new blocks can be triggered by the test. 
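For illustration only (this sketch is not part of the patch; the helper name is hypothetical), a test running the simulator in 'event' mode can trigger the next block on demand instead of sleeping for TIME_BETWEEN_BLOCKS:

import requests

def generate_block():
    # In 'event' mode the simulator mines one block per POST to /generate
    requests.post("http://localhost:18443/generate")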
--- test/simulator/bitcoin_sim_tests.py | 207 +++++++++++++++++----------- test/simulator/bitcoind_sim.py | 103 +++++++------- test/simulator/transaction.py | 150 ++++++++++++++++++++ test/simulator/utils.py | 128 +++++++++++++++++ 4 files changed, 449 insertions(+), 139 deletions(-) create mode 100644 test/simulator/transaction.py create mode 100644 test/simulator/utils.py diff --git a/test/simulator/bitcoin_sim_tests.py b/test/simulator/bitcoin_sim_tests.py index cb1c5bb..f0eafe5 100644 --- a/test/simulator/bitcoin_sim_tests.py +++ b/test/simulator/bitcoin_sim_tests.py @@ -1,107 +1,146 @@ +import re import os +import pytest +from time import sleep +from threading import Thread + +from test.simulator.transaction import TX +from test.simulator.bitcoind_sim import run_simulator from pisa.utils.auth_proxy import AuthServiceProxy, JSONRPCException from pisa.conf import BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT -from pisa.tools import check_txid_format + +MIXED_VALUES = values = [-1, 500, '', '111', [], 1.1, None, '', "a" * 31, "b" * 33, os.urandom(32).hex()] + + +@pytest.fixture(scope='module') +def run_bitcoind(): + bitcoind_thread = Thread(target=run_simulator, kwargs={"mode": "event"}) + bitcoind_thread.daemon = True + bitcoind_thread.start() + + # It takes a little bit of time to start the API (otherwise the requests are sent too early and they fail) + sleep(0.1) + + +@pytest.fixture(scope="module") +def genesis_block_hash(run_bitcoind): + return bitcoin_cli.getblockhash(0) + + +def check_hash_format(txid): + # TODO: #12-check-txid-regexp + return isinstance(txid, str) and re.search(r'^[0-9A-Fa-f]{64}$', txid) is not None bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT)) -# Help should always return 0 -assert(bitcoin_cli.help() == 0) -# getblockhash should return a blockid (which matches the txid format) -block_hash = bitcoin_cli.getblockhash(0) -assert(check_txid_format(block_hash)) +def test_help(run_bitcoind): + # Help should always return 0 + assert(bitcoin_cli.help() == 0) -# Check that the values are within range and of the proper format (all should fail) -values = [-1, 500, None, '', '111', [], 1.1] -print("getblockhash fails ({}):".format(len(values))) -for v in values: +# FIXME: Better assert for the exceptions would be nice (check the returned errno is the expected one) + +def test_getblockhash(genesis_block_hash): + # First block + assert(check_hash_format(genesis_block_hash)) + + # Check that the values are within range and of the proper format (all should fail) + for v in MIXED_VALUES: + try: + bitcoin_cli.getblockhash(v) + assert False + except JSONRPCException as e: + assert True + + +def test_get_block(genesis_block_hash): + # getblock should return a list of transactions and the height + block = bitcoin_cli.getblock(genesis_block_hash) + assert(isinstance(block.get('tx'), list)) + assert(len(block.get('tx')) != 0) + assert(isinstance(block.get('height'), int)) + + # It should fail for wrong data formats and random ids + for v in MIXED_VALUES: + try: + bitcoin_cli.getblock(v) + assert False + except JSONRPCException as e: + assert True + + +def test_decoderawtransaction(genesis_block_hash): + # decoderawtransaction should only return if the given transaction matches a txid format + block = bitcoin_cli.getblock(genesis_block_hash) + coinbase_txid = block.get('tx')[0] + + coinbase_tx = bitcoin_cli.getrawtransaction(coinbase_txid).get("hex") + tx = bitcoin_cli.decoderawtransaction(coinbase_tx) + + 
assert(isinstance(tx, dict)) + assert(isinstance(tx.get('txid'), str)) + assert(check_hash_format(tx.get('txid'))) + + # Therefore should also work for a random transaction hex in our simulation + random_tx = TX.create_dummy_transaction() + tx = bitcoin_cli.decoderawtransaction(random_tx) + assert(isinstance(tx, dict)) + assert(isinstance(tx.get('txid'), str)) + assert(check_hash_format(tx.get('txid'))) + + # But it should fail for not proper formatted one + for v in MIXED_VALUES: + try: + bitcoin_cli.decoderawtransaction(v) + assert False + except JSONRPCException as e: + assert True + + +def test_sendrawtransaction(genesis_block_hash): + # sendrawtransaction should only allow txids that the simulator has not mined yet + bitcoin_cli.sendrawtransaction(TX.create_dummy_transaction()) + + # Any data not matching the txid format or that matches with an already mined transaction should fail try: - block_hash = bitcoin_cli.getblockhash(v) + genesis_tx = bitcoin_cli.getblock(genesis_block_hash).get("tx")[0] + bitcoin_cli.sendrawtransaction(genesis_tx) assert False + except JSONRPCException as e: - print('\t{}'.format(e)) + assert True -# getblock should return a list of transactions and the height -block = bitcoin_cli.getblock(block_hash) -assert(isinstance(block.get('tx'), list)) -assert(len(block.get('tx')) != 0) -assert(isinstance(block.get('height'), int)) + for v in MIXED_VALUES: + try: + bitcoin_cli.sendrawtransaction(v) + assert False + except JSONRPCException as e: + assert True -# Some fails -values += ["a"*64, os.urandom(32).hex()] -print("\ngetblock fails ({}):".format(len(values))) -for v in values: - try: - block = bitcoin_cli.getblock(v) - assert False - except JSONRPCException as e: - print('\t{}'.format(e)) +def test_getrawtransaction(genesis_block_hash): + # getrawtransaction should work for existing transactions, and fail for non-existing ones + genesis_tx = bitcoin_cli.getblock(genesis_block_hash).get("tx")[0] + tx = bitcoin_cli.getrawtransaction(genesis_tx) -# decoderawtransaction should only return if the given transaction matches a txid format -coinbase_tx = block.get('tx')[0] -tx = bitcoin_cli.decoderawtransaction(coinbase_tx) -assert(isinstance(tx, dict)) -assert(isinstance(tx.get('txid'), str)) -assert(check_txid_format(tx.get('txid'))) + assert(isinstance(tx, dict)) + assert(isinstance(tx.get('confirmations'), int)) -# Therefore should also work for a random formatted 32-byte hex in our simulation -random_tx = os.urandom(32).hex() -tx = bitcoin_cli.decoderawtransaction(random_tx) -assert(isinstance(tx, dict)) -assert(isinstance(tx.get('txid'), str)) -assert(check_txid_format(tx.get('txid'))) + for v in MIXED_VALUES: + try: + bitcoin_cli.getrawtransaction(v) + assert False + except JSONRPCException as e: + assert True -# But it should fail for not proper formatted one -values = [1, None, '', "a"*63, "b"*65, [], os.urandom(31).hex()] -print("\ndecoderawtransaction fails ({}):".format(len(values))) -for v in values: - try: - block = bitcoin_cli.decoderawtransaction(v) - assert False - except JSONRPCException as e: - print('\t{}'.format(e)) - -# sendrawtransaction should only allow txids that the simulator has not mined yet -bitcoin_cli.sendrawtransaction(os.urandom(32).hex()) - -# Any data not matching the txid format or that matches with an already mined transaction should fail -values += [coinbase_tx] - -print("\nsendrawtransaction fails ({}):".format(len(values))) - -for v in values: - try: - block = bitcoin_cli.sendrawtransaction(v) - assert False - except 
JSONRPCException as e:
-        print('\t{}'.format(e))
-
-# getrawtransaction should work for existing transactions, and fail for non-existing ones
-tx = bitcoin_cli.getrawtransaction(coinbase_tx)
-
-assert(isinstance(tx, dict))
-assert(isinstance(tx.get('confirmations'), int))
-
-print("\nsendrawtransaction fails ({}):".format(len(values)))
-
-for v in values:
-    try:
-        block = bitcoin_cli.sendrawtransaction(v)
-        assert False
-    except JSONRPCException as e:
-        print('\t{}'.format(e))
-
-# getblockcount should always return a positive integer
-bc = bitcoin_cli.getblockcount()
-assert (isinstance(bc, int))
-assert (bc >= 0)
-
-print("\nAll tests passed!")
+def test_getblockcount():
+    # getblockcount should always return a positive integer
+    bc = bitcoin_cli.getblockcount()
+    assert (isinstance(bc, int))
+    assert (bc >= 0)
diff --git a/test/simulator/bitcoind_sim.py b/test/simulator/bitcoind_sim.py
index 3098717..4306f13 100644
--- a/test/simulator/bitcoind_sim.py
+++ b/test/simulator/bitcoind_sim.py
@@ -1,29 +1,36 @@
-import re
 import os
 import time
 import json
 import logging
 import binascii
-from threading import Thread
+from threading import Thread, Event
 from flask import Flask, request, Response, abort

 from pisa.rpc_errors import *
-from test2.simulator.utils import sha256d
-from pisa.tools import check_txid_format
-from test2.simulator.transaction import TX
-from test2.simulator.zmq_publisher import ZMQPublisher
+from test.simulator.utils import sha256d
+from test.simulator.transaction import TX
+from test.simulator.zmq_publisher import ZMQPublisher
 from pisa.conf import FEED_PROTOCOL, FEED_ADDR, FEED_PORT

 app = Flask(__name__)
 HOST = 'localhost'
 PORT = '18443'

-TIME_BETWEEN_BLOCKS = 5
-
-mempool = []
-mined_transactions = {}
-blocks = {}
 blockchain = []
+blocks = {}
+mined_transactions = {}
+mempool = []
+
+mine_new_block = Event()
+
+
+@app.route('/generate', methods=['POST'])
+def generate():
+    global mine_new_block
+
+    mine_new_block.set()
+
+    return Response(status=200, mimetype='application/json')


 @app.route('/', methods=['POST'])
@@ -72,9 +79,11 @@ def process_request():

         if method == "decoderawtransaction":
             rawtx = get_param(request_data)

-            if isinstance(rawtx, str):
+            if isinstance(rawtx, str) and len(rawtx) % 2 == 0:
+                txid = sha256d(rawtx)
+
                 if TX.deserialize(rawtx) is not None:
-                    response["result"] = {"txid": rawtx}
+                    response["result"] = {"txid": txid}

                 else:
                     response["error"] = {"code": RPC_DESERIALIZATION_ERROR, "message": "TX decode failed"}
@@ -87,10 +96,13 @@ def process_request():

             # TODO: A way of rejecting transactions should be added to test edge cases.
             rawtx = get_param(request_data)

-            if isinstance(rawtx, str):
+            if isinstance(rawtx, str) and len(rawtx) % 2 == 0:
+                txid = sha256d(rawtx)
+
                 if TX.deserialize(rawtx) is not None:
-                    if rawtx not in list(mined_transactions.keys()):
+                    if txid not in list(mined_transactions.keys()):
                         mempool.append(rawtx)
+                        response["result"] = {"txid": txid}

                     else:
                         response["error"] = {"code": RPC_VERIFY_ALREADY_IN_CHAIN,
@@ -107,10 +119,10 @@ def process_request():
             txid = get_param(request_data)

             if isinstance(txid, str):
-                block = blocks.get(mined_transactions.get(txid))
-
-                if block:
-                    response["result"] = {"confirmations": len(blockchain) - block.get('height')}
+                if txid in mined_transactions:
+                    block = blocks.get(mined_transactions[txid]["block"])
+                    rawtx = mined_transactions[txid].get('tx')
+                    response["result"] = {"hex": rawtx, "confirmations": len(blockchain) - block.get('height')}

                 elif txid in mempool:
                     response["result"] = {"confirmations": 0}
@@ -123,8 +135,6 @@ def process_request():
                 response["error"] = no_param_err
                 response["error"]["message"] = response["error"]["message"].format("string")

-            print(response)
-
         elif method == "getblockcount":
             response["result"] = len(blockchain)

@@ -185,43 +195,19 @@ def load_data():
         pass


-def create_dummy_transaction(prev_tx_id=None, prev_out_index=None):
-    tx = TX()
-
-    if prev_tx_id is None:
-        prev_tx_id = os.urandom(32).hex()
-
-    if prev_out_index is None:
-        prev_out_index = 0
-
-    tx.version = 1
-    tx.inputs = 1
-    tx.outputs = 1
-    tx.prev_tx_id = [prev_tx_id]
-    tx.prev_out_index = [prev_out_index]
-    tx.nLockTime = 0
-    tx.scriptSig = ['47304402204e45e16932b8af514961a1d3a1a25fdf3f4f7732e9d624c6c61548ab5fb8cd410220181522ec8eca07de4860'
-                    'a4acdd12909d831cc56cbbac4622082221a8768d1d0901']
-    tx.scriptSig_len = [77]
-    tx.nSequence = [4294967295]
-    tx.value = [5000000000]
-    tx.scriptPubKey = ['4104ae1a62fe09c5f51b13905f07f06b99a2f7159b2225f374cd378d71302fa28414e7aab37397f554a7df5f142c21c'
-                       '1b7303b8a0626f1baded5c72a704f7e6cd84cac']
-    tx.scriptPubKey_len = [67]
-
-    return tx.serialize()
-
-
-def simulate_mining():
-    global mempool, mined_transactions, blocks, blockchain
+def simulate_mining(mode, time_between_blocks):
+    global mempool, mined_transactions, blocks, blockchain, mine_new_block
     prev_block_hash = None

     mining_simulator = ZMQPublisher(topic=b'hashblock', feed_protocol=FEED_PROTOCOL, feed_addr=FEED_ADDR,
                                     feed_port=FEED_PORT)

-    while True:
+    # Set the mining event to initialize the blockchain with a block
+    mine_new_block.set()
+
+    while mine_new_block.wait():
         block_hash = os.urandom(32).hex()
-        coinbase_tx = create_dummy_transaction()
+        coinbase_tx = TX.create_dummy_transaction()
         coinbase_tx_hash = sha256d(coinbase_tx)

         txs_to_mine = dict({coinbase_tx_hash: coinbase_tx})
@@ -248,11 +234,18 @@ def simulate_mining():
             print("New block mined: {}".format(block_hash))
             print("\tTransactions: {}".format(list(txs_to_mine.keys())))

-        time.sleep(TIME_BETWEEN_BLOCKS)
+        if mode == 'time':
+            time.sleep(time_between_blocks)
+        else:
+            mine_new_block.clear()
+

-def run_simulator():
-    mining_thread = Thread(target=simulate_mining)
+def run_simulator(mode='time', time_between_blocks=5):
+    if mode not in ["time", "event"]:
+        raise ValueError("Mode must be 'time' or 'event'")
+
+    mining_thread = Thread(target=simulate_mining, args=[mode, time_between_blocks])
     mining_thread.start()

     # Setting Flask log to ERROR only so it does not mess with out logging
diff --git a/test/simulator/transaction.py b/test/simulator/transaction.py
new file mode 100644
index 0000000..be49a79
--- /dev/null
+++ b/test/simulator/transaction.py
@@ -0,0 +1,150 @@
+# Porting some functionality from https://github.com/sr-gi/bitcoin_tools with some modifications <3
+from os import urandom
+
+from test.simulator.utils import *
+
+
+class TX:
+    """ Defines a class TX (transaction) that holds all the modifiable fields of a Bitcoin transaction, such as
+    version, number of inputs, reference to previous transactions, input and output scripts, value, etc.
+    """
+
+    def __init__(self):
+        self.version = None
+        self.inputs = None
+        self.outputs = None
+        self.nLockTime = None
+        self.prev_tx_id = []
+        self.prev_out_index = []
+        self.scriptSig = []
+        self.scriptSig_len = []
+        self.nSequence = []
+        self.value = []
+        self.scriptPubKey = []
+        self.scriptPubKey_len = []
+
+        self.offset = 0
+        self.hex = ""
+
+    @classmethod
+    def deserialize(cls, hex_tx):
+        """ Builds a transaction object from the hexadecimal serialization format of a transaction that
+        could be obtained, for example, from a block explorer.
+        :param hex_tx: Hexadecimal serialized transaction.
+        :type hex_tx: hex str
+        :return: The transaction built from the provided hex serialized transaction.
+        :rtype: TX
+        """
+
+        tx = cls()
+        tx.hex = hex_tx
+
+        try:
+            tx.version = int(change_endianness(parse_element(tx, 4)), 16)
+
+            # INPUTS
+            tx.inputs = int(parse_varint(tx), 16)
+
+            for i in range(tx.inputs):
+                tx.prev_tx_id.append(change_endianness(parse_element(tx, 32)))
+                tx.prev_out_index.append(int(change_endianness(parse_element(tx, 4)), 16))
+                # ScriptSig
+                tx.scriptSig_len.append(int(parse_varint(tx), 16))
+                tx.scriptSig.append(parse_element(tx, tx.scriptSig_len[i]))
+                tx.nSequence.append(int(parse_element(tx, 4), 16))
+
+            # OUTPUTS
+            tx.outputs = int(parse_varint(tx), 16)
+
+            for i in range(tx.outputs):
+                tx.value.append(int(change_endianness(parse_element(tx, 8)), 16))
+                # ScriptPubKey
+                tx.scriptPubKey_len.append(int(parse_varint(tx), 16))
+                tx.scriptPubKey.append(parse_element(tx, tx.scriptPubKey_len[i]))
+
+            tx.nLockTime = int(parse_element(tx, 4), 16)
+
+            if tx.offset != len(tx.hex):
+                # There is some error in the serialized transaction passed as input. Transaction can't be built
+                tx = None
+            else:
+                tx.offset = 0
+
+        except ValueError:
+            # If a parsing error occurs, the deserialization stops and None is returned
+            tx = None
+
+        return tx
+
+    def serialize(self, rtype=hex):
+        """ Serializes all the transaction fields arranged in the proper order, resulting in a hexadecimal string
+        ready to be broadcast to the network.
+        :param self: self
+        :type self: TX
+        :param rtype: Whether the serialized transaction is returned as a hex str or a byte array.
+        :type rtype: hex or bin
+        :return: Serialized transaction representation (hexadecimal or bin depending on rtype parameter).
+        :rtype: hex str / bin
+        """
+
+        if rtype not in [hex, bin]:
+            raise Exception("Invalid return type (rtype). It should be either hex or bin.")
+        serialized_tx = change_endianness(int2bytes(self.version, 4))  # 4-byte version number (LE).
+
+        # INPUTS
+        serialized_tx += encode_varint(self.inputs)  # Varint number of inputs.
+
+        for i in range(self.inputs):
+            serialized_tx += change_endianness(self.prev_tx_id[i])  # 32-byte hash of the previous transaction (LE).
+            serialized_tx += change_endianness(int2bytes(self.prev_out_index[i], 4))  # 4-byte output index (LE)
+            serialized_tx += encode_varint(len(self.scriptSig[i]) // 2)  # Varint input script length.
+            # ScriptSig
+            serialized_tx += self.scriptSig[i]  # Input script.
+            serialized_tx += int2bytes(self.nSequence[i], 4)  # 4-byte sequence number.
+
+        # OUTPUTS
+        serialized_tx += encode_varint(self.outputs)  # Varint number of outputs.
+
+        if self.outputs != 0:
+            for i in range(self.outputs):
+                serialized_tx += change_endianness(int2bytes(self.value[i], 8))  # 8-byte field Satoshi value (LE)
+                # ScriptPubKey
+                serialized_tx += encode_varint(len(self.scriptPubKey[i]) // 2)  # Varint Output script length.
+                serialized_tx += self.scriptPubKey[i]  # Output script.
+
+        serialized_tx += int2bytes(self.nLockTime, 4)  # 4-byte lock time field
+
+        # If return type has been set to binary, the serialized transaction is converted.
+        if rtype is bin:
+            serialized_tx = unhexlify(serialized_tx)
+
+        return serialized_tx
+
+    @staticmethod
+    def create_dummy_transaction(prev_tx_id=None, prev_out_index=None):
+        tx = TX()
+
+        if prev_tx_id is None:
+            prev_tx_id = urandom(32).hex()
+
+        if prev_out_index is None:
+            prev_out_index = 0
+
+        tx.version = 1
+        tx.inputs = 1
+        tx.outputs = 1
+        tx.prev_tx_id = [prev_tx_id]
+        tx.prev_out_index = [prev_out_index]
+        tx.nLockTime = 0
+        tx.scriptSig = [
+            '47304402204e45e16932b8af514961a1d3a1a25fdf3f4f7732e9d624c6c61548ab5fb8cd410220181522ec8eca07de4860'
+            'a4acdd12909d831cc56cbbac4622082221a8768d1d0901']
+        tx.scriptSig_len = [77]
+        tx.nSequence = [4294967295]
+        tx.value = [5000000000]
+        tx.scriptPubKey = [
+            '4104ae1a62fe09c5f51b13905f07f06b99a2f7159b2225f374cd378d71302fa28414e7aab37397f554a7df5f142c21c'
+            '1b7303b8a0626f1baded5c72a704f7e6cd84cac']
+        tx.scriptPubKey_len = [67]
+
+        return tx.serialize()
diff --git a/test/simulator/utils.py b/test/simulator/utils.py
new file mode 100644
index 0000000..f2f2883
--- /dev/null
+++ b/test/simulator/utils.py
@@ -0,0 +1,128 @@
+# Porting some functionality from https://github.com/sr-gi/bitcoin_tools with some modifications <3
+from hashlib import sha256
+from binascii import unhexlify
+
+
+def change_endianness(x):
+    """ Changes the endianness (from BE to LE and vice versa) of a given value.
+    :param x: Given value whose endianness will be changed.
+    :type x: hex str
+    :return: The opposite endianness representation of the given value.
+    :rtype: hex str
+    """
+
+    # If there is an odd number of elements, we make it even by adding a 0
+    if (len(x) % 2) == 1:
+        x += "0"
+
+    y = bytes(x, 'utf-8')
+    z = y[::-1]
+    return z.decode('utf-8')
+
+
+def parse_varint(tx):
+    """ Parses a given transaction for extracting an encoded varint element.
+    :param tx: Transaction where the element will be extracted.
+    :type tx: TX
+    :return: The parsed varint in hex format.
+    :rtype: hex str
+    """
+
+    # First of all, the offset of the hex transaction is moved to the proper position (i.e. where the varint should be
+    # located) and the length and format of the data to be analyzed is checked.
+    data = tx.hex[tx.offset:]
+    assert (len(data) > 0)
+    size = int(data[:2], 16)
+    assert (size <= 255)
+
+    # Then, the integer is encoded as a varint using the proper prefix, if needed.
+    if size <= 252:  # No prefix
+        storage_length = 1
+    elif size == 253:  # 0xFD
+        storage_length = 3
+    elif size == 254:  # 0xFE
+        storage_length = 5
+    elif size == 255:  # 0xFF
+        storage_length = 9
+    else:
+        raise Exception("Wrong input data size")
+
+    # Finally, the storage length is used to extract the proper number of bytes from the transaction hex and the
+    # transaction offset is updated.
+    varint = data[:storage_length * 2]
+    tx.offset += storage_length * 2
+
+    return varint
+
+
+def parse_element(tx, size):
+    """ Parses a given transaction to extract an element of a given size.
+    :param tx: Transaction where the element will be extracted.
+    :type tx: TX
+    :param size: Size of the parameter to be extracted.
+    :type size: int
+    :return: The extracted element.
+    :rtype: hex str
+    """
+
+    element = tx.hex[tx.offset:tx.offset + size * 2]
+    tx.offset += size * 2
+    return element
+
+
+def encode_varint(value):
+    """ Encodes a given integer value to a varint. It only uses the four varint representation cases used by bitcoin:
+    1-byte, 2-byte, 4-byte or 8-byte integers.
+    :param value: The integer value that will be encoded into varint.
+    :type value: int
+    :return: The varint representation of the given integer value.
+    :rtype: str
+    """
+
+    # The value is checked in order to choose the size of its final representation.
+    # 0xFD(253), 0xFE(254) and 0xFF(255) are special cases, since they are the prefixes defined for 2-byte, 4-byte
+    # and 8-byte long values respectively.
+    if value < pow(2, 8) - 3:
+        size = 1
+        varint = int2bytes(value, size)  # No prefix
+    else:
+        if value < pow(2, 16):
+            size = 2
+            prefix = 253  # 0xFD
+        elif value < pow(2, 32):
+            size = 4
+            prefix = 254  # 0xFE
+        elif value < pow(2, 64):
+            size = 8
+            prefix = 255  # 0xFF
+        else:
+            raise Exception("Wrong input data size")
+        varint = format(prefix, 'x') + change_endianness(int2bytes(value, size))
+
+    return varint
+
+
+def int2bytes(a, b):
+    """ Converts a given integer value (a) to its b-byte representation, in hex format.
+    :param a: Value to be converted.
+    :type a: int
+    :param b: Byte size to be filled.
+    :type b: int
+    :return: The b-bytes representation of the given value (a) in hex format.
+    :rtype: hex str
+    """
+
+    m = pow(2, 8*b) - 1
+    if a > m:
+        raise Exception(str(a) + " is too big to be represented with " + str(b) + " bytes. Maximum value is " +
+                        str(m) + ".")
+
+    return ('%0' + str(2 * b) + 'x') % a
+
+
+def sha256d(hex_data):
+    data = unhexlify(hex_data)
+    double_sha256 = sha256(sha256(data).digest()).hexdigest()
+
+    return change_endianness(double_sha256)
+

From 6735aac0947b884a6b6c3587475aa166b39bf7d9 Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Thu, 17 Oct 2019 19:08:34 +0100
Subject: [PATCH 69/82] Update tests to use bitcoind_sim event-wise

Also cleans some unused imports
---
 test/unit/conftest.py | 12 +++++-
 test/unit/test_api.py | 42 +++++++++-----------
 test/unit/test_carrier.py | 30 ++++++++------
 test/unit/test_encrypted_blob.py | 1 -
 test/unit/test_tools.py | 2 +-
 test/unit/test_watcher.py | 67 ++++++++++++++++++++------------
 6 files changed, 91 insertions(+), 63 deletions(-)

diff --git a/test/unit/conftest.py b/test/unit/conftest.py
index a8a764c..2c0bdea 100644
--- a/test/unit/conftest.py
+++ b/test/unit/conftest.py
@@ -1,14 +1,15 @@
 import pytest
+import requests
 from time import sleep
 from threading import Thread

 from pisa.api import start_api
-from test2.simulator.bitcoind_sim import run_simulator
+from test.simulator.bitcoind_sim import run_simulator, HOST, PORT


 @pytest.fixture(scope='session')
 def run_bitcoind():
-    bitcoind_thread = Thread(target=run_simulator)
+    bitcoind_thread = Thread(target=run_simulator, kwargs={"mode": "event"})
     bitcoind_thread.daemon = True
     bitcoind_thread.start()

@@ -24,3 +25,10 @@ def run_api():

     # It takes a little bit of time to start the API (otherwise the requests are sent too early and they fail)
     sleep(0.1)
+
+
+def generate_block():
+    requests.post(url="http://{}:{}/generate".format(HOST, PORT), timeout=5)
+    sleep(0.5)
+
+
diff --git a/test/unit/test_api.py b/test/unit/test_api.py
index fd293de..ba052e5 100644
--- a/test/unit/test_api.py
+++ b/test/unit/test_api.py
@@ -1,37 +1,42 @@
-import os
 import json
 import pytest
-import time
 import requests
 from hashlib import sha256
 from binascii import unhexlify

 from apps.cli.blob import Blob
 from pisa import HOST, PORT, logging
+from test.simulator.utils import sha256d
+from test.simulator.transaction import TX
+from test.unit.conftest import generate_block
 from pisa.utils.auth_proxy import AuthServiceProxy
-from test2.simulator.bitcoind_sim import TIME_BETWEEN_BLOCKS, create_dummy_transaction
 from pisa.conf import BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT, MAX_APPOINTMENTS

 logging.getLogger().disabled = True

+
 PISA_API = "http://{}:{}".format(HOST, PORT)
 MULTIPLE_APPOINTMENTS = 10

 appointments = []
-locator_dispute_txid_map = {}
+locator_dispute_tx_map = {}


-def generate_dummy_appointment(dispute_txid):
+def generate_dummy_appointment():
     r = requests.get(url=PISA_API + '/get_block_count', timeout=5)
     current_height = r.json().get("block_count")

-    dummy_appointment_data = {"tx": create_dummy_transaction(), "tx_id": dispute_txid, "start_time": current_height + 5,
+    dispute_tx = TX.create_dummy_transaction()
+    dispute_txid = sha256d(dispute_tx)
+    justice_tx = TX.create_dummy_transaction(dispute_txid)
+
+    dummy_appointment_data = {"tx": justice_tx, "tx_id": dispute_txid, "start_time": current_height + 5,
                               "end_time": current_height + 30, "dispute_delta": 20}

     cipher = "AES-GCM-128"
     hash_function = "SHA256"

-    locator = sha256(unhexlify(dummy_appointment_data.get("tx_id"))).hexdigest()
+    locator = sha256(unhexlify(dispute_txid)).hexdigest()
     blob = Blob(dummy_appointment_data.get("tx"), cipher, hash_function)

     encrypted_blob = blob.encrypt((dummy_appointment_data.get("tx_id")))
@@ -41,22 +46,13 @@ 
def generate_dummy_appointment(dispute_txid): "dispute_delta": dummy_appointment_data.get("dispute_delta"), "encrypted_blob": encrypted_blob, "cipher": cipher, "hash_function": hash_function} - return appointment + return appointment, dispute_tx @pytest.fixture -def new_appointment(dispute_txid=None): - appointment = create_appointment(dispute_txid) - - return appointment - - -def create_appointment(dispute_txid=None): - if dispute_txid is None: - dispute_txid = os.urandom(32).hex() - - appointment = generate_dummy_appointment(dispute_txid) - locator_dispute_txid_map[appointment["locator"]] = dispute_txid +def new_appointment(): + appointment, dispute_tx = generate_dummy_appointment() + locator_dispute_tx_map[appointment["locator"]] = dispute_tx return appointment @@ -147,12 +143,12 @@ def test_get_all_appointments_responder(): bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT)) locators = [appointment["locator"] for appointment in appointments] - for locator, dispute_txid in locator_dispute_txid_map.items(): + for locator, dispute_tx in locator_dispute_tx_map.items(): if locator in locators: - bitcoin_cli.sendrawtransaction(dispute_txid) + bitcoin_cli.sendrawtransaction(dispute_tx) # Wait a bit for them to get confirmed - time.sleep(TIME_BETWEEN_BLOCKS) + generate_block() # Get all appointments r = requests.get(url=PISA_API + "/get_all_appointments") diff --git a/test/unit/test_carrier.py b/test/unit/test_carrier.py index 92db193..d73c749 100644 --- a/test/unit/test_carrier.py +++ b/test/unit/test_carrier.py @@ -1,11 +1,13 @@ import pytest import logging from os import urandom -from time import sleep + from pisa.carrier import Carrier +from test.simulator.utils import sha256d +from test.simulator.transaction import TX +from test.unit.conftest import generate_block from pisa.rpc_errors import RPC_VERIFY_ALREADY_IN_CHAIN, RPC_DESERIALIZATION_ERROR -from test2.simulator.bitcoind_sim import TIME_BETWEEN_BLOCKS logging.getLogger().disabled = True @@ -24,23 +26,28 @@ def carrier(): def test_send_transaction(run_bitcoind, carrier): # We are mocking bitcoind and in our simulator txid == tx - tx = urandom(32).hex() - receipt = carrier.send_transaction(tx, tx) + tx = TX.create_dummy_transaction() + txid = sha256d(tx) + + receipt = carrier.send_transaction(tx, txid) assert(receipt.delivered is True) def test_send_double_spending_transaction(carrier): # We can test what happens if the same transaction is sent twice - tx = urandom(32).hex() - receipt = carrier.send_transaction(tx, tx) - sent_txs.append(tx) + tx = TX.create_dummy_transaction() + txid = sha256d(tx) + + receipt = carrier.send_transaction(tx, txid) + sent_txs.append(txid) # Wait for a block to be mined - sleep(2*TIME_BETWEEN_BLOCKS) + for _ in range(2): + generate_block() # Try to send it again - receipt2 = carrier.send_transaction(tx, tx) + receipt2 = carrier.send_transaction(tx, txid) # The carrier should report delivered True for both, but in the second case the transaction was already delivered # (either by himself or someone else) @@ -51,8 +58,9 @@ def test_send_double_spending_transaction(carrier): def test_send_transaction_invalid_format(carrier): # Test sending a transaction that does not fits the format - tx = urandom(31).hex() - receipt = carrier.send_transaction(tx, tx) + tx = TX.create_dummy_transaction() + txid = sha256d(tx) + receipt = carrier.send_transaction(txid, txid) assert (receipt.delivered is False and receipt.reason == RPC_DESERIALIZATION_ERROR) diff 
--git a/test/unit/test_encrypted_blob.py b/test/unit/test_encrypted_blob.py index 26e1d9e..25dc78f 100644 --- a/test/unit/test_encrypted_blob.py +++ b/test/unit/test_encrypted_blob.py @@ -1,5 +1,4 @@ from os import urandom -from cryptography.exceptions import InvalidTag from pisa import logging from pisa.encrypted_blob import EncryptedBlob diff --git a/test/unit/test_tools.py b/test/unit/test_tools.py index e3bce92..251663e 100644 --- a/test/unit/test_tools.py +++ b/test/unit/test_tools.py @@ -1,4 +1,4 @@ -from pisa import logging, bitcoin_cli +from pisa import logging from pisa.tools import check_txid_format from pisa.tools import can_connect_to_bitcoind, in_correct_network diff --git a/test/unit/test_watcher.py b/test/unit/test_watcher.py index bc0f5e7..90c2ae7 100644 --- a/test/unit/test_watcher.py +++ b/test/unit/test_watcher.py @@ -1,22 +1,25 @@ import pytest import logging -from os import urandom -from time import sleep from uuid import uuid4 from hashlib import sha256 from threading import Thread +from binascii import unhexlify from queue import Queue, Empty +from apps.cli.blob import Blob from pisa.watcher import Watcher from pisa.responder import Responder from pisa.conf import MAX_APPOINTMENTS from pisa.appointment import Appointment from pisa.tools import check_txid_format +from test.simulator.utils import sha256d +from test.simulator.transaction import TX +from test.unit.conftest import generate_block from pisa.utils.auth_proxy import AuthServiceProxy -from test2.simulator.bitcoind_sim import TIME_BETWEEN_BLOCKS from pisa.conf import EXPIRY_DELTA, BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT logging.getLogger().disabled = True + APPOINTMENTS = 5 START_TIME_OFFSET = 1 END_TIME_OFFSET = 1 @@ -27,37 +30,44 @@ def watcher(): return Watcher() -def create_appointment(locator=None): +def generate_dummy_appointment(): bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT)) - if locator is None: - locator = urandom(32).hex() + dispute_tx = TX.create_dummy_transaction() + dispute_txid = sha256d(dispute_tx) + justice_tx = TX.create_dummy_transaction(dispute_txid) start_time = bitcoin_cli.getblockcount() + 1 end_time = start_time + 1 dispute_delta = 20 - encrypted_blob_data = urandom(100).hex() + cipher = "AES-GCM-128" hash_function = "SHA256" - return Appointment(locator, start_time, end_time, dispute_delta, encrypted_blob_data, cipher, hash_function) + locator = sha256(unhexlify(dispute_txid)).hexdigest() + blob = Blob(justice_tx, cipher, hash_function) + + encrypted_blob = blob.encrypt(dispute_txid) + + appointment = Appointment(locator, start_time, end_time, dispute_delta, encrypted_blob, cipher, hash_function) + + return appointment, dispute_tx def create_appointments(n): locator_uuid_map = dict() appointments = dict() - txids = [] + dispute_txs = [] for i in range(n): - txid = urandom(32) + appointment, dispute_tx = generate_dummy_appointment() uuid = uuid4().hex - locator = sha256(txid).hexdigest() - appointments[uuid] = create_appointment(locator) - locator_uuid_map[locator] = [uuid] - txids.append(txid.hex()) + appointments[uuid] = appointment + locator_uuid_map[appointment.locator] = [uuid] + dispute_txs.append(dispute_tx) - return appointments, locator_uuid_map, txids + return appointments, locator_uuid_map, dispute_txs def test_init(watcher): @@ -71,13 +81,14 @@ def test_init(watcher): def test_add_appointment(run_bitcoind, watcher): - # The watcher automatically fire do_watch and do_subscribe on adding an 
appointment if it is asleep (initial state).
+    # The watcher automatically fires do_watch and do_subscribe on adding an appointment if it is asleep (initial state).
     # Avoid this by setting the state to awake.
     watcher.asleep = False

     # We should be able to add appointments up to the limit
     for _ in range(10):
-        added_appointment = watcher.add_appointment(create_appointment())
+        appointment, dispute_tx = generate_dummy_appointment()
+        added_appointment = watcher.add_appointment(appointment)

         assert added_appointment is True

@@ -87,11 +98,13 @@ def test_add_too_many_appointments(watcher):
     watcher.appointments = dict()

     for _ in range(MAX_APPOINTMENTS):
-        added_appointment = watcher.add_appointment(create_appointment())
+        appointment, dispute_tx = generate_dummy_appointment()
+        added_appointment = watcher.add_appointment(appointment)

         assert added_appointment is True

-    added_appointment = watcher.add_appointment(create_appointment())
+    appointment, dispute_tx = generate_dummy_appointment()
+    added_appointment = watcher.add_appointment(appointment)

     assert added_appointment is False

@@ -104,7 +117,8 @@ def test_do_subscribe(watcher):
     zmq_thread.start()

     try:
-        block_hash = watcher.block_queue.get(timeout=MAX_APPOINTMENTS)
+        generate_block()
+        block_hash = watcher.block_queue.get()

         assert check_txid_format(block_hash)
     except Empty:
@@ -115,25 +129,28 @@ def test_do_watch(watcher):
     bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT))

     # We will wipe all the previous data and add 5 appointments
-    watcher.appointments, watcher.locator_uuid_map, txids = create_appointments(APPOINTMENTS)
+    watcher.appointments, watcher.locator_uuid_map, dispute_txs = create_appointments(APPOINTMENTS)

     watch_thread = Thread(target=watcher.do_watch)
     watch_thread.daemon = True
     watch_thread.start()

     # Broadcast the first two
-    for txid in txids[:2]:
-        bitcoin_cli.sendrawtransaction(txid)
+    for dispute_tx in dispute_txs[:2]:
+        r = bitcoin_cli.sendrawtransaction(dispute_tx)

     # After leaving some time for the block to be mined and processed, the number of appointments should have reduced
     # by two
-    sleep(TIME_BETWEEN_BLOCKS*(START_TIME_OFFSET+END_TIME_OFFSET + 1))
+    for _ in range(START_TIME_OFFSET + END_TIME_OFFSET):
+        generate_block()
+
     assert len(watcher.appointments) == APPOINTMENTS - 2

     # The rest of appointments will timeout after the end (2) + EXPIRY_DELTA
     # Wait for an additional block to be safe
-    sleep((EXPIRY_DELTA + 2 + 1) * TIME_BETWEEN_BLOCKS)
+    for _ in range(EXPIRY_DELTA + START_TIME_OFFSET + END_TIME_OFFSET):
+        generate_block()

     assert len(watcher.appointments) == 0
     assert watcher.asleep is True

From c09becd7fa58146379987e51e9cc8b973358afe8 Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Thu, 17 Oct 2019 19:11:32 +0100
Subject: [PATCH 70/82] Fixes to_dict/to_json and some other fixes
---
 pisa/api.py | 2 +-
 pisa/responder.py | 12 +++++++++---
 2 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/pisa/api.py b/pisa/api.py
index cd05e02..44fcb8c 100644
--- a/pisa/api.py
+++ b/pisa/api.py
@@ -102,7 +102,7 @@ def get_all_appointments():

     if watcher.responder:
         for uuid, job in watcher.responder.jobs.items():
-            responder_jobs[uuid] = job.to_json()
+            responder_jobs[uuid] = job.to_dict()

     response = jsonify({"watcher_appointments": watcher_appointments, "responder_jobs": responder_jobs})

diff --git a/pisa/responder.py b/pisa/responder.py
index 0b948e0..dcce0a6 100644
--- a/pisa/responder.py
+++ b/pisa/responder.py
@@ -1,6 +1,7 @@
+import json
 from queue import Queue
-from threading import Thread
 from hashlib import sha256
+from threading import Thread
 from binascii import unhexlify

 from pisa.logger import Logger
@@ -29,11 +30,14 @@ class Job:
         # can be directly got from DB
         self.locator = sha256(unhexlify(dispute_txid)).hexdigest()

-    def to_json(self):
+    def to_dict(self):
         job = {"locator": self.locator, "justice_rawtx": self.justice_rawtx,
                "appointment_end": self.appointment_end}

         return job

+    def to_json(self):
+        return json.dumps(self.to_dict())
+

 class Responder:
     def __init__(self):
@@ -62,6 +66,8 @@ class Responder:
             # TODO: Add the missing reasons (e.g. RPC_VERIFY_REJECTED)
             pass

+        return receipt
+
     def create_job(self, uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, confirmations=0,
                    retry=False):

@@ -155,7 +161,7 @@ class Responder:

         for uuid, job in self.jobs.items():
             if job.appointment_end <= height:
-                tx = Carrier.get_transaction(job.dispute_txid)
+                tx = Carrier.get_transaction(job.justice_txid)

                 # FIXME: Should be improved with the librarian
                 confirmations = tx.get('confirmations')

From d1d9693435f9e7b7c3b7e776bb4d39e592469cb8 Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Thu, 17 Oct 2019 19:27:43 +0100
Subject: [PATCH 71/82] Adds coverage exclusions
---
 .coveragerc | 6 ++++++
 1 file changed, 6 insertions(+)
 create mode 100644 .coveragerc

diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 0000000..74c5a28
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,6 @@
+[run]
+omit =
+    pisa/pisad.py
+    pisa/sample_conf.py
+    pisa/time_traveler.py
+    pisa/utils/auth_proxy.py
\ No newline at end of file

From e5013d5bcabf3a7d997400d964efd3fc319aa724 Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Mon, 21 Oct 2019 16:53:29 +0100
Subject: [PATCH 72/82] Disables Flask logging
---
 pisa/api.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/pisa/api.py b/pisa/api.py
index 44fcb8c..af2e491 100644
--- a/pisa/api.py
+++ b/pisa/api.py
@@ -1,3 +1,4 @@
+import os
 import json

 from flask import Flask, request, Response, abort, jsonify
@@ -11,6 +12,7 @@ from pisa.block_processor import BlockProcessor

 # ToDo: #5-add-async-to-api
 app = Flask(__name__)
+
 HTTP_OK = 200
 HTTP_BAD_REQUEST = 400
 HTTP_SERVICE_UNAVAILABLE = 503
@@ -125,7 +127,8 @@ def start_api():
     watcher = Watcher()
     inspector = Inspector()

-    # Setting Flask log t ERROR only so it does not mess with out logging
+    # Setting Flask log to ERROR only so it does not mess with our logging. 
Also disabling flask initial messages logging.getLogger('werkzeug').setLevel(logging.ERROR) + os.environ['WERKZEUG_RUN_MAIN'] = 'true' app.run(host=HOST, port=PORT) From 71ce7c46ec4c7720ceae2ca55fd7584a3fdd7a39 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Mon, 21 Oct 2019 16:54:48 +0100 Subject: [PATCH 73/82] Removes the parameters for do_subscribe + PEP8 --- pisa/responder.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pisa/responder.py b/pisa/responder.py index dcce0a6..d5f0f63 100644 --- a/pisa/responder.py +++ b/pisa/responder.py @@ -88,20 +88,20 @@ class Responder: if confirmations == 0: self.unconfirmed_txs.append(justice_txid) - logger.info("New job added.", - dispute_txid=dispute_txid, justice_txid=justice_txid, appointment_end=appointment_end) + logger.info("New job added.", dispute_txid=dispute_txid, justice_txid=justice_txid, + appointment_end=appointment_end) if self.asleep: self.asleep = False self.block_queue = Queue() - zmq_thread = Thread(target=self.do_subscribe, args=[self.block_queue]) + zmq_thread = Thread(target=self.do_subscribe) responder = Thread(target=self.do_watch) zmq_thread.start() responder.start() - def do_subscribe(self, block_queue): + def do_subscribe(self): self.zmq_subscriber = ZMQHandler(parent='Responder') - self.zmq_subscriber.handle(block_queue) + self.zmq_subscriber.handle(self.block_queue) def do_watch(self): # ToDo: #9-add-data-persistence From 94156a67cc81d9ad71c6dd5ffff20de41cac4f92 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Mon, 21 Oct 2019 18:12:05 +0100 Subject: [PATCH 74/82] Adds generate_blocks --- test/unit/conftest.py | 5 +++++ test/unit/test_carrier.py | 5 ++--- test/unit/test_watcher.py | 11 ++++------- 3 files changed, 11 insertions(+), 10 deletions(-) diff --git a/test/unit/conftest.py b/test/unit/conftest.py index 2c0bdea..e337de9 100644 --- a/test/unit/conftest.py +++ b/test/unit/conftest.py @@ -32,3 +32,8 @@ def generate_block(): sleep(0.5) +def generate_blocks(n): + for _ in range(n): + generate_block() + + diff --git a/test/unit/test_carrier.py b/test/unit/test_carrier.py index d73c749..6671283 100644 --- a/test/unit/test_carrier.py +++ b/test/unit/test_carrier.py @@ -6,7 +6,7 @@ from os import urandom from pisa.carrier import Carrier from test.simulator.utils import sha256d from test.simulator.transaction import TX -from test.unit.conftest import generate_block +from test.unit.conftest import generate_blocks from pisa.rpc_errors import RPC_VERIFY_ALREADY_IN_CHAIN, RPC_DESERIALIZATION_ERROR logging.getLogger().disabled = True @@ -43,8 +43,7 @@ def test_send_double_spending_transaction(carrier): sent_txs.append(txid) # Wait for a block to be mined - for _ in range(2): - generate_block() + generate_blocks(2) # Try to send it again receipt2 = carrier.send_transaction(tx, txid) diff --git a/test/unit/test_watcher.py b/test/unit/test_watcher.py index 90c2ae7..6997a99 100644 --- a/test/unit/test_watcher.py +++ b/test/unit/test_watcher.py @@ -14,8 +14,8 @@ from pisa.appointment import Appointment from pisa.tools import check_txid_format from test.simulator.utils import sha256d from test.simulator.transaction import TX -from test.unit.conftest import generate_block from pisa.utils.auth_proxy import AuthServiceProxy +from test.unit.conftest import generate_block, generate_blocks from pisa.conf import EXPIRY_DELTA, BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT logging.getLogger().disabled = True @@ -137,20 +137,17 @@ def test_do_watch(watcher): # Broadcast the first 
two for dispute_tx in dispute_txs[:2]: - r = bitcoin_cli.sendrawtransaction(dispute_tx) + bitcoin_cli.sendrawtransaction(dispute_tx) # After leaving some time for the block to be mined and processed, the number of appointments should have reduced # by two - for _ in range(START_TIME_OFFSET + END_TIME_OFFSET): - generate_block() + generate_blocks(START_TIME_OFFSET + END_TIME_OFFSET) assert len(watcher.appointments) == APPOINTMENTS - 2 # The rest of appointments will timeout after the end (2) + EXPIRY_DELTA # Wait for an additional block to be safe - - for _ in range(EXPIRY_DELTA + START_TIME_OFFSET + END_TIME_OFFSET): - generate_block() + generate_blocks(EXPIRY_DELTA + START_TIME_OFFSET + END_TIME_OFFSET) assert len(watcher.appointments) == 0 assert watcher.asleep is True From 3afcf9e7088c41f780319c1953d3eeddb1b4573d Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Mon, 21 Oct 2019 19:53:34 +0100 Subject: [PATCH 75/82] Flags methods not covered by unit tests --- pisa/block_processor.py | 2 ++ pisa/carrier.py | 3 +++ pisa/tools.py | 3 +++ 3 files changed, 8 insertions(+) diff --git a/pisa/block_processor.py b/pisa/block_processor.py index 83b0441..26f16c1 100644 --- a/pisa/block_processor.py +++ b/pisa/block_processor.py @@ -65,6 +65,7 @@ class BlockProcessor: return potential_matches @staticmethod + # NOTCOVERED def get_matches(potential_matches, locator_uuid_map, appointments): matches = [] @@ -89,6 +90,7 @@ class BlockProcessor: # DISCUSS: This method comes from the Responder and seems like it could go back there. @staticmethod + # NOTCOVERED def check_confirmations(txs, unconfirmed_txs, tx_job_map, missed_confirmations): for tx in txs: diff --git a/pisa/carrier.py b/pisa/carrier.py index 130cfca..a0dce76 100644 --- a/pisa/carrier.py +++ b/pisa/carrier.py @@ -17,6 +17,7 @@ class Receipt: class Carrier: + # NOTCOVERED def send_transaction(self, rawtx, txid): try: logger.info("Pushing transaction to the network", txid=txid, rawtx=rawtx) @@ -49,6 +50,8 @@ class Carrier: receipt = Receipt(delivered=True, confirmations=confirmations, reason=RPC_VERIFY_ALREADY_IN_CHAIN) else: + # WIP: It would be better to return and let the caller call again if necessary + # There's a really unlikely edge case where a transaction can be reorged between receiving the # notification and querying the data. In such a case we just resend self.send_transaction(rawtx, txid) diff --git a/pisa/tools.py b/pisa/tools.py index adba5e5..b1e76d0 100644 --- a/pisa/tools.py +++ b/pisa/tools.py @@ -7,12 +7,14 @@ from pisa.rpc_errors import RPC_INVALID_ADDRESS_OR_KEY from pisa.utils.auth_proxy import AuthServiceProxy, JSONRPCException +# NOTCOVERED def bitcoin_cli(): return AuthServiceProxy("http://%s:%s@%s:%d" % (conf.BTC_RPC_USER, conf.BTC_RPC_PASSWD, conf.BTC_RPC_HOST, conf.BTC_RPC_PORT)) # TODO: currently only used in the Responder; might move there or in the BlockProcessor +# NOTCOVERED def check_tx_in_chain(tx_id, logger=Logger(), tx_label='Transaction'): tx_in_chain = False confirmations = 0 @@ -39,6 +41,7 @@ def check_tx_in_chain(tx_id, logger=Logger(), tx_label='Transaction'): return tx_in_chain, confirmations +# NOTCOVERED def can_connect_to_bitcoind(): can_connect = True From 018faa07d1ccdece14b83a22527cc2042cacd01e Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Tue, 22 Oct 2019 13:37:30 +0100 Subject: [PATCH 76/82] Fixes change endianness Porting change_endianness from Python2 to Python3 introduced a bug. 
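
To make the nature of the bug concrete, here is a minimal sketch with a hypothetical input value
(illustrative only): reversing the UTF-8 encoding of the hex string reverses individual hex digits,
while a correct byte-swap must reverse the decoded bytes, keeping each two-digit byte intact.

    from binascii import hexlify, unhexlify

    x = "0437cd"
    # Python3 port of the old code: reverses hex digits, not bytes
    broken = bytes(x, 'utf-8')[::-1].decode('utf-8')     # 'dc7340'
    # Fixed behaviour: decode to bytes, reverse, re-encode to hex
    fixed = hexlify(unhexlify(x)[::-1]).decode('utf-8')  # 'cd3704'
    assert broken != fixed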
--- test/simulator/utils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/simulator/utils.py b/test/simulator/utils.py index f2f2883..98ac072 100644 --- a/test/simulator/utils.py +++ b/test/simulator/utils.py @@ -1,6 +1,6 @@ # Porting some functionality from https://github.com/sr-gi/bitcoin_tools with some modifications <3 from hashlib import sha256 -from binascii import unhexlify +from binascii import unhexlify, hexlify def change_endianness(x): @@ -15,9 +15,9 @@ def change_endianness(x): if (len(x) % 2) == 1: x += "0" - y = bytes(x, 'utf-8') + y = unhexlify(x) z = y[::-1] - return z.decode('utf-8') + return hexlify(z).decode('utf-8') def parse_varint(tx): From 45552e390cc74034a0754577f68cc9d7527a9048 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Tue, 22 Oct 2019 15:01:50 +0100 Subject: [PATCH 77/82] Changes send_transaction to not be recursive send_transaction had an edge case that was treated recursively. Return a receipt instead and handle that in the caller --- pisa/carrier.py | 9 ++++----- pisa/errors.py | 3 +++ 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/pisa/carrier.py b/pisa/carrier.py index a0dce76..a4faab2 100644 --- a/pisa/carrier.py +++ b/pisa/carrier.py @@ -2,7 +2,7 @@ from pisa.rpc_errors import * from pisa.logger import Logger from pisa.tools import bitcoin_cli from pisa.utils.auth_proxy import JSONRPCException -from pisa.errors import UNKNOWN_JSON_RPC_EXCEPTION +from pisa.errors import UNKNOWN_JSON_RPC_EXCEPTION, RPC_TX_REORGED_AFTER_BROADCAST logger = Logger("Carrier") @@ -50,11 +50,10 @@ class Carrier: receipt = Receipt(delivered=True, confirmations=confirmations, reason=RPC_VERIFY_ALREADY_IN_CHAIN) else: - # WIP: It would be better to return and let the caller call again if necessary - # There's a really unlikely edge case where a transaction can be reorged between receiving the - # notification and querying the data. In such a case we just resend - self.send_transaction(rawtx, txid) + # notification and querying the data. Notice that this implies the tx being also kicked off the + # mempool, which again is really unlikely. + receipt = Receipt(delivered=False, reason=RPC_TX_REORGED_AFTER_BROADCAST) elif errno == RPC_DESERIALIZATION_ERROR: # Adding this here just for completeness. We should never end up here. The Carrier only sends txs diff --git a/pisa/errors.py b/pisa/errors.py index 8aec660..c2e5a4d 100644 --- a/pisa/errors.py +++ b/pisa/errors.py @@ -9,6 +9,9 @@ APPOINTMENT_WRONG_FIELD = -7 APPOINTMENT_CIPHER_NOT_SUPPORTED = -8 APPOINTMENT_HASH_FUNCTION_NOT_SUPPORTED = -9 +# Custom RPC errors +RPC_TX_REORGED_AFTER_BROADCAST = -98 + # UNHANDLED UNKNOWN_JSON_RPC_EXCEPTION = -99 From be16d8af73e811492c09a5e02eb0fca2e88a8d00 Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Tue, 22 Oct 2019 15:03:18 +0100 Subject: [PATCH 78/82] Improves responder. 
Several changes have been performed:
- Retry counter has been removed (#23)
- Rebroadcast now returns receipts
- Re-calling send_transaction if a fixable error occurs should now be handled in the responder (missing)
- Fixes some small bugs
---
 pisa/responder.py | 81 ++++++++++++++++++++++++-----------------------
 1 file changed, 41 insertions(+), 40 deletions(-)

diff --git a/pisa/responder.py b/pisa/responder.py
index d5f0f63..058b658 100644
--- a/pisa/responder.py
+++ b/pisa/responder.py
@@ -18,14 +18,12 @@ logger = Logger("Responder")


 class Job:
-    def __init__(self, dispute_txid, justice_txid, justice_rawtx, appointment_end, retry_counter=0):
+    def __init__(self, dispute_txid, justice_txid, justice_rawtx, appointment_end):
         self.dispute_txid = dispute_txid
         self.justice_txid = justice_txid
         self.justice_rawtx = justice_rawtx
         self.appointment_end = appointment_end

-        self.retry_counter = retry_counter
-
         # FIXME: locator is here so we can give info about jobs for now. It can be either passed from watcher or info
         # can be directly got from DB
         self.locator = sha256(unhexlify(dispute_txid)).hexdigest()
@@ -56,11 +54,11 @@ class Responder:
         carrier = Carrier()
         receipt = carrier.send_transaction(justice_rawtx, justice_txid)

+        # do_watch can call add_response recursively if a broadcast transaction does not get confirmations
+        # retry holds that information. If retry is true the job already exists
         if receipt.delivered:
-            # do_watch can call add_response recursively if a broadcast transaction does not get confirmations
-            # retry holds such information.
-            self.create_job(uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, retry=retry,
-                            confirmations=receipt.confirmations)
+            if not retry:
+                self.create_job(uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, receipt.confirmations)

         else:
             # TODO: Add the missing reasons (e.g. 
RPC_VERIFY_REJECTED) @@ -68,25 +66,17 @@ class Responder: return receipt - def create_job(self, uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, confirmations=0, - retry=False): + def create_job(self, uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, confirmations=0): + self.jobs[uuid] = Job(dispute_txid, justice_txid, justice_rawtx, appointment_end) - # ToDo: #23-define-behaviour-approaching-end - if retry: - self.jobs[uuid].retry_counter += 1 - self.missed_confirmations[justice_txid] = 0 + if justice_txid in self.tx_job_map: + self.tx_job_map[justice_txid].append(uuid) else: - self.jobs[uuid] = Job(dispute_txid, justice_txid, justice_rawtx, appointment_end, confirmations) + self.tx_job_map[justice_txid] = [uuid] - if justice_txid in self.tx_job_map: - self.tx_job_map[justice_txid].append(uuid) - - else: - self.tx_job_map[justice_txid] = [uuid] - - if confirmations == 0: - self.unconfirmed_txs.append(justice_txid) + if confirmations == 0: + self.unconfirmed_txs.append(justice_txid) logger.info("New job added.", dispute_txid=dispute_txid, justice_txid=justice_txid, appointment_end=appointment_end) @@ -106,7 +96,7 @@ class Responder: def do_watch(self): # ToDo: #9-add-data-persistence # change prev_block_hash to the last known tip when bootstrapping - prev_block_hash = 0 + prev_block_hash = BlockProcessor.get_best_block_hash() while len(self.jobs) > 0: # We get notified for every new received block @@ -121,21 +111,21 @@ class Responder: block_hash=block_hash, prev_block_hash=block.get('previousblockhash'), txs=txs) # ToDo: #9-add-data-persistence - # change prev_block_hash condition - if prev_block_hash == block.get('previousblockhash') or prev_block_hash == 0: + if prev_block_hash == block.get('previousblockhash'): self.unconfirmed_txs, self.missed_confirmations = BlockProcessor.check_confirmations( txs, self.unconfirmed_txs, self.tx_job_map, self.missed_confirmations) txs_to_rebroadcast = self.get_txs_to_rebroadcast(txs) - Cleaner.delete_completed_jobs(self.jobs, self.tx_job_map, self.get_completed_jobs(height), height) + completed_jobs = self.get_completed_jobs(height) + Cleaner.delete_completed_jobs(self.jobs, self.tx_job_map, completed_jobs, height) self.rebroadcast(txs_to_rebroadcast) else: - logger.warning("Reorg found", - local_prev_block_hash=prev_block_hash, + logger.warning("Reorg found", local_prev_block_hash=prev_block_hash, remote_prev_block_hash=block.get('previousblockhash')) + # ToDo: #24-properly-handle-reorgs self.handle_reorgs() prev_block_hash = block.get('hash') @@ -160,31 +150,42 @@ class Responder: completed_jobs = [] for uuid, job in self.jobs.items(): - if job.appointment_end <= height: + if job.appointment_end <= height and job.justice_txid not in self.unconfirmed_txs: tx = Carrier.get_transaction(job.justice_txid) # FIXME: Should be improved with the librarian - confirmations = tx.get('confirmations') - if tx is not None and confirmations > MIN_CONFIRMATIONS: - # The end of the appointment has been reached - completed_jobs.append((uuid, confirmations)) + if tx is not None: + confirmations = tx.get('confirmations') + + if confirmations >= MIN_CONFIRMATIONS: + # The end of the appointment has been reached + completed_jobs.append((uuid, confirmations)) return completed_jobs - def rebroadcast(self, jobs_to_rebroadcast): + def rebroadcast(self, txs_to_rebroadcast): # DISCUSS: #22-discuss-confirmations-before-retry # ToDo: #23-define-behaviour-approaching-end - for tx in jobs_to_rebroadcast: - for uuid in self.tx_job_map[tx]: - 
self.add_response(uuid, self.jobs[uuid].dispute_txid, self.jobs[uuid].justice_txid,
-                                  self.jobs[uuid].justice_rawtx, self.jobs[uuid].appointment_end, retry=True)
+        receipts = []
+
+        for txid in txs_to_rebroadcast:
+            self.missed_confirmations[txid] = 0
+
+            for uuid in self.tx_job_map[txid]:
+                job = self.jobs[uuid]
+                receipt = self.add_response(uuid, job.dispute_txid, job.justice_txid, job.justice_rawtx,
+                                            job.appointment_end, retry=True)

                 logger.warning("Transaction has missed many confirmations. Rebroadcasting.",
-                               justice_txid=self.jobs[uuid].justice_txid,
-                               confirmations_missed=CONFIRMATIONS_BEFORE_RETRY)
+                               justice_txid=job.justice_txid, confirmations_missed=CONFIRMATIONS_BEFORE_RETRY)
+
+                receipts.append((txid, receipt))
+
+        return receipts

     # FIXME: Legacy code, must be checked and updated/fixed
+    # NOTCOVERED
     def handle_reorgs(self):
         for uuid, job in self.jobs.items():
             # First we check if the dispute transaction is still in the blockchain. If not, the justice can not be

From 7a222d43fe373349b006ccf8c9d51519b7095840 Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Tue, 22 Oct 2019 15:09:47 +0100
Subject: [PATCH 79/82] Updates test cleaner and removes retry_counter for
 responder
---
 test/unit/test_cleaner.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/test/unit/test_cleaner.py b/test/unit/test_cleaner.py
index 237a2e3..0fdf5df 100644
--- a/test/unit/test_cleaner.py
+++ b/test/unit/test_cleaner.py
@@ -45,14 +45,14 @@ def set_up_jobs(total_jobs):
         txid = urandom(32).hex()

         # Assign both justice_txid and dispute_txid the same id (it shouldn't matter)
-        jobs[uuid] = Job(txid, txid, None, None, None)
+        jobs[uuid] = Job(txid, txid, None, None)
         tx_job_map[txid] = [uuid]

         # Each justice_txid can have more than one uuid assigned to it. Do a coin toss to add multiple ones
         while random.randint(0, 1):
             uuid = uuid4().hex
-            jobs[uuid] = Job(txid, txid, None, None, None)
+            jobs[uuid] = Job(txid, txid, None, None)
             tx_job_map[txid].append(uuid)

     return jobs, tx_job_map

From 9d9d7b1e5b849aef971be48edde736749b5a91a2 Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Tue, 22 Oct 2019 15:14:38 +0100
Subject: [PATCH 80/82] Adds responder unit tests
---
 test/unit/test_responder.py | 333 ++++++++++++++++++++++++++++++++++++
 1 file changed, 333 insertions(+)
 create mode 100644 test/unit/test_responder.py

diff --git a/test/unit/test_responder.py b/test/unit/test_responder.py
new file mode 100644
index 0000000..72e617e
--- /dev/null
+++ b/test/unit/test_responder.py
@@ -0,0 +1,333 @@
+import json
+import pytest
+from os import urandom
+from uuid import uuid4
+from threading import Thread
+from queue import Queue, Empty
+
+from pisa.tools import check_txid_format
+from test.simulator.utils import sha256d
+from pisa.responder import Responder, Job
+from test.simulator.bitcoind_sim import TX
+from test.unit.conftest import generate_block, generate_blocks
+from pisa.utils.auth_proxy import AuthServiceProxy
+from pisa.conf import BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT
+
+
+@pytest.fixture(scope="module")
+def responder():
+    return Responder()
+
+
+def create_dummy_job_data(random_txid=False, justice_rawtx=None):
+    bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT))
+
+    # The following transaction data corresponds to a valid transaction. For some tests it may be interesting to have
+    # some valid data, but for others we may need multiple different justice_txids.
+
+    dispute_txid = "0437cd7f8525ceed2324359c2d0ba26006d92d856a9c20fa0241106ee5a597c9"
+    justice_txid = "f4184fc596403b9d638783cf57adfe4c75c605f6356fbc91338530e9831e9e16"
+
+    if justice_rawtx is None:
+        justice_rawtx = "0100000001c997a5e56e104102fa209c6a852dd90660a20b2d9c352423edce25857fcd3704000000004847304402" \
+                        "204e45e16932b8af514961a1d3a1a25fdf3f4f7732e9d624c6c61548ab5fb8cd410220181522ec8eca07de4860a4" \
+                        "acdd12909d831cc56cbbac4622082221a8768d1d0901ffffffff0200ca9a3b00000000434104ae1a62fe09c5f51b" \
+                        "13905f07f06b99a2f7159b2225f374cd378d71302fa28414e7aab37397f554a7df5f142c21c1b7303b8a0626f1ba" \
+                        "ded5c72a704f7e6cd84cac00286bee0000000043410411db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482e" \
+                        "cad7b148a6909a5cb2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3ac00000000"

+    else:
+        justice_txid = sha256d(justice_rawtx)
+
+    if random_txid is True:
+        justice_txid = urandom(32).hex()
+
+    appointment_end = bitcoin_cli.getblockcount() + 2
+
+    return dispute_txid, justice_txid, justice_rawtx, appointment_end
+
+
+def create_dummy_job(random_txid=False, justice_rawtx=None):
+    dispute_txid, justice_txid, justice_rawtx, appointment_end = create_dummy_job_data(random_txid, justice_rawtx)
+    return Job(dispute_txid, justice_txid, justice_rawtx, appointment_end)
+
+
+def test_job_init(run_bitcoind):
+    dispute_txid, justice_txid, justice_rawtx, appointment_end = create_dummy_job_data()
+    job = Job(dispute_txid, justice_txid, justice_rawtx, appointment_end)
+
+    assert job.dispute_txid == dispute_txid and job.justice_txid == justice_txid \
+        and job.justice_rawtx == justice_rawtx and job.appointment_end == appointment_end
+
+
+def test_job_to_dict():
+    job = create_dummy_job()
+    job_dict = job.to_dict()
+
+    assert job.locator == job_dict["locator"] and job.justice_rawtx == job_dict["justice_rawtx"] \
+        and job.appointment_end == job_dict["appointment_end"]
+
+
+def test_job_to_json():
+    job = create_dummy_job()
+    job_dict = json.loads(job.to_json())
+
+    assert job.locator == job_dict["locator"] and job.justice_rawtx == job_dict["justice_rawtx"] \
+        and job.appointment_end == job_dict["appointment_end"]
+
+
+def test_init_responder(responder):
+    assert type(responder.jobs) is dict and len(responder.jobs) == 0
+    assert type(responder.tx_job_map) is dict and len(responder.tx_job_map) == 0
+    assert type(responder.unconfirmed_txs) is list and len(responder.unconfirmed_txs) == 0
+    assert type(responder.missed_confirmations) is dict and len(responder.missed_confirmations) == 0
+    assert responder.block_queue is None
+    assert responder.asleep is True
+    assert responder.zmq_subscriber is None
+
+
+def test_add_response(responder):
+    uuid = uuid4().hex
+    job = create_dummy_job()
+
+    # The responder automatically fires create_job on adding a job if it is asleep (initial state). Avoid this by
+    # setting the state to awake.
+    responder.asleep = False
+
+    receipt = responder.add_response(uuid, job.dispute_txid, job.justice_txid, job.justice_rawtx, job.appointment_end)
+
+    assert receipt.delivered is True
+
+
+def test_create_job(responder):
+    responder.asleep = False
+
+    for _ in range(20):
+        uuid = uuid4().hex
+        confirmations = 0
+        dispute_txid, justice_txid, justice_rawtx, appointment_end = create_dummy_job_data(random_txid=True)
+
+        # Check the job is not within the responder jobs before adding it
+        assert uuid not in responder.jobs
+        assert justice_txid not in responder.tx_job_map
+        assert justice_txid not in responder.unconfirmed_txs
+
+        # And that it is afterwards
+        responder.create_job(uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, confirmations)
+        assert uuid in responder.jobs
+        assert justice_txid in responder.tx_job_map
+        assert justice_txid in responder.unconfirmed_txs
+
+        # Check that the rest of job data also matches
+        job = responder.jobs[uuid]
+        assert job.dispute_txid == dispute_txid and job.justice_txid == justice_txid \
+            and job.justice_rawtx == justice_rawtx and job.appointment_end == appointment_end
+
+
+def test_create_job_already_confirmed(responder):
+    responder.asleep = False
+
+    for i in range(20):
+        uuid = uuid4().hex
+        confirmations = i+1
+        dispute_txid, justice_txid, justice_rawtx, appointment_end = create_dummy_job_data(
+            justice_rawtx=TX.create_dummy_transaction())
+
+        responder.create_job(uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, confirmations)
+
+        assert justice_txid not in responder.unconfirmed_txs
+
+
+def test_do_subscribe(responder):
+    responder.block_queue = Queue()
+
+    zmq_thread = Thread(target=responder.do_subscribe)
+    zmq_thread.daemon = True
+    zmq_thread.start()
+
+    try:
+        generate_block()
+        block_hash = responder.block_queue.get()
+        assert check_txid_format(block_hash)
+
+    except Empty:
+        assert False
+
+
+def test_do_watch(responder):
+    # Reinitializing responder (but keeping the subscriber)
+    responder.jobs = dict()
+    responder.tx_job_map = dict()
+    responder.unconfirmed_txs = []
+    responder.missed_confirmations = dict()
+
+    bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT))
+
+    jobs = [create_dummy_job(justice_rawtx=TX.create_dummy_transaction()) for _ in range(20)]
+
+    # Let's set up the jobs first
+    for job in jobs:
+        uuid = uuid4().hex
+
+        responder.jobs[uuid] = job
+        responder.tx_job_map[job.justice_txid] = [uuid]
+        responder.missed_confirmations[job.justice_txid] = 0
+        responder.unconfirmed_txs.append(job.justice_txid)
+
+    # Let's start to watch
+    watch_thread = Thread(target=responder.do_watch)
+    watch_thread.daemon = True
+    watch_thread.start()
+
+    # And broadcast some of the transactions
+    broadcast_txs = []
+    for job in jobs[:5]:
+        bitcoin_cli.sendrawtransaction(job.justice_rawtx)
+        broadcast_txs.append(job.justice_txid)
+
+    # Mine a block
+    generate_block()
+
+    # The transactions we sent shouldn't be in the unconfirmed transaction list anymore
+    assert not set(broadcast_txs).issubset(responder.unconfirmed_txs)
+
+    # TODO: test that reorgs can be detected once data persistence is merged (new version of the simulator)
+
+    # Generating 5 additional blocks should complete the 5 jobs
+    generate_blocks(5)
+
+    assert not set(broadcast_txs).issubset(responder.tx_job_map)
+
+    # Do the rest
+    broadcast_txs = []
+    for job in jobs[5:]:
+        bitcoin_cli.sendrawtransaction(job.justice_rawtx)
+        broadcast_txs.append(job.justice_txid)
+
+    # Mine a block
+    generate_blocks(6)
+
+    assert len(responder.tx_job_map) == 0
+    assert responder.asleep is True
+
+
+def test_get_txs_to_rebroadcast(responder):
+    # Let's create a few fake txids and assign at least 6 missing confirmations to each
+    txs_missing_too_many_conf = {urandom(32).hex(): 6+i for i in range(10)}
+
+    # Let's create some other transaction that has missed some confirmations but not that many
+    txs_missing_some_conf = {urandom(32).hex(): 3 for _ in range(10)}
+
+    # All the txs in the first dict should be flagged as to_rebroadcast
+    responder.missed_confirmations = txs_missing_too_many_conf
+    txs_to_rebroadcast = responder.get_txs_to_rebroadcast(txs_missing_too_many_conf)
+    assert txs_to_rebroadcast == list(txs_missing_too_many_conf.keys())
+
+    # None of the txs in the second dict should be flagged
+    responder.missed_confirmations = txs_missing_some_conf
+    txs_to_rebroadcast = responder.get_txs_to_rebroadcast(txs_missing_some_conf)
+    assert txs_to_rebroadcast == []
+
+    # Let's check that it also works with a mixed dict
+    responder.missed_confirmations.update(txs_missing_too_many_conf)
+    txs_to_rebroadcast = responder.get_txs_to_rebroadcast(txs_missing_some_conf)
+    assert txs_to_rebroadcast == list(txs_missing_too_many_conf.keys())
+
+
+def test_get_completed_jobs():
+    bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT))
+    initial_height = bitcoin_cli.getblockcount()
+
+    # Let's use a fresh responder for this to make it easier to compare the results
+    responder = Responder()
+
+    # A complete job is a job that has reached the appointment end with enough confirmations (>= MIN_CONFIRMATIONS)
+    # We'll create three types of transactions: end reached + enough conf, end reached + not enough conf, end not reached
+    jobs_end_conf = {uuid4().hex: create_dummy_job(justice_rawtx=TX.create_dummy_transaction()) for _ in range(10)}
+
+    jobs_end_no_conf = {}
+    for _ in range(10):
+        job = create_dummy_job(justice_rawtx=TX.create_dummy_transaction())
+        responder.unconfirmed_txs.append(job.justice_txid)
+        jobs_end_no_conf[uuid4().hex] = job
+
+    jobs_no_end = {}
+    for _ in range(10):
+        job = create_dummy_job(justice_rawtx=TX.create_dummy_transaction())
+        job.appointment_end += 10
+        jobs_no_end[uuid4().hex] = job
+
+    # Let's add all to the responder
+    responder.jobs.update(jobs_end_conf)
+    responder.jobs.update(jobs_end_no_conf)
+    responder.jobs.update(jobs_no_end)
+
+    for uuid, job in responder.jobs.items():
+        bitcoin_cli.sendrawtransaction(job.justice_rawtx)
+
+    # The dummy appointments have an end_appointment time of current + 2, but jobs need at least 6 confs by default
+    generate_blocks(6)
+
+    # And now let's check
+    completed_jobs = responder.get_completed_jobs(initial_height + 6)
+    completed_jobs_ids = [job_id for job_id, confirmations in completed_jobs]
+    ended_jobs_keys = list(jobs_end_conf.keys())
+    assert set(completed_jobs_ids) == set(ended_jobs_keys)
+
+    # Generating 6 additional blocks should also confirm jobs_no_end
+    generate_blocks(6)
+
+    completed_jobs = responder.get_completed_jobs(initial_height + 12)
+    completed_jobs_ids = [job_id for job_id, confirmations in completed_jobs]
+    ended_jobs_keys.extend(list(jobs_no_end.keys()))
+
+    assert set(completed_jobs_ids) == set(ended_jobs_keys)
+
+
+def test_rebroadcast():
+    responder = Responder()
+    responder.asleep = False
+
+    txs_to_rebroadcast = []
+
+    # Rebroadcast calls add_response with retry=True. 
The job data is already in jobs. + for i in range(20): + uuid = uuid4().hex + dispute_txid, justice_txid, justice_rawtx, appointment_end = create_dummy_job_data( + justice_rawtx=TX.create_dummy_transaction()) + + responder.jobs[uuid] = Job(dispute_txid, justice_txid, justice_rawtx, appointment_end) + responder.tx_job_map[justice_txid] = [uuid] + responder.unconfirmed_txs.append(justice_txid) + + # Let's add some of the txs in the rebroadcast list + if (i % 2) == 0: + txs_to_rebroadcast.append(justice_txid) + + receipts = responder.rebroadcast(txs_to_rebroadcast) + + # All txs should have been delivered and the missed confirmation reset + for txid, receipt in receipts: + # Sanity check + assert txid in txs_to_rebroadcast + + assert receipt.delivered is True + assert responder.missed_confirmations[txid] == 0 + + + + + + + + + + + + + + + + + From af0e9c81b5971be4963431622cec4be9ca980b1f Mon Sep 17 00:00:00 2001 From: Sergi Delgado Segura Date: Tue, 22 Oct 2019 16:38:28 +0100 Subject: [PATCH 81/82] Adds some missing tests --- .coveragerc | 1 + pisa/api.py | 2 +- pisa/responder.py | 1 + test/unit/test_api.py | 11 +++++++++++ test/unit/test_block_processor.py | 15 +++++++++++++++ test/unit/test_carrier.py | 1 - test/unit/test_tools.py | 11 ++++++++++- 7 files changed, 39 insertions(+), 3 deletions(-) diff --git a/.coveragerc b/.coveragerc index 74c5a28..53a6100 100644 --- a/.coveragerc +++ b/.coveragerc @@ -1,6 +1,7 @@ [run] omit = pisa/pisad.py + pisa/logger.py pisa/sample_conf.py pisa/time_traveler.py pisa/utils/auth_proxy.py \ No newline at end of file diff --git a/pisa/api.py b/pisa/api.py index af2e491..1398d98 100644 --- a/pisa/api.py +++ b/pisa/api.py @@ -84,7 +84,7 @@ def get_appointment(): response.append(job_data) if not response: - response.append({"locator": locator, "status": "not found"}) + response.append({"locator": locator, "status": "not_found"}) response = jsonify(response) diff --git a/pisa/responder.py b/pisa/responder.py index 058b658..2309b03 100644 --- a/pisa/responder.py +++ b/pisa/responder.py @@ -121,6 +121,7 @@ class Responder: Cleaner.delete_completed_jobs(self.jobs, self.tx_job_map, completed_jobs, height) self.rebroadcast(txs_to_rebroadcast) + # NOTCOVERED else: logger.warning("Reorg found", local_prev_block_hash=prev_block_hash, remote_prev_block_hash=block.get('previousblockhash')) diff --git a/test/unit/test_api.py b/test/unit/test_api.py index ba052e5..63b2967 100644 --- a/test/unit/test_api.py +++ b/test/unit/test_api.py @@ -1,6 +1,7 @@ import json import pytest import requests +from os import urandom from hashlib import sha256 from binascii import unhexlify @@ -99,6 +100,16 @@ def test_request_appointment(new_appointment): assert (all([status == "being_watched" for status in appointment_status])) +def test_request_random_appointment(): + r = requests.get(url=PISA_API + "/get_appointment?locator=" + urandom(32).hex()) + assert (r.status_code == 200) + + received_appointments = json.loads(r.content) + appointment_status = [appointment.pop("status") for appointment in received_appointments] + + assert (all([status == "not_found" for status in appointment_status])) + + def test_add_appointment_multiple_times(new_appointment, n=MULTIPLE_APPOINTMENTS): # Multiple appointments with the same locator should be valid # DISCUSS: #34-store-identical-appointments diff --git a/test/unit/test_block_processor.py b/test/unit/test_block_processor.py index a0ec37f..5183842 100644 --- a/test/unit/test_block_processor.py +++ b/test/unit/test_block_processor.py @@ -43,6 +43,12 @@ 
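
A minimal sketch of the reproducibility this buys, assuming the get_random_value_hex helper added
to conftest.py in the diff below (seed 0 matches the prng_seed fixture):

    import random

    def get_random_value_hex(nbytes):
        pseudo_random_value = random.getrandbits(8 * nbytes)
        return '{:x}'.format(pseudo_random_value).zfill(2 * nbytes)

    random.seed(0)
    first_run = [get_random_value_hex(32) for _ in range(5)]

    random.seed(0)
    second_run = [get_random_value_hex(32) for _ in range(5)]

    # Same seed, same sequence: a failing test can be replayed deterministically
    assert first_run == second_run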
diff --git a/test/unit/test_block_processor.py b/test/unit/test_block_processor.py
index a0ec37f..5183842 100644
--- a/test/unit/test_block_processor.py
+++ b/test/unit/test_block_processor.py
@@ -43,6 +43,12 @@ def test_get_block(best_block_hash):
     assert block.get('hash') == best_block_hash and 'height' in block and 'previousblockhash' in block and 'tx' in block
 
 
+def test_get_random_block():
+    block = BlockProcessor.get_block(urandom(32).hex())
+
+    assert block is None
+
+
 def test_get_block_count():
     block_count = BlockProcessor.get_block_count()
     assert isinstance(block_count, int) and block_count >= 0
@@ -55,6 +61,15 @@ def test_potential_matches(txids, locator_uuid_map):
     assert locator_uuid_map.keys() == potential_matches.keys()
 
 
+def test_potential_matches_random(locator_uuid_map):
+    txids = [urandom(32).hex() for _ in range(len(locator_uuid_map))]
+
+    potential_matches = BlockProcessor.get_potential_matches(txids, locator_uuid_map)
+
+    # None of the ids should match
+    assert len(potential_matches) == 0
+
+
 def test_potential_matches_random_data(locator_uuid_map):
     # The likelihood of finding a potential match with random data should be negligible
     txids = [urandom(32).hex() for _ in range(TEST_SET_SIZE)]
diff --git a/test/unit/test_carrier.py b/test/unit/test_carrier.py
index 6671283..165d417 100644
--- a/test/unit/test_carrier.py
+++ b/test/unit/test_carrier.py
@@ -25,7 +25,6 @@ def carrier():
 
 
 def test_send_transaction(run_bitcoind, carrier):
-    # We are mocking bitcoind and in our simulator txid == tx
     tx = TX.create_dummy_transaction()
     txid = sha256d(tx)
 
diff --git a/test/unit/test_tools.py b/test/unit/test_tools.py
index 251663e..f9bdca2 100644
--- a/test/unit/test_tools.py
+++ b/test/unit/test_tools.py
@@ -1,6 +1,6 @@
 from pisa import logging
 from pisa.tools import check_txid_format
-from pisa.tools import can_connect_to_bitcoind, in_correct_network
+from pisa.tools import can_connect_to_bitcoind, in_correct_network, bitcoin_cli
 
 logging.getLogger().disabled = True
 
@@ -22,6 +22,15 @@ def test_can_connect_to_bitcoind():
     # assert can_connect_to_bitcoind() is False
 
 
+def test_bitcoin_cli():
+    try:
+        bitcoin_cli().help()
+        assert True
+
+    except Exception:
+        assert False
+
+
 def test_check_txid_format():
     assert(check_txid_format(None) is False)
     assert(check_txid_format("") is False)

From 52f52f57f82d18b8da7bd65f36aca0f09ec7c556 Mon Sep 17 00:00:00 2001
From: Sergi Delgado Segura
Date: Wed, 23 Oct 2019 12:40:25 +0100
Subject: [PATCH 82/82] Apply requested changes

- Adds a PRG based on a hardcoded seed to make the tests reproducible
  (get_random_value_hex)
- Updates all the tests replacing urandom with get_random_value_hex
- Properly places misplaced bitcoin_cli in bitcoin_sim_tests
- Typos
---
 pisa/pisad.py                       | 14 +++++++-------
 test/simulator/bitcoin_sim_tests.py |  9 ++++-----
 test/unit/conftest.py               | 12 ++++++++++++
 test/unit/test_api.py               |  5 ++---
 test/unit/test_appointment.py       |  6 +++---
 test/unit/test_blob.py              | 17 +++++++++--------
 test/unit/test_block_processor.py   | 10 +++++-----
 test/unit/test_carrier.py           |  5 ++---
 test/unit/test_cleaner.py           |  5 +++--
 test/unit/test_encrypted_blob.py    |  9 ++++-----
 test/unit/test_inspector.py         | 29 +++++++++++++++--------------
 test/unit/test_responder.py         | 10 +++++-----
 12 files changed, 71 insertions(+), 60 deletions(-)

diff --git a/pisa/pisad.py b/pisa/pisad.py
index 3bbf476..307d4b3 100644
--- a/pisa/pisad.py
+++ b/pisa/pisad.py
@@ -29,13 +29,13 @@ if __name__ == '__main__':
         # FIXME: Leaving this here for future option/arguments
         pass
 
-    if can_connect_to_bitcoind():
-        if in_correct_network(BTC_NETWORK):
-            # Fire the api
-            start_api()
+    if not can_connect_to_bitcoind():
+        logger.error("Can't connect to bitcoind. Shutting down")
Shutting down") - else: - logger.error("bitcoind is running on a different network, check conf.py and bitcoin.conf. Shutting down") + elif not in_correct_network(BTC_NETWORK): + logger.error("bitcoind is running on a different network, check conf.py and bitcoin.conf. Shutting down") else: - logger.error("Can't connect to bitcoind. Shutting down") + # Fire the api + start_api() + diff --git a/test/simulator/bitcoin_sim_tests.py b/test/simulator/bitcoin_sim_tests.py index f0eafe5..37717cb 100644 --- a/test/simulator/bitcoin_sim_tests.py +++ b/test/simulator/bitcoin_sim_tests.py @@ -1,15 +1,17 @@ import re -import os import pytest from time import sleep from threading import Thread from test.simulator.transaction import TX +from test.unit.conftest import get_random_value_hex from test.simulator.bitcoind_sim import run_simulator from pisa.utils.auth_proxy import AuthServiceProxy, JSONRPCException from pisa.conf import BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT -MIXED_VALUES = values = [-1, 500, '', '111', [], 1.1, None, '', "a" * 31, "b" * 33, os.urandom(32).hex()] +MIXED_VALUES = values = [-1, 500, '', '111', [], 1.1, None, '', "a" * 31, "b" * 33, get_random_value_hex(32)] + +bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT)) @pytest.fixture(scope='module') @@ -32,9 +34,6 @@ def check_hash_format(txid): return isinstance(txid, str) and re.search(r'^[0-9A-Fa-f]{64}$', txid) is not None -bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT)) - - def test_help(run_bitcoind): # Help should always return 0 assert(bitcoin_cli.help() == 0) diff --git a/test/unit/conftest.py b/test/unit/conftest.py index e337de9..5fd697c 100644 --- a/test/unit/conftest.py +++ b/test/unit/conftest.py @@ -1,4 +1,5 @@ import pytest +import random import requests from time import sleep from threading import Thread @@ -27,6 +28,17 @@ def run_api(): sleep(0.1) +@pytest.fixture(scope='session', autouse=True) +def prng_seed(): + random.seed(0) + + +def get_random_value_hex(nbytes): + pseudo_random_value = random.getrandbits(8*nbytes) + prv_hex = '{:x}'.format(pseudo_random_value) + return prv_hex.zfill(2*nbytes) + + def generate_block(): requests.post(url="http://{}:{}/generate".format(HOST, PORT), timeout=5) sleep(0.5) diff --git a/test/unit/test_api.py b/test/unit/test_api.py index 63b2967..242430e 100644 --- a/test/unit/test_api.py +++ b/test/unit/test_api.py @@ -1,7 +1,6 @@ import json import pytest import requests -from os import urandom from hashlib import sha256 from binascii import unhexlify @@ -9,8 +8,8 @@ from apps.cli.blob import Blob from pisa import HOST, PORT, logging from test.simulator.utils import sha256d from test.simulator.transaction import TX -from test.unit.conftest import generate_block from pisa.utils.auth_proxy import AuthServiceProxy +from test.unit.conftest import generate_block, get_random_value_hex from pisa.conf import BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT, MAX_APPOINTMENTS logging.getLogger().disabled = True @@ -101,7 +100,7 @@ def test_request_appointment(new_appointment): def test_request_random_appointment(): - r = requests.get(url=PISA_API + "/get_appointment?locator=" + urandom(32).hex()) + r = requests.get(url=PISA_API + "/get_appointment?locator=" + get_random_value_hex(32)) assert (r.status_code == 200) received_appointments = json.loads(r.content) diff --git a/test/unit/test_appointment.py b/test/unit/test_appointment.py index 
diff --git a/test/unit/test_appointment.py b/test/unit/test_appointment.py
index 0caf459..b6ed6b1 100644
--- a/test/unit/test_appointment.py
+++ b/test/unit/test_appointment.py
@@ -1,19 +1,19 @@
-from os import urandom
 from pytest import fixture
 
 from pisa.appointment import Appointment
 from pisa.encrypted_blob import EncryptedBlob
+from test.unit.conftest import get_random_value_hex
 
 
 # Not much to test here, adding it for completeness
 @fixture
 def appointment_data():
-    locator = urandom(32).hex()
+    locator = get_random_value_hex(32)
     start_time = 100
     end_time = 120
     dispute_delta = 20
-    encrypted_blob_data = urandom(100).hex()
+    encrypted_blob_data = get_random_value_hex(100)
     cipher = "AES-GCM-128"
     hash_function = "SHA256"
diff --git a/test/unit/test_blob.py b/test/unit/test_blob.py
index de9403f..9bdd7d3 100644
--- a/test/unit/test_blob.py
+++ b/test/unit/test_blob.py
@@ -1,14 +1,15 @@
-from os import urandom
+from binascii import unhexlify
 
 from pisa import logging
 from apps.cli.blob import Blob
+from test.unit.conftest import get_random_value_hex
 from pisa.conf import SUPPORTED_CIPHERS, SUPPORTED_HASH_FUNCTIONS
 
 logging.getLogger().disabled = True
 
 
 def test_init_blob():
-    data = urandom(64).hex()
+    data = get_random_value_hex(64)
 
     # Fixed (valid) hash function, try different valid ciphers
     hash_function = SUPPORTED_HASH_FUNCTIONS[0]
@@ -29,7 +30,7 @@
         assert(blob.data == data and blob.cipher == cipher and blob.hash_function == case)
 
     # Invalid data
-    data = urandom(64)
+    data = unhexlify(get_random_value_hex(64))
     cipher = SUPPORTED_CIPHERS[0]
     hash_function = SUPPORTED_HASH_FUNCTIONS[0]
 
@@ -41,7 +42,7 @@
         assert True
 
     # Invalid cipher
-    data = urandom(64).hex()
+    data = get_random_value_hex(64)
     cipher = "A" * 10
     hash_function = SUPPORTED_HASH_FUNCTIONS[0]
 
@@ -53,7 +54,7 @@
         assert True
 
     # Invalid hash function
-    data = urandom(64).hex()
+    data = get_random_value_hex(64)
     cipher = SUPPORTED_CIPHERS[0]
     hash_function = "A" * 10
 
@@ -67,14 +68,14 @@
 
 def test_encrypt():
     # Valid data, valid key
-    data = urandom(64).hex()
+    data = get_random_value_hex(64)
     blob = Blob(data, SUPPORTED_CIPHERS[0], SUPPORTED_HASH_FUNCTIONS[0])
 
-    key = urandom(32).hex()
+    key = get_random_value_hex(32)
     encrypted_blob = blob.encrypt(key)
 
     # Invalid key (note that encrypt cannot be called with invalid data since that's checked when the Blob is created)
-    invalid_key = urandom(32)
+    invalid_key = unhexlify(get_random_value_hex(32))
 
     try:
         blob.encrypt(invalid_key)
diff --git a/test/unit/test_block_processor.py b/test/unit/test_block_processor.py
index 5183842..d7d8e72 100644
--- a/test/unit/test_block_processor.py
+++ b/test/unit/test_block_processor.py
@@ -1,11 +1,11 @@
 import pytest
 import logging
-from os import urandom
 from uuid import uuid4
 from hashlib import sha256
 from binascii import unhexlify
 
 from pisa.block_processor import BlockProcessor
+from test.unit.conftest import get_random_value_hex
 
 logging.getLogger().disabled = True
 
@@ -15,7 +15,7 @@ TEST_SET_SIZE = 200
 
 @pytest.fixture(scope='module')
 def txids():
-    return [urandom(32).hex() for _ in range(APPOINTMENT_COUNT)]
+    return [get_random_value_hex(32) for _ in range(APPOINTMENT_COUNT)]
 
 
 @pytest.fixture(scope='module')
@@ -44,7 +44,7 @@ def test_get_block(best_block_hash):
 
 def test_get_random_block():
-    block = BlockProcessor.get_block(urandom(32).hex())
+    block = BlockProcessor.get_block(get_random_value_hex(32))
 
     assert block is None
 
@@ -62,7 +62,7 @@ def test_potential_matches(txids, locator_uuid_map):
 
 def test_potential_matches_random(locator_uuid_map):
-    txids = [urandom(32).hex() for _ in range(len(locator_uuid_map))]
+    txids = [get_random_value_hex(32) for _ in range(len(locator_uuid_map))]
 
     potential_matches = BlockProcessor.get_potential_matches(txids, locator_uuid_map)
 
@@ -72,7 +72,7 @@ def test_potential_matches_random(locator_uuid_map):
 
 def test_potential_matches_random_data(locator_uuid_map):
     # The likelihood of finding a potential match with random data should be negligible
-    txids = [urandom(32).hex() for _ in range(TEST_SET_SIZE)]
+    txids = [get_random_value_hex(32) for _ in range(TEST_SET_SIZE)]
 
     potential_matches = BlockProcessor.get_potential_matches(txids, locator_uuid_map)
 
diff --git a/test/unit/test_carrier.py b/test/unit/test_carrier.py
index 165d417..595dc0c 100644
--- a/test/unit/test_carrier.py
+++ b/test/unit/test_carrier.py
@@ -1,12 +1,11 @@
 import pytest
 import logging
-from os import urandom
-
 from pisa.carrier import Carrier
 from test.simulator.utils import sha256d
 from test.simulator.transaction import TX
 from test.unit.conftest import generate_blocks
+from test.unit.conftest import get_random_value_hex
 from pisa.rpc_errors import RPC_VERIFY_ALREADY_IN_CHAIN, RPC_DESERIALIZATION_ERROR
 
 logging.getLogger().disabled = True
@@ -72,7 +71,7 @@ def test_get_transaction():
 
 def test_get_non_existing_transaction():
-    tx_info = Carrier.get_transaction(urandom(32).hex())
+    tx_info = Carrier.get_transaction(get_random_value_hex(32))
 
     assert tx_info is None
 
diff --git a/test/unit/test_cleaner.py b/test/unit/test_cleaner.py
index 0fdf5df..92c2a35 100644
--- a/test/unit/test_cleaner.py
+++ b/test/unit/test_cleaner.py
@@ -6,6 +6,7 @@ from pisa import logging
 from pisa.responder import Job
 from pisa.cleaner import Cleaner
 from pisa.appointment import Appointment
+from test.unit.conftest import get_random_value_hex
 
 CONFIRMATIONS = 6
 ITEMS = 10
@@ -21,7 +22,7 @@ def set_up_appointments(total_appointments):
 
     for _ in range(total_appointments):
         uuid = uuid4().hex
-        locator = urandom(32).hex()
+        locator = get_random_value_hex(32)
 
         appointments[uuid] = Appointment(locator, None, None, None, None, None, None)
         locator_uuid_map[locator] = [uuid]
@@ -42,7 +43,7 @@ def set_up_jobs(total_jobs):
 
     for _ in range(total_jobs):
         uuid = uuid4().hex
-        txid = urandom(32).hex()
+        txid = get_random_value_hex(32)
 
         # Assign both justice_txid and dispute_txid the same id (it shouldn't matter)
         jobs[uuid] = Job(txid, txid, None, None)
diff --git a/test/unit/test_encrypted_blob.py b/test/unit/test_encrypted_blob.py
index 25dc78f..f05422d 100644
--- a/test/unit/test_encrypted_blob.py
+++ b/test/unit/test_encrypted_blob.py
@@ -1,21 +1,20 @@
-from os import urandom
-
 from pisa import logging
 from pisa.encrypted_blob import EncryptedBlob
+from test.unit.conftest import get_random_value_hex
 
 logging.getLogger().disabled = True
 
 
 def test_init_encrypted_blob():
     # Not much to test here, basically that the object is properly created
-    data = urandom(64).hex()
+    data = get_random_value_hex(64)
 
     assert (EncryptedBlob(data).data == data)
 
 
 def test_decrypt():
     # TODO: The decryption tests are assuming the cipher is AES-GCM-128, since EncryptedBlob assumes the same. Fix this.
-    key = urandom(32).hex()
-    encrypted_data = urandom(64).hex()
+    key = get_random_value_hex(32)
+    encrypted_data = get_random_value_hex(64)
     encrypted_blob = EncryptedBlob(encrypted_data)
 
     # Trying to decrypt random data (in AES_GCM-128) should result in an InvalidTag exception. Our decrypt function
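The InvalidTag behaviour the comment above leans on comes from AES-GCM being an authenticated cipher: random bytes essentially never carry a valid authentication tag, so decryption fails cleanly instead of returning garbage. A standalone sketch using the cryptography package (whose exception name the TODO suggests the project relies on); the 16-byte key and 12-byte nonce are illustrative choices for AES-GCM-128, not the project's actual parameters:

    import os

    from cryptography.exceptions import InvalidTag
    from cryptography.hazmat.primitives.ciphers.aead import AESGCM

    key = os.urandom(16)              # 128-bit key (AES-GCM-128)
    nonce = os.urandom(12)            # 96-bit nonce, the usual GCM choice
    random_ciphertext = os.urandom(64)

    try:
        AESGCM(key).decrypt(nonce, random_ciphertext, None)
        assert False, "random data should never authenticate"

    except InvalidTag:
        # Expected: the authentication tag cannot match
        pass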
diff --git a/test/unit/test_inspector.py b/test/unit/test_inspector.py
index bed6a9a..b393614 100644
--- a/test/unit/test_inspector.py
+++ b/test/unit/test_inspector.py
@@ -1,33 +1,34 @@
-from os import urandom
+from binascii import unhexlify
 
 from pisa import logging
 from pisa.errors import *
 from pisa.inspector import Inspector
 from pisa.appointment import Appointment
 from pisa.block_processor import BlockProcessor
+from test.unit.conftest import get_random_value_hex
 from pisa.conf import MIN_DISPUTE_DELTA, SUPPORTED_CIPHERS, SUPPORTED_HASH_FUNCTIONS
 
 inspector = Inspector()
 APPOINTMENT_OK = (0, None)
 
-NO_HEX_STINGS = ["R" * 64, urandom(31).hex() + "PP", "$"*64, " "*64]
-WRONG_TYPES = [[], '', urandom(32).hex(), 3.2, 2.0, (), object, {}, " "*32, object()]
-WRONG_TYPES_NO_STR = [[], urandom(32), 3.2, 2.0, (), object, {}, object()]
+NO_HEX_STRINGS = ["R" * 64, get_random_value_hex(31) + "PP", "$"*64, " "*64]
+WRONG_TYPES = [[], '', get_random_value_hex(32), 3.2, 2.0, (), object, {}, " "*32, object()]
+WRONG_TYPES_NO_STR = [[], unhexlify(get_random_value_hex(32)), 3.2, 2.0, (), object, {}, object()]
 
 logging.getLogger().disabled = True
 
 
 def test_check_locator():
     # Right appointment type, size and format
-    locator = urandom(32).hex()
+    locator = get_random_value_hex(32)
     assert(Inspector.check_locator(locator) == APPOINTMENT_OK)
 
     # Wrong size (too big)
-    locator = urandom(33).hex()
+    locator = get_random_value_hex(33)
     assert(Inspector.check_locator(locator)[0] == APPOINTMENT_WRONG_FIELD_SIZE)
 
     # Wrong size (too small)
-    locator = urandom(31).hex()
+    locator = get_random_value_hex(31)
    assert(Inspector.check_locator(locator)[0] == APPOINTMENT_WRONG_FIELD_SIZE)
 
     # Empty
@@ -41,7 +42,7 @@ def test_check_locator():
     assert (Inspector.check_locator(locator)[0] == APPOINTMENT_WRONG_FIELD_TYPE)
 
     # Wrong format (no hex)
-    locators = NO_HEX_STINGS
+    locators = NO_HEX_STRINGS
     for locator in locators:
         assert (Inspector.check_locator(locator)[0] == APPOINTMENT_WRONG_FIELD_FORMAT)
 
@@ -122,7 +123,7 @@ def test_check_delta():
 
 def test_check_blob():
     # Right format and length
-    encrypted_blob = urandom(120).hex()
+    encrypted_blob = get_random_value_hex(120)
     assert(Inspector.check_blob(encrypted_blob) == APPOINTMENT_OK)
 
     # # Wrong content
@@ -139,7 +140,7 @@ def test_check_blob():
     assert (Inspector.check_blob(encrypted_blob)[0] == APPOINTMENT_EMPTY_FIELD)
 
     # Wrong format (no hex)
-    encrypted_blobs = NO_HEX_STINGS
+    encrypted_blobs = NO_HEX_STRINGS
     for encrypted_blob in encrypted_blobs:
         assert (Inspector.check_blob(encrypted_blob)[0] == APPOINTMENT_WRONG_FIELD_FORMAT)
 
@@ -157,7 +158,7 @@ def test_check_cipher():
     assert(Inspector.check_cipher(cipher)[0] == APPOINTMENT_WRONG_FIELD_TYPE)
 
     # Wrong value
-    ciphers = NO_HEX_STINGS
+    ciphers = NO_HEX_STRINGS
     for cipher in ciphers:
         assert(Inspector.check_cipher(cipher)[0] == APPOINTMENT_CIPHER_NOT_SUPPORTED)
 
@@ -179,7 +180,7 @@ def test_check_hash_function():
     assert (Inspector.check_hash_function(hash_function)[0] == APPOINTMENT_WRONG_FIELD_TYPE)
 
     # Wrong value
-    hash_functions = NO_HEX_STINGS
+    hash_functions = NO_HEX_STRINGS
     for hash_function in hash_functions:
         assert (Inspector.check_hash_function(hash_function)[0] == APPOINTMENT_HASH_FUNCTION_NOT_SUPPORTED)
 
@@ -198,11 +199,11 @@ def test_inspect(run_bitcoind):
     assert (type(appointment) == tuple and appointment[0] != 0)
 
     # Valid appointment
-    locator = urandom(32).hex()
+    locator = get_random_value_hex(32)
     start_time = BlockProcessor.get_block_count() + 5
     end_time = start_time + 20
     dispute_delta = MIN_DISPUTE_DELTA
-    encrypted_blob = urandom(64).hex()
+    encrypted_blob = get_random_value_hex(64)
     cipher = SUPPORTED_CIPHERS[0]
     hash_function = SUPPORTED_HASH_FUNCTIONS[0]
 
diff --git a/test/unit/test_responder.py b/test/unit/test_responder.py
index 72e617e..af1fefd 100644
--- a/test/unit/test_responder.py
+++ b/test/unit/test_responder.py
@@ -1,6 +1,5 @@
 import json
 import pytest
-from os import urandom
 from uuid import uuid4
 from threading import Thread
 from queue import Queue, Empty
@@ -9,8 +8,9 @@ from pisa.tools import check_txid_format
 from test.simulator.utils import sha256d
 from pisa.responder import Responder, Job
 from test.simulator.bitcoind_sim import TX
-from test.unit.conftest import generate_block, generate_blocks
 from pisa.utils.auth_proxy import AuthServiceProxy
+from test.unit.conftest import get_random_value_hex
+from test.unit.conftest import generate_block, generate_blocks
 from pisa.conf import BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT
 
 
@@ -40,7 +40,7 @@ def create_dummy_job_data(random_txid=False, justice_rawtx=None):
     justice_txid = sha256d(justice_rawtx)
 
     if random_txid is True:
-        justice_txid = urandom(32).hex()
+        justice_txid = get_random_value_hex(32)
 
     appointment_end = bitcoin_cli.getblockcount() + 2
 
@@ -214,10 +214,10 @@ def test_do_watch(responder):
 
 def test_get_txs_to_rebroadcast(responder):
     # Let's create a few fake txids and assign at least 6 missing confirmations to each
-    txs_missing_too_many_conf = {urandom(32).hex(): 6+i for i in range(10)}
+    txs_missing_too_many_conf = {get_random_value_hex(32): 6+i for i in range(10)}
 
     # Let's create some other transactions that have missed some confirmations, but not that many
-    txs_missing_some_conf = {urandom(32).hex(): 3 for _ in range(10)}
+    txs_missing_some_conf = {get_random_value_hex(32): 3 for _ in range(10)}
 
     # All the txs in the first dict should be flagged as to_rebroadcast
     responder.missed_confirmations = txs_missing_too_many_conf
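For context, the invariant this test pins down: the Responder flags a transaction for rebroadcast once it has missed enough confirmations (six, judging by the fixtures above, where the 6+i entries are picked up and the ones stuck at 3 are not). A minimal sketch of that selection rule; the constant and function names here are illustrative, not the Responder's actual internals:

    CONFIRMATIONS_BEFORE_RETRY = 6  # assumed threshold, matching the test data

    def get_txs_to_rebroadcast(missed_confirmations):
        # Pick every txid that has gone too long without confirming
        return [txid for txid, missed in missed_confirmations.items()
                if missed >= CONFIRMATIONS_BEFORE_RETRY]

    missed = {"aa" * 32: 7, "bb" * 32: 3}
    assert get_txs_to_rebroadcast(missed) == ["aa" * 32]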