From 1b229cb44153d70ae07b8c578b10f8f7b2208ae9 Mon Sep 17 00:00:00 2001 From: Salvatore Ingala <6681844+bigspider@users.noreply.github.com> Date: Tue, 8 Oct 2019 14:35:30 +0700 Subject: [PATCH 01/13] Added *.pyc and .cache/ to .gitignore --- .gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitignore b/.gitignore index dfdbf33..6560657 100644 --- a/.gitignore +++ b/.gitignore @@ -10,3 +10,5 @@ bitcoin.conf* apps/cli/*.json appointments/ test.py +*.pyc +.cache From ed0cb4f63291b7b23c5a0bf543095fc9854be9d3 Mon Sep 17 00:00:00 2001 From: Salvatore Ingala <6681844+bigspider@users.noreply.github.com> Date: Tue, 8 Oct 2019 16:32:09 +0700 Subject: [PATCH 02/13] Changed log format to JSON; fixed missing return value in get_potential_matches --- pisa/__init__.py | 18 ++++++++++++++++-- pisa/api.py | 5 +++-- pisa/block_processor.py | 22 +++++++++++----------- pisa/carrier.py | 14 +++++++------- pisa/cleaner.py | 11 +++++------ pisa/encrypted_blob.py | 14 +++++++------- pisa/inspector.py | 23 ++++++++--------------- pisa/pisad.py | 8 ++++---- pisa/responder.py | 31 ++++++++++++++----------------- pisa/utils/zmq_subscriber.py | 5 ++--- pisa/watcher.py | 21 ++++++++++----------- 11 files changed, 87 insertions(+), 85 deletions(-) diff --git a/pisa/__init__.py b/pisa/__init__.py index a279a5c..7fa2901 100644 --- a/pisa/__init__.py +++ b/pisa/__init__.py @@ -1,14 +1,28 @@ import logging +import json +import time from pisa.utils.auth_proxy import AuthServiceProxy import pisa.conf as conf - HOST = 'localhost' PORT = 9814 +class StructuredMessage(object): + def __init__(self, message, **kwargs): + self.message = message + self.time = time.asctime() + self.kwargs = kwargs + + def __str__(self): + return json.dumps({ **self.kwargs, "message": self.message, "time": self.time }) + +M = StructuredMessage # to improve readability + +logging.basicConfig(level=logging.INFO, format='%(message)s') + # Configure logging -logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO, handlers=[ +logging.basicConfig(format='%(message)s', level=logging.INFO, handlers=[ logging.FileHandler(conf.SERVER_LOG_FILE), logging.StreamHandler() ]) diff --git a/pisa/api.py b/pisa/api.py index 44244ee..df98cd0 100644 --- a/pisa/api.py +++ b/pisa/api.py @@ -1,6 +1,7 @@ import json from flask import Flask, request, Response, abort, jsonify +from pisa import HOST, PORT, logging, bitcoin_cli, M from pisa.watcher import Watcher from pisa.inspector import Inspector from pisa import HOST, PORT, logging @@ -20,7 +21,7 @@ def add_appointment(): remote_addr = request.environ.get('REMOTE_ADDR') remote_port = request.environ.get('REMOTE_PORT') - logging.info('[API] connection accepted from {}:{}'.format(remote_addr, remote_port)) + logging.info(M('[API] connection accepted', from_addr_port='{}:{}'.format(remote_addr, remote_port))) # Check content type once if properly defined request_data = json.loads(request.get_json()) @@ -46,7 +47,7 @@ def add_appointment(): rcode = HTTP_BAD_REQUEST response = "appointment rejected. 
Request does not match the standard" - logging.info('[API] sending response and disconnecting: {} --> {}:{}'.format(response, remote_addr, remote_port)) + logging.info(M('[API] sending response and disconnecting', from_addr_port='{}:{}'.format(remote_addr, remote_port), response=response)) return Response(response, status=rcode, mimetype='text/plain') diff --git a/pisa/block_processor.py b/pisa/block_processor.py index eb09e45..4343bb9 100644 --- a/pisa/block_processor.py +++ b/pisa/block_processor.py @@ -1,7 +1,7 @@ import binascii from hashlib import sha256 -from pisa import logging, bitcoin_cli +from pisa import logging, bitcoin_cli, M from pisa.utils.auth_proxy import JSONRPCException @@ -14,7 +14,7 @@ class BlockProcessor: except JSONRPCException as e: block = None - logging.error("[BlockProcessor] couldn't get block from bitcoind. Error code {}".format(e)) + logging.error(M("[BlockProcessor] couldn't get block from bitcoind.", error_code=e)) return block @@ -26,7 +26,7 @@ class BlockProcessor: except JSONRPCException as e: block_hash = None - logging.error("[BlockProcessor] couldn't get block hash. Error code {}".format(e)) + logging.error(M("[BlockProcessor] couldn't get block hash.", error_code=e)) return block_hash @@ -54,10 +54,12 @@ class BlockProcessor: potential_matches = {locator: potential_locators[locator] for locator in intersection} if len(potential_matches) > 0: - logging.info("[BlockProcessor] list of potential matches: {}".format(potential_matches)) + logging.info(M("[BlockProcessor] list of potential matches", potential_matches=potential_matches)) else: - logging.info("[BlockProcessor] no potential matches found") + logging.info(M("[BlockProcessor] no potential matches found")) + + return potential_matches return potential_matches @@ -73,13 +75,12 @@ class BlockProcessor: justice_txid = bitcoin_cli.decoderawtransaction(justice_rawtx).get('txid') matches.append((locator, uuid, dispute_txid, justice_txid, justice_rawtx)) - logging.info("[BlockProcessor] match found for locator {} (uuid: {}): {}".format( - locator, uuid, justice_txid)) + logging.info(M("[BlockProcessor] match found for locator.", locator=locator, uuid=uuid, justice_txid=justice_txid)) except JSONRPCException as e: # Tx decode failed returns error code -22, maybe we should be more strict here. Leaving it simple # for the POC - logging.error("[BlockProcessor] can't build transaction from decoded data. 
Error code {}".format(e)) + logging.error(M("[BlockProcessor] can't build transaction from decoded data.", error_code=e)) return matches @@ -91,7 +92,7 @@ class BlockProcessor: if tx in tx_job_map and tx in unconfirmed_txs: unconfirmed_txs.remove(tx) - logging.info("[Responder] confirmation received for tx {}".format(tx)) + logging.info(M("[Responder] confirmation received for transaction", tx=tx)) elif tx in unconfirmed_txs: if tx in missed_confirmations: @@ -100,8 +101,7 @@ class BlockProcessor: else: missed_confirmations[tx] = 1 - logging.info("[Responder] tx {} missed a confirmation (total missed: {})" - .format(tx, missed_confirmations[tx])) + logging.info(M("[Responder] transaction missed a confirmation", tx=tx, missed_confirmations=missed_confirmations[tx])) return unconfirmed_txs, missed_confirmations diff --git a/pisa/carrier.py b/pisa/carrier.py index eb0319d..946af01 100644 --- a/pisa/carrier.py +++ b/pisa/carrier.py @@ -1,5 +1,5 @@ from pisa.rpc_errors import * -from pisa import logging, bitcoin_cli +from pisa import logging, bitcoin_cli, M from pisa.utils.auth_proxy import JSONRPCException from pisa.errors import UNKNOWN_JSON_RPC_EXCEPTION @@ -13,7 +13,7 @@ class Carrier: def send_transaction(self, rawtx, txid): try: - logging.info("[Carrier] pushing transaction to the network (txid: {})".format(rawtx)) + logging.info(M("[Carrier] pushing transaction to the network", txid=txid, rawtx=rawtx)) bitcoin_cli.sendrawtransaction(rawtx) receipt = self.Receipt(delivered=True) @@ -32,7 +32,7 @@ class Carrier: receipt = self.Receipt(delivered=False, reason=UNKNOWN_JSON_RPC_EXCEPTION) elif errno == RPC_VERIFY_ALREADY_IN_CHAIN: - logging.info("[Carrier] {} is already in the blockchain. Getting confirmation count".format(txid)) + logging.info(M("[Carrier] Transaction is already in the blockchain. Getting confirmation count", txid=txid)) # If the transaction is already in the chain, we get the number of confirmations and watch the job # until the end of the appointment @@ -43,13 +43,13 @@ class Carrier: receipt = self.Receipt(delivered=True, confirmations=confirmations) else: - # There's a really unlike edge case where a transaction can be reorged between receiving the + # There's a really unlikely edge case where a transaction can be reorged between receiving the # notification and querying the data. In such a case we just resend self.send_transaction(rawtx, txid) else: # If something else happens (unlikely but possible) log it so we can treat it in future releases - logging.error("[Responder] JSONRPCException. Error {}".format(e)) + logging.error(M("[Responder] JSONRPCException.", error_code=e)) receipt = self.Receipt(delivered=False, reason=UNKNOWN_JSON_RPC_EXCEPTION) return receipt @@ -66,12 +66,12 @@ class Carrier: # reorged while we were querying bitcoind to get the confirmation count. In such a case we just # restart the job if e.error.get('code') == RPC_INVALID_ADDRESS_OR_KEY: - logging.info("[Carrier] transaction {} got reorged before obtaining information".format(txid)) + logging.info(M("[Carrier] transaction got reorged before obtaining information", txid=txid)) # TODO: Check RPC methods to see possible returns and avoid general else # else: # # If something else happens (unlikely but possible) log it so we can treat it in future releases - # logging.error("[Responder] JSONRPCException. 
Error {}".format(e)) + # logging.error(M("[Responder] JSONRPCException.", error_code=e) return tx_info diff --git a/pisa/cleaner.py b/pisa/cleaner.py index b7c2947..68ad5e2 100644 --- a/pisa/cleaner.py +++ b/pisa/cleaner.py @@ -1,4 +1,4 @@ -from pisa import logging +from pisa import logging, M # Dictionaries in Python are "passed-by-reference", so no return is needed for the Cleaner" # https://docs.python.org/3/faq/programming.html#how-do-i-write-a-function-with-output-parameters-call-by-reference @@ -18,14 +18,13 @@ class Cleaner: else: locator_uuid_map[locator].remove(uuid) - logging.info("[Cleaner] end time reached with no match! Deleting appointment {} (uuid: {})".format(locator, - uuid)) + logging.info(M("[Cleaner] end time reached with no match! Deleting appointment.", locator=locator, uuid=uuid)) @staticmethod def delete_completed_jobs(jobs, tx_job_map, completed_jobs, height): for uuid, confirmations in completed_jobs: - logging.info("[Cleaner] job completed (uuid = {}). Appointment ended at block {} after {} confirmations" - .format(uuid, height, confirmations)) + logging.info(M("[Cleaner] job completed. Appointment ended after reaching enough confirmations.", + uuid=uuid, height=height, confirmations=confirmations)) # ToDo: #9-add-data-persistence justice_txid = jobs[uuid].justice_txid @@ -34,7 +33,7 @@ class Cleaner: if len(tx_job_map[justice_txid]) == 1: tx_job_map.pop(justice_txid) - logging.info("[Cleaner] no more jobs for justice_txid {}".format(justice_txid)) + logging.info(M("[Cleaner] no more jobs for justice transaction.", justice_txid=justice_txid)) else: tx_job_map[justice_txid].remove(uuid) diff --git a/pisa/encrypted_blob.py b/pisa/encrypted_blob.py index ffc3e38..3aeee13 100644 --- a/pisa/encrypted_blob.py +++ b/pisa/encrypted_blob.py @@ -1,8 +1,7 @@ from hashlib import sha256 from binascii import unhexlify, hexlify from cryptography.hazmat.primitives.ciphers.aead import AESGCM - -from pisa import logging +from pisa import logging, M # FIXME: EncryptedBlob is assuming AES-128-GCM. 
A cipher field should be part of the object and the decryption should be @@ -23,11 +22,12 @@ class EncryptedBlob: sk = master_key[:16] nonce = master_key[16:] - logging.info("[Watcher] creating new blob") - logging.info("[Watcher] master key: {}".format(hexlify(master_key).decode())) - logging.info("[Watcher] sk: {}".format(hexlify(sk).decode())) - logging.info("[Watcher] nonce: {}".format(hexlify(nonce).decode())) - logging.info("[Watcher] encrypted_blob: {}".format(self.data)) + logging.info(M("[Watcher] creating new blob.", + master_key=hexlify(master_key).decode(), + sk=hexlify(sk).decode(), + nonce=hexlify(sk).decode(), + encrypted_blob=self.data + )) # Decrypt aesgcm = AESGCM(sk) diff --git a/pisa/inspector.py b/pisa/inspector.py index ae9ca89..945afc1 100644 --- a/pisa/inspector.py +++ b/pisa/inspector.py @@ -2,7 +2,7 @@ import re from pisa import errors import pisa.conf as conf -from pisa import logging +from pisa import logging, bitcoin_cli, M from pisa.appointment import Appointment from pisa.block_processor import BlockProcessor @@ -70,8 +70,7 @@ class Inspector: rcode = errors.APPOINTMENT_WRONG_FIELD_FORMAT message = "wrong locator format ({})".format(locator) - if message is not None: - logging.error("[Inspector] {}".format(message)) + logging.error(M("[Inspector] {}".format(message))) return rcode, message @@ -98,8 +97,7 @@ class Inspector: else: message = "start_time is too close to current height" - if message is not None: - logging.error("[Inspector] {}".format(message)) + logging.error(M("[Inspector] {}".format(message))) return rcode, message @@ -132,8 +130,7 @@ class Inspector: else: message = 'end_time is too close to current height' - if message is not None: - logging.error("[Inspector] {}".format(message)) + logging.error(M("[Inspector] {}".format(message))) return rcode, message @@ -155,8 +152,7 @@ class Inspector: message = "dispute delta too small. The dispute delta should be at least {} (current: {})".format( conf.MIN_DISPUTE_DELTA, dispute_delta) - if message is not None: - logging.error("[Inspector] {}".format(message)) + logging.error(M("[Inspector] {}".format(message))) return rcode, message @@ -178,8 +174,7 @@ class Inspector: rcode = errors.APPOINTMENT_WRONG_FIELD_FORMAT message = "wrong encrypted_blob format ({})".format(encrypted_blob) - if message is not None: - logging.error("[Inspector] {}".format(message)) + logging.error(M("[Inspector] {}".format(message))) return rcode, message @@ -200,8 +195,7 @@ class Inspector: rcode = errors.APPOINTMENT_CIPHER_NOT_SUPPORTED message = "cipher not supported: {}".format(cipher) - if message is not None: - logging.error("[Inspector] {}".format(message)) + logging.error(M("[Inspector] {}".format(message))) return rcode, message @@ -222,7 +216,6 @@ class Inspector: rcode = errors.APPOINTMENT_HASH_FUNCTION_NOT_SUPPORTED message = "hash_function not supported {}".format(hash_function) - if message is not None: - logging.error("[Inspector] {}".format(message)) + logging.error(M("[Inspector] {}".format(message))) return rcode, message diff --git a/pisa/pisad.py b/pisa/pisad.py index 8fbce83..f7b3603 100644 --- a/pisa/pisad.py +++ b/pisa/pisad.py @@ -1,7 +1,7 @@ from sys import argv from getopt import getopt -from pisa import logging +from pisa import logging, M from pisa.api import start_api from pisa.tools import can_connect_to_bitcoind, in_correct_network @@ -19,8 +19,8 @@ if __name__ == '__main__': start_api() else: - logging.error("[Pisad] bitcoind is running on a different network, check conf.py and bitcoin.conf. 
" - "Shutting down") + logging.error(M("[Pisad] bitcoind is running on a different network, check conf.py and bitcoin.conf. " + "Shutting down")) else: - logging.error("[Pisad] can't connect to bitcoind. Shutting down") + logging.error(M("[Pisad] can't connect to bitcoind. Shutting down")) diff --git a/pisa/responder.py b/pisa/responder.py index 08f082e..e44ee61 100644 --- a/pisa/responder.py +++ b/pisa/responder.py @@ -3,9 +3,9 @@ from threading import Thread from hashlib import sha256 from binascii import unhexlify +from pisa import logging, M from pisa.cleaner import Cleaner from pisa.carrier import Carrier -from pisa import logging from pisa.tools import check_tx_in_chain from pisa.block_processor import BlockProcessor from pisa.utils.zmq_subscriber import ZMQHandler @@ -45,7 +45,7 @@ class Responder: def add_response(self, uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, retry=False): if self.asleep: - logging.info("[Responder] waking up!") + logging.info(M("[Responder] waking up!")) carrier = Carrier() receipt = carrier.send_transaction(justice_rawtx, justice_txid) @@ -80,8 +80,7 @@ class Responder: if confirmations == 0: self.unconfirmed_txs.append(justice_txid) - logging.info('[Responder] new job added (dispute txid = {}, justice txid = {}, appointment end = {})' - .format(dispute_txid, justice_txid, appointment_end)) + logging.info(M("[Responder] new job added.", dispute_txid=dispute_txid, justice_txid=justice_txid, appointment_end=appointment_end)) if self.asleep: self.asleep = False @@ -109,9 +108,7 @@ class Responder: txs = block.get('tx') height = block.get('height') - logging.info("[Responder] new block received {}".format(block_hash)) - logging.info("[Responder] prev. block hash {}".format(block.get('previousblockhash'))) - logging.info("[Responder] list of transactions: {}".format(txs)) + logging.info(M("[Responder] new block received", block_hash=block_hash, prev_block_hash=block.get('previousblockhash'), txs=txs)) # ToDo: #9-add-data-persistence # change prev_block_hash condition @@ -125,8 +122,8 @@ class Responder: self.rebroadcast(txs_to_rebroadcast) else: - logging.warning("[Responder] reorg found! local prev. block id = {}, remote prev. block id = {}" - .format(prev_block_hash, block.get('previousblockhash'))) + logging.warning(M("[Responder] reorg found!", + local_prev_block_hash=prev_block_hash, remote_prev_block_hash=block.get('previousblockhash'))) self.handle_reorgs() @@ -136,7 +133,7 @@ class Responder: self.asleep = True self.zmq_subscriber.terminate = True - logging.info("[Responder] no more pending jobs, going back to sleep") + logging.info(M("[Responder] no more pending jobs, going back to sleep")) def get_txs_to_rebroadcast(self, txs): txs_to_rebroadcast = [] @@ -172,8 +169,8 @@ class Responder: self.add_response(uuid, self.jobs[uuid].dispute_txid, self.jobs[uuid].justice_txid, self.jobs[uuid].justice_rawtx, self.jobs[uuid].appointment_end, retry=True) - logging.warning("[Responder] tx {} has missed {} confirmations. Rebroadcasting" - .format(self.jobs[uuid].justice_txid, CONFIRMATIONS_BEFORE_RETRY)) + logging.warning(M("[Responder] Transaction has missed many confirmations. 
Rebroadcasting.", + justice_txid=self.jobs[uuid].justice_txid, confirmations_missed=CONFIRMATIONS_BEFORE_RETRY)) # FIXME: Legacy code, must be checked and updated/fixed def handle_reorgs(self): @@ -189,8 +186,8 @@ class Responder: # If both transactions are there, we only need to update the justice tx confirmation count if justice_in_chain: - logging.info("[Responder] updating confirmation count for {}: prev. {}, current {}".format( - job.justice_txid, job.confirmations, justice_confirmations)) + logging.info(M("[Responder] updating confirmation count for transaction.", + justice_txid=job.justice_txid, prev_count=job.confirmations, curr_count=justice_confirmations)) job.confirmations = justice_confirmations @@ -203,7 +200,7 @@ class Responder: else: # ToDo: #24-properly-handle-reorgs - # FIXME: if the dispute is not on chain (either in mempool or not there al all), we need to call the + # FIXME: if the dispute is not on chain (either in mempool or not there at all), we need to call the # reorg manager - logging.warning("[Responder] dispute and justice transaction missing. Calling the reorg manager") - logging.error("[Responder] reorg manager not yet implemented") + logging.warning(M("[Responder] dispute and justice transaction missing. Calling the reorg manager")) + logging.error(M("[Responder] reorg manager not yet implemented")) diff --git a/pisa/utils/zmq_subscriber.py b/pisa/utils/zmq_subscriber.py index 75e175d..0545d9c 100644 --- a/pisa/utils/zmq_subscriber.py +++ b/pisa/utils/zmq_subscriber.py @@ -1,7 +1,6 @@ import zmq import binascii - -from pisa import logging +from pisa import logging, M from pisa.conf import FEED_PROTOCOL, FEED_ADDR, FEED_PORT @@ -30,4 +29,4 @@ class ZMQHandler: block_hash = binascii.hexlify(body).decode('UTF-8') block_queue.put(block_hash) - logging.info("[ZMQHandler-{}] new block received via ZMQ".format(self.parent, block_hash)) + logging.info(M("[ZMQHandler-{}] new block received via ZMQ".format(self.parent), block_hash=block_hash)) diff --git a/pisa/watcher.py b/pisa/watcher.py index ab8da44..a70bb43 100644 --- a/pisa/watcher.py +++ b/pisa/watcher.py @@ -2,7 +2,7 @@ from uuid import uuid4 from queue import Queue from threading import Thread -from pisa import logging +from pisa import logging, M from pisa.cleaner import Cleaner from pisa.conf import EXPIRY_DELTA from pisa.responder import Responder @@ -52,35 +52,34 @@ class Watcher: zmq_thread.start() watcher.start() - logging.info("[Watcher] waking up!") + logging.info(M("[Watcher] waking up!")) appointment_added = True - logging.info('[Watcher] new appointment accepted (locator = {})'.format(appointment.locator)) + logging.info(M("[Watcher] new appointment accepted.", locator=appointment.locator)) else: appointment_added = False - logging.info('[Watcher] maximum appointments reached, appointment rejected (locator = {})'.format( - appointment.locator)) + logging.info(M("[Watcher] maximum appointments reached, appointment rejected.", locator=appointment.locator)) return appointment_added def do_subscribe(self, block_queue): - self.zmq_subscriber = ZMQHandler(parent='Watcher') + self.zmq_subscriber = ZMQHandler(parent="Watcher") self.zmq_subscriber.handle(block_queue) def do_watch(self): while len(self.appointments) > 0: block_hash = self.block_queue.get() - logging.info("[Watcher] new block received {}".format(block_hash)) + logging.info(M("[Watcher] new block received", block_hash=block_hash)) block = BlockProcessor.get_block(block_hash) if block is not None: txids = block.get('tx') - 
logging.info("[Watcher] list of transactions: {}".format(txids)) + logging.info(M("[Watcher] list of transactions.", txids=txids)) expired_appointments = [uuid for uuid, appointment in self.appointments.items() if block["height"] > appointment.end_time + EXPIRY_DELTA] @@ -91,8 +90,8 @@ class Watcher: matches = BlockProcessor.get_matches(potential_matches, self.locator_uuid_map, self.appointments) for locator, uuid, dispute_txid, justice_txid, justice_rawtx in matches: - logging.info("[Watcher] notifying responder about {} and deleting appointment {} (uuid: {})" - .format(justice_txid, locator, uuid)) + logging.info(M("[Watcher] notifying responder and deleting appointment.", + justice_txid=justice_txid, locator=locator, uuid=uuid)) self.responder.add_response(uuid, dispute_txid, justice_txid, justice_rawtx, self.appointments[uuid].end_time) @@ -113,4 +112,4 @@ class Watcher: self.asleep = True self.zmq_subscriber.terminate = True - logging.error("[Watcher] no more pending appointments, going back to sleep") + logging.error(M("[Watcher] no more pending appointments, going back to sleep")) From 2a5dd48950914daa950f26a07cd538b99a4a532d Mon Sep 17 00:00:00 2001 From: Salvatore Ingala <6681844+bigspider@users.noreply.github.com> Date: Tue, 8 Oct 2019 18:21:52 +0700 Subject: [PATCH 03/13] PEP8 linting --- pisa/__init__.py | 4 ++-- pisa/api.py | 3 ++- pisa/block_processor.py | 6 ++++-- pisa/carrier.py | 3 ++- pisa/cleaner.py | 5 +++-- pisa/encrypted_blob.py | 9 ++++----- pisa/responder.py | 16 +++++++++++----- pisa/utils/zmq_subscriber.py | 3 ++- pisa/watcher.py | 3 ++- 9 files changed, 32 insertions(+), 20 deletions(-) diff --git a/pisa/__init__.py b/pisa/__init__.py index 7fa2901..9ba1a28 100644 --- a/pisa/__init__.py +++ b/pisa/__init__.py @@ -8,6 +8,7 @@ import pisa.conf as conf HOST = 'localhost' PORT = 9814 + class StructuredMessage(object): def __init__(self, message, **kwargs): self.message = message @@ -15,7 +16,7 @@ class StructuredMessage(object): self.kwargs = kwargs def __str__(self): - return json.dumps({ **self.kwargs, "message": self.message, "time": self.time }) + return json.dumps({**self.kwargs, "message": self.message, "time": self.time}) M = StructuredMessage # to improve readability @@ -31,4 +32,3 @@ logging.basicConfig(format='%(message)s', level=logging.INFO, handlers=[ # TODO: Check if a long lived connection like this may create problems (timeouts) bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (conf.BTC_RPC_USER, conf.BTC_RPC_PASSWD, conf.BTC_RPC_HOST, conf.BTC_RPC_PORT)) - diff --git a/pisa/api.py b/pisa/api.py index df98cd0..b437d80 100644 --- a/pisa/api.py +++ b/pisa/api.py @@ -47,7 +47,8 @@ def add_appointment(): rcode = HTTP_BAD_REQUEST response = "appointment rejected. 
Request does not match the standard" - logging.info(M('[API] sending response and disconnecting', from_addr_port='{}:{}'.format(remote_addr, remote_port), response=response)) + logging.info(M('[API] sending response and disconnecting', + from_addr_port='{}:{}'.format(remote_addr, remote_port), response=response)) return Response(response, status=rcode, mimetype='text/plain') diff --git a/pisa/block_processor.py b/pisa/block_processor.py index 4343bb9..08bd1c9 100644 --- a/pisa/block_processor.py +++ b/pisa/block_processor.py @@ -75,7 +75,8 @@ class BlockProcessor: justice_txid = bitcoin_cli.decoderawtransaction(justice_rawtx).get('txid') matches.append((locator, uuid, dispute_txid, justice_txid, justice_rawtx)) - logging.info(M("[BlockProcessor] match found for locator.", locator=locator, uuid=uuid, justice_txid=justice_txid)) + logging.info(M("[BlockProcessor] match found for locator.", + locator=locator, uuid=uuid, justice_txid=justice_txid)) except JSONRPCException as e: # Tx decode failed returns error code -22, maybe we should be more strict here. Leaving it simple @@ -101,7 +102,8 @@ class BlockProcessor: else: missed_confirmations[tx] = 1 - logging.info(M("[Responder] transaction missed a confirmation", tx=tx, missed_confirmations=missed_confirmations[tx])) + logging.info(M("[Responder] transaction missed a confirmation", + tx=tx, missed_confirmations=missed_confirmations[tx])) return unconfirmed_txs, missed_confirmations diff --git a/pisa/carrier.py b/pisa/carrier.py index 946af01..6efa58f 100644 --- a/pisa/carrier.py +++ b/pisa/carrier.py @@ -32,7 +32,8 @@ class Carrier: receipt = self.Receipt(delivered=False, reason=UNKNOWN_JSON_RPC_EXCEPTION) elif errno == RPC_VERIFY_ALREADY_IN_CHAIN: - logging.info(M("[Carrier] Transaction is already in the blockchain. Getting confirmation count", txid=txid)) + logging.info(M("[Carrier] Transaction is already in the blockchain. Getting confirmation count", + txid=txid)) # If the transaction is already in the chain, we get the number of confirmations and watch the job # until the end of the appointment diff --git a/pisa/cleaner.py b/pisa/cleaner.py index 68ad5e2..f9d748b 100644 --- a/pisa/cleaner.py +++ b/pisa/cleaner.py @@ -18,13 +18,14 @@ class Cleaner: else: locator_uuid_map[locator].remove(uuid) - logging.info(M("[Cleaner] end time reached with no match! Deleting appointment.", locator=locator, uuid=uuid)) + logging.info(M("[Cleaner] end time reached with no match! Deleting appointment.", + locator=locator, uuid=uuid)) @staticmethod def delete_completed_jobs(jobs, tx_job_map, completed_jobs, height): for uuid, confirmations in completed_jobs: logging.info(M("[Cleaner] job completed. 
Appointment ended after reaching enough confirmations.", - uuid=uuid, height=height, confirmations=confirmations)) + uuid=uuid, height=height, confirmations=confirmations)) # ToDo: #9-add-data-persistence justice_txid = jobs[uuid].justice_txid diff --git a/pisa/encrypted_blob.py b/pisa/encrypted_blob.py index 3aeee13..1dcf644 100644 --- a/pisa/encrypted_blob.py +++ b/pisa/encrypted_blob.py @@ -23,11 +23,10 @@ class EncryptedBlob: nonce = master_key[16:] logging.info(M("[Watcher] creating new blob.", - master_key=hexlify(master_key).decode(), - sk=hexlify(sk).decode(), - nonce=hexlify(sk).decode(), - encrypted_blob=self.data - )) + master_key=hexlify(master_key).decode(), + sk=hexlify(sk).decode(), + nonce=hexlify(sk).decode(), + encrypted_blob=self.data)) # Decrypt aesgcm = AESGCM(sk) diff --git a/pisa/responder.py b/pisa/responder.py index e44ee61..ae1859e 100644 --- a/pisa/responder.py +++ b/pisa/responder.py @@ -80,7 +80,8 @@ class Responder: if confirmations == 0: self.unconfirmed_txs.append(justice_txid) - logging.info(M("[Responder] new job added.", dispute_txid=dispute_txid, justice_txid=justice_txid, appointment_end=appointment_end)) + logging.info(M("[Responder] new job added.", + dispute_txid=dispute_txid, justice_txid=justice_txid, appointment_end=appointment_end)) if self.asleep: self.asleep = False @@ -108,7 +109,8 @@ class Responder: txs = block.get('tx') height = block.get('height') - logging.info(M("[Responder] new block received", block_hash=block_hash, prev_block_hash=block.get('previousblockhash'), txs=txs)) + logging.info(M("[Responder] new block received", + block_hash=block_hash, prev_block_hash=block.get('previousblockhash'), txs=txs)) # ToDo: #9-add-data-persistence # change prev_block_hash condition @@ -123,7 +125,8 @@ class Responder: else: logging.warning(M("[Responder] reorg found!", - local_prev_block_hash=prev_block_hash, remote_prev_block_hash=block.get('previousblockhash'))) + local_prev_block_hash=prev_block_hash, + remote_prev_block_hash=block.get('previousblockhash'))) self.handle_reorgs() @@ -170,7 +173,8 @@ class Responder: self.jobs[uuid].justice_rawtx, self.jobs[uuid].appointment_end, retry=True) logging.warning(M("[Responder] Transaction has missed many confirmations. 
Rebroadcasting.", - justice_txid=self.jobs[uuid].justice_txid, confirmations_missed=CONFIRMATIONS_BEFORE_RETRY)) + justice_txid=self.jobs[uuid].justice_txid, + confirmations_missed=CONFIRMATIONS_BEFORE_RETRY)) # FIXME: Legacy code, must be checked and updated/fixed def handle_reorgs(self): @@ -187,7 +191,9 @@ class Responder: # If both transactions are there, we only need to update the justice tx confirmation count if justice_in_chain: logging.info(M("[Responder] updating confirmation count for transaction.", - justice_txid=job.justice_txid, prev_count=job.confirmations, curr_count=justice_confirmations)) + justice_txid=job.justice_txid, + prev_count=job.confirmations, + curr_count=justice_confirmations)) job.confirmations = justice_confirmations diff --git a/pisa/utils/zmq_subscriber.py b/pisa/utils/zmq_subscriber.py index 0545d9c..4fbc63b 100644 --- a/pisa/utils/zmq_subscriber.py +++ b/pisa/utils/zmq_subscriber.py @@ -29,4 +29,5 @@ class ZMQHandler: block_hash = binascii.hexlify(body).decode('UTF-8') block_queue.put(block_hash) - logging.info(M("[ZMQHandler-{}] new block received via ZMQ".format(self.parent), block_hash=block_hash)) + logging.info(M("[ZMQHandler-{}] new block received via ZMQ".format(self.parent), + block_hash=block_hash)) diff --git a/pisa/watcher.py b/pisa/watcher.py index a70bb43..3e62f06 100644 --- a/pisa/watcher.py +++ b/pisa/watcher.py @@ -61,7 +61,8 @@ class Watcher: else: appointment_added = False - logging.info(M("[Watcher] maximum appointments reached, appointment rejected.", locator=appointment.locator)) + logging.info(M("[Watcher] maximum appointments reached, appointment rejected.", + locator=appointment.locator)) return appointment_added From 7f9c7d8609d423ac25755d6f898bc0e52707d417 Mon Sep 17 00:00:00 2001 From: Salvatore Ingala <6681844+bigspider@users.noreply.github.com> Date: Tue, 8 Oct 2019 18:23:31 +0700 Subject: [PATCH 04/13] Removed double initialization of logging --- pisa/__init__.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/pisa/__init__.py b/pisa/__init__.py index 9ba1a28..0d58d71 100644 --- a/pisa/__init__.py +++ b/pisa/__init__.py @@ -20,8 +20,6 @@ class StructuredMessage(object): M = StructuredMessage # to improve readability -logging.basicConfig(level=logging.INFO, format='%(message)s') - # Configure logging logging.basicConfig(format='%(message)s', level=logging.INFO, handlers=[ logging.FileHandler(conf.SERVER_LOG_FILE), From bae9b6b9133608eabb15ca033aeb7a83c147ae61 Mon Sep 17 00:00:00 2001 From: Salvatore Ingala <6681844+bigspider@users.noreply.github.com> Date: Tue, 8 Oct 2019 19:08:12 +0700 Subject: [PATCH 05/13] Added Logger class; refactored logging accordingly --- pisa/__init__.py | 17 ++++++++++++++- pisa/api.py | 10 +++++---- pisa/block_processor.py | 25 +++++++++++----------- pisa/carrier.py | 16 +++++++-------- pisa/cleaner.py | 13 ++++++------ pisa/encrypted_blob.py | 14 +++++++------ pisa/inspector.py | 18 ++++++++-------- pisa/pisad.py | 9 ++++---- pisa/responder.py | 40 +++++++++++++++++++----------------- pisa/utils/zmq_subscriber.py | 9 +++++--- pisa/watcher.py | 20 +++++++++--------- 11 files changed, 109 insertions(+), 82 deletions(-) diff --git a/pisa/__init__.py b/pisa/__init__.py index 0d58d71..77ae6b2 100644 --- a/pisa/__init__.py +++ b/pisa/__init__.py @@ -18,7 +18,22 @@ class StructuredMessage(object): def __str__(self): return json.dumps({**self.kwargs, "message": self.message, "time": self.time}) -M = StructuredMessage # to improve readability + +class Logger(object): + def __init__(self, actor=None): + 
self.actor = actor + + def _add_prefix(self, msg): + return msg if self.actor is None else "[{}] {}".format(self.actor, msg) + + def info(msg, **kwargs): + logging.info(StructuredMessage(self._add_prefix(msg), **kwargs)) + + def debug(msg, **kwargs): + logging.debug(StructuredMessage(self._add_prefix(msg), **kwargs)) + + def error(msg, **kwargs): + logging.error(StructuredMessage(self._add_prefix(msg), **kwargs)) # Configure logging logging.basicConfig(format='%(message)s', level=logging.INFO, handlers=[ diff --git a/pisa/api.py b/pisa/api.py index b437d80..301dfa5 100644 --- a/pisa/api.py +++ b/pisa/api.py @@ -1,7 +1,7 @@ import json from flask import Flask, request, Response, abort, jsonify -from pisa import HOST, PORT, logging, bitcoin_cli, M +from pisa import HOST, PORT, logging, bitcoin_cli, Logger from pisa.watcher import Watcher from pisa.inspector import Inspector from pisa import HOST, PORT, logging @@ -15,13 +15,15 @@ HTTP_OK = 200 HTTP_BAD_REQUEST = 400 HTTP_SERVICE_UNAVAILABLE = 503 +logger = Logger("API") + @app.route('/', methods=['POST']) def add_appointment(): remote_addr = request.environ.get('REMOTE_ADDR') remote_port = request.environ.get('REMOTE_PORT') - logging.info(M('[API] connection accepted', from_addr_port='{}:{}'.format(remote_addr, remote_port))) + logger.info('connection accepted', from_addr_port='{}:{}'.format(remote_addr, remote_port)) # Check content type once if properly defined request_data = json.loads(request.get_json()) @@ -47,8 +49,8 @@ def add_appointment(): rcode = HTTP_BAD_REQUEST response = "appointment rejected. Request does not match the standard" - logging.info(M('[API] sending response and disconnecting', - from_addr_port='{}:{}'.format(remote_addr, remote_port), response=response)) + logger.info('sending response and disconnecting', + from_addr_port='{}:{}'.format(remote_addr, remote_port), response=response) return Response(response, status=rcode, mimetype='text/plain') diff --git a/pisa/block_processor.py b/pisa/block_processor.py index 08bd1c9..ae9530b 100644 --- a/pisa/block_processor.py +++ b/pisa/block_processor.py @@ -1,9 +1,11 @@ import binascii from hashlib import sha256 -from pisa import logging, bitcoin_cli, M +from pisa import bitcoin_cli, Logger from pisa.utils.auth_proxy import JSONRPCException +logger = Logger("BlockProcessor") + class BlockProcessor: @staticmethod @@ -14,7 +16,7 @@ class BlockProcessor: except JSONRPCException as e: block = None - logging.error(M("[BlockProcessor] couldn't get block from bitcoind.", error_code=e)) + logger.error("couldn't get block from bitcoind.", error_code=e) return block @@ -26,7 +28,7 @@ class BlockProcessor: except JSONRPCException as e: block_hash = None - logging.error(M("[BlockProcessor] couldn't get block hash.", error_code=e)) + logger.error("couldn't get block hash.", error_code=e) return block_hash @@ -38,7 +40,7 @@ class BlockProcessor: except JSONRPCException as e: block_count = None - logging.error("[BlockProcessor] couldn't get block block count. 
Error code {}".format(e)) + logger.error("couldn't get block block count", error_code=e) return block_count @@ -54,10 +56,10 @@ class BlockProcessor: potential_matches = {locator: potential_locators[locator] for locator in intersection} if len(potential_matches) > 0: - logging.info(M("[BlockProcessor] list of potential matches", potential_matches=potential_matches)) + logger.info("list of potential matches", potential_matches=potential_matches) else: - logging.info(M("[BlockProcessor] no potential matches found")) + logger.info("no potential matches found") return potential_matches @@ -75,13 +77,12 @@ class BlockProcessor: justice_txid = bitcoin_cli.decoderawtransaction(justice_rawtx).get('txid') matches.append((locator, uuid, dispute_txid, justice_txid, justice_rawtx)) - logging.info(M("[BlockProcessor] match found for locator.", - locator=locator, uuid=uuid, justice_txid=justice_txid)) + logger.info("match found for locator.", locator=locator, uuid=uuid, justice_txid=justice_txid) except JSONRPCException as e: # Tx decode failed returns error code -22, maybe we should be more strict here. Leaving it simple # for the POC - logging.error(M("[BlockProcessor] can't build transaction from decoded data.", error_code=e)) + logger.error("can't build transaction from decoded data.", error_code=e) return matches @@ -93,7 +94,7 @@ class BlockProcessor: if tx in tx_job_map and tx in unconfirmed_txs: unconfirmed_txs.remove(tx) - logging.info(M("[Responder] confirmation received for transaction", tx=tx)) + logger.info("confirmation received for transaction", tx=tx) elif tx in unconfirmed_txs: if tx in missed_confirmations: @@ -102,8 +103,6 @@ class BlockProcessor: else: missed_confirmations[tx] = 1 - logging.info(M("[Responder] transaction missed a confirmation", - tx=tx, missed_confirmations=missed_confirmations[tx])) + logger.info("transaction missed a confirmation", tx=tx, missed_confirmations=missed_confirmations[tx]) return unconfirmed_txs, missed_confirmations - diff --git a/pisa/carrier.py b/pisa/carrier.py index 6efa58f..a82fb3f 100644 --- a/pisa/carrier.py +++ b/pisa/carrier.py @@ -1,8 +1,10 @@ from pisa.rpc_errors import * -from pisa import logging, bitcoin_cli, M +from pisa import bitcoin_cli, Logger from pisa.utils.auth_proxy import JSONRPCException from pisa.errors import UNKNOWN_JSON_RPC_EXCEPTION +logger = Logger("Carrier") + class Carrier: class Receipt: @@ -13,7 +15,7 @@ class Carrier: def send_transaction(self, rawtx, txid): try: - logging.info(M("[Carrier] pushing transaction to the network", txid=txid, rawtx=rawtx)) + logger.info("pushing transaction to the network", txid=txid, rawtx=rawtx) bitcoin_cli.sendrawtransaction(rawtx) receipt = self.Receipt(delivered=True) @@ -32,8 +34,7 @@ class Carrier: receipt = self.Receipt(delivered=False, reason=UNKNOWN_JSON_RPC_EXCEPTION) elif errno == RPC_VERIFY_ALREADY_IN_CHAIN: - logging.info(M("[Carrier] Transaction is already in the blockchain. Getting confirmation count", - txid=txid)) + logger.info("Transaction is already in the blockchain. 
Getting confirmation count", txid=txid) # If the transaction is already in the chain, we get the number of confirmations and watch the job # until the end of the appointment @@ -50,7 +51,7 @@ class Carrier: else: # If something else happens (unlikely but possible) log it so we can treat it in future releases - logging.error(M("[Responder] JSONRPCException.", error_code=e)) + logger.error("JSONRPCException.", error_code=e) receipt = self.Receipt(delivered=False, reason=UNKNOWN_JSON_RPC_EXCEPTION) return receipt @@ -67,12 +68,11 @@ class Carrier: # reorged while we were querying bitcoind to get the confirmation count. In such a case we just # restart the job if e.error.get('code') == RPC_INVALID_ADDRESS_OR_KEY: - logging.info(M("[Carrier] transaction got reorged before obtaining information", txid=txid)) + logger.info("transaction got reorged before obtaining information", txid=txid) # TODO: Check RPC methods to see possible returns and avoid general else # else: # # If something else happens (unlikely but possible) log it so we can treat it in future releases - # logging.error(M("[Responder] JSONRPCException.", error_code=e) + # logger.error("JSONRPCException.", error_code=e) return tx_info - diff --git a/pisa/cleaner.py b/pisa/cleaner.py index f9d748b..39af1d5 100644 --- a/pisa/cleaner.py +++ b/pisa/cleaner.py @@ -1,4 +1,6 @@ -from pisa import logging, M +from pisa import Logger + +logger = Logger("Cleaner") # Dictionaries in Python are "passed-by-reference", so no return is needed for the Cleaner" # https://docs.python.org/3/faq/programming.html#how-do-i-write-a-function-with-output-parameters-call-by-reference @@ -18,14 +20,13 @@ class Cleaner: else: locator_uuid_map[locator].remove(uuid) - logging.info(M("[Cleaner] end time reached with no match! Deleting appointment.", - locator=locator, uuid=uuid)) + logger.info("end time reached with no match! Deleting appointment.", locator=locator, uuid=uuid) @staticmethod def delete_completed_jobs(jobs, tx_job_map, completed_jobs, height): for uuid, confirmations in completed_jobs: - logging.info(M("[Cleaner] job completed. Appointment ended after reaching enough confirmations.", - uuid=uuid, height=height, confirmations=confirmations)) + logger.info("job completed. Appointment ended after reaching enough confirmations.", + uuid=uuid, height=height, confirmations=confirmations) # ToDo: #9-add-data-persistence justice_txid = jobs[uuid].justice_txid @@ -34,7 +35,7 @@ class Cleaner: if len(tx_job_map[justice_txid]) == 1: tx_job_map.pop(justice_txid) - logging.info(M("[Cleaner] no more jobs for justice transaction.", justice_txid=justice_txid)) + logger.info("no more jobs for justice transaction.", justice_txid=justice_txid) else: tx_job_map[justice_txid].remove(uuid) diff --git a/pisa/encrypted_blob.py b/pisa/encrypted_blob.py index 1dcf644..a772241 100644 --- a/pisa/encrypted_blob.py +++ b/pisa/encrypted_blob.py @@ -1,7 +1,9 @@ from hashlib import sha256 from binascii import unhexlify, hexlify from cryptography.hazmat.primitives.ciphers.aead import AESGCM -from pisa import logging, M +from pisa import Logger + +logger = Logger("Watcher") # FIXME: EncryptedBlob is assuming AES-128-GCM. 
A cipher field should be part of the object and the decryption should be @@ -22,11 +24,11 @@ class EncryptedBlob: sk = master_key[:16] nonce = master_key[16:] - logging.info(M("[Watcher] creating new blob.", - master_key=hexlify(master_key).decode(), - sk=hexlify(sk).decode(), - nonce=hexlify(sk).decode(), - encrypted_blob=self.data)) + logger.info("[Watcher] creating new blob.", + master_key=hexlify(master_key).decode(), + sk=hexlify(sk).decode(), + nonce=hexlify(sk).decode(), + encrypted_blob=self.data) # Decrypt aesgcm = AESGCM(sk) diff --git a/pisa/inspector.py b/pisa/inspector.py index 945afc1..0bffa75 100644 --- a/pisa/inspector.py +++ b/pisa/inspector.py @@ -2,10 +2,12 @@ import re from pisa import errors import pisa.conf as conf -from pisa import logging, bitcoin_cli, M +from pisa import bitcoin_cli, Logger from pisa.appointment import Appointment from pisa.block_processor import BlockProcessor +logger = Logger("Inspector") + # FIXME: The inspector logs the wrong messages sent form the users. A possible attack surface would be to send a really # long field that, even if not accepted by PISA, would be stored in the logs. This is a possible DoS surface # since pisa would store any kind of message (no matter the length). Solution: truncate the length of the fields @@ -70,7 +72,7 @@ class Inspector: rcode = errors.APPOINTMENT_WRONG_FIELD_FORMAT message = "wrong locator format ({})".format(locator) - logging.error(M("[Inspector] {}".format(message))) + logger.error(message) return rcode, message @@ -97,7 +99,7 @@ class Inspector: else: message = "start_time is too close to current height" - logging.error(M("[Inspector] {}".format(message))) + logger.error(message) return rcode, message @@ -130,7 +132,7 @@ class Inspector: else: message = 'end_time is too close to current height' - logging.error(M("[Inspector] {}".format(message))) + logger.error(message) return rcode, message @@ -152,7 +154,7 @@ class Inspector: message = "dispute delta too small. The dispute delta should be at least {} (current: {})".format( conf.MIN_DISPUTE_DELTA, dispute_delta) - logging.error(M("[Inspector] {}".format(message))) + logger.error(message) return rcode, message @@ -174,7 +176,7 @@ class Inspector: rcode = errors.APPOINTMENT_WRONG_FIELD_FORMAT message = "wrong encrypted_blob format ({})".format(encrypted_blob) - logging.error(M("[Inspector] {}".format(message))) + logger.error(message) return rcode, message @@ -195,7 +197,7 @@ class Inspector: rcode = errors.APPOINTMENT_CIPHER_NOT_SUPPORTED message = "cipher not supported: {}".format(cipher) - logging.error(M("[Inspector] {}".format(message))) + logger.error(message) return rcode, message @@ -216,6 +218,6 @@ class Inspector: rcode = errors.APPOINTMENT_HASH_FUNCTION_NOT_SUPPORTED message = "hash_function not supported {}".format(hash_function) - logging.error(M("[Inspector] {}".format(message))) + logger.error(message) return rcode, message diff --git a/pisa/pisad.py b/pisa/pisad.py index f7b3603..c769201 100644 --- a/pisa/pisad.py +++ b/pisa/pisad.py @@ -1,10 +1,11 @@ from sys import argv from getopt import getopt -from pisa import logging, M +from pisa import logging, Logger from pisa.api import start_api from pisa.tools import can_connect_to_bitcoind, in_correct_network +logger = Logger("Pisad") if __name__ == '__main__': debug = False @@ -19,8 +20,8 @@ if __name__ == '__main__': start_api() else: - logging.error(M("[Pisad] bitcoind is running on a different network, check conf.py and bitcoin.conf. 
" - "Shutting down")) + logger.error("[Pisad] bitcoind is running on a different network, check conf.py and bitcoin.conf. " + "Shutting down") else: - logging.error(M("[Pisad] can't connect to bitcoind. Shutting down")) + logging.error("[Pisad] can't connect to bitcoind. Shutting down") diff --git a/pisa/responder.py b/pisa/responder.py index ae1859e..6e76904 100644 --- a/pisa/responder.py +++ b/pisa/responder.py @@ -3,7 +3,7 @@ from threading import Thread from hashlib import sha256 from binascii import unhexlify -from pisa import logging, M +from pisa import Logger from pisa.cleaner import Cleaner from pisa.carrier import Carrier from pisa.tools import check_tx_in_chain @@ -13,6 +13,8 @@ from pisa.utils.zmq_subscriber import ZMQHandler CONFIRMATIONS_BEFORE_RETRY = 6 MIN_CONFIRMATIONS = 6 +logger = Logger("Responder") + class Job: def __init__(self, dispute_txid, justice_txid, justice_rawtx, appointment_end, retry_counter=0): @@ -45,7 +47,7 @@ class Responder: def add_response(self, uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, retry=False): if self.asleep: - logging.info(M("[Responder] waking up!")) + logger.info("waking up!") carrier = Carrier() receipt = carrier.send_transaction(justice_rawtx, justice_txid) @@ -80,8 +82,8 @@ class Responder: if confirmations == 0: self.unconfirmed_txs.append(justice_txid) - logging.info(M("[Responder] new job added.", - dispute_txid=dispute_txid, justice_txid=justice_txid, appointment_end=appointment_end)) + logger.info("new job added.", + dispute_txid=dispute_txid, justice_txid=justice_txid, appointment_end=appointment_end) if self.asleep: self.asleep = False @@ -109,8 +111,8 @@ class Responder: txs = block.get('tx') height = block.get('height') - logging.info(M("[Responder] new block received", - block_hash=block_hash, prev_block_hash=block.get('previousblockhash'), txs=txs)) + logger.info("new block received", + block_hash=block_hash, prev_block_hash=block.get('previousblockhash'), txs=txs) # ToDo: #9-add-data-persistence # change prev_block_hash condition @@ -124,9 +126,9 @@ class Responder: self.rebroadcast(txs_to_rebroadcast) else: - logging.warning(M("[Responder] reorg found!", - local_prev_block_hash=prev_block_hash, - remote_prev_block_hash=block.get('previousblockhash'))) + logger.warning("reorg found!", + local_prev_block_hash=prev_block_hash, + remote_prev_block_hash=block.get('previousblockhash')) self.handle_reorgs() @@ -136,7 +138,7 @@ class Responder: self.asleep = True self.zmq_subscriber.terminate = True - logging.info(M("[Responder] no more pending jobs, going back to sleep")) + logger.info("no more pending jobs, going back to sleep") def get_txs_to_rebroadcast(self, txs): txs_to_rebroadcast = [] @@ -172,9 +174,9 @@ class Responder: self.add_response(uuid, self.jobs[uuid].dispute_txid, self.jobs[uuid].justice_txid, self.jobs[uuid].justice_rawtx, self.jobs[uuid].appointment_end, retry=True) - logging.warning(M("[Responder] Transaction has missed many confirmations. Rebroadcasting.", - justice_txid=self.jobs[uuid].justice_txid, - confirmations_missed=CONFIRMATIONS_BEFORE_RETRY)) + logger.warning("Transaction has missed many confirmations. 
Rebroadcasting.", + justice_txid=self.jobs[uuid].justice_txid, + confirmations_missed=CONFIRMATIONS_BEFORE_RETRY) # FIXME: Legacy code, must be checked and updated/fixed def handle_reorgs(self): @@ -190,10 +192,10 @@ class Responder: # If both transactions are there, we only need to update the justice tx confirmation count if justice_in_chain: - logging.info(M("[Responder] updating confirmation count for transaction.", - justice_txid=job.justice_txid, - prev_count=job.confirmations, - curr_count=justice_confirmations)) + logger.info("updating confirmation count for transaction.", + justice_txid=job.justice_txid, + prev_count=job.confirmations, + curr_count=justice_confirmations) job.confirmations = justice_confirmations @@ -208,5 +210,5 @@ class Responder: # ToDo: #24-properly-handle-reorgs # FIXME: if the dispute is not on chain (either in mempool or not there at all), we need to call the # reorg manager - logging.warning(M("[Responder] dispute and justice transaction missing. Calling the reorg manager")) - logging.error(M("[Responder] reorg manager not yet implemented")) + logger.warning("dispute and justice transaction missing. Calling the reorg manager") + logger.error("reorg manager not yet implemented") diff --git a/pisa/utils/zmq_subscriber.py b/pisa/utils/zmq_subscriber.py index 4fbc63b..d28ae64 100644 --- a/pisa/utils/zmq_subscriber.py +++ b/pisa/utils/zmq_subscriber.py @@ -1,8 +1,10 @@ import zmq import binascii -from pisa import logging, M +from pisa import Logger from pisa.conf import FEED_PROTOCOL, FEED_ADDR, FEED_PORT +logger = Logger("ZMQHandler") + # ToDo: #7-add-async-back-to-zmq class ZMQHandler: @@ -29,5 +31,6 @@ class ZMQHandler: block_hash = binascii.hexlify(body).decode('UTF-8') block_queue.put(block_hash) - logging.info(M("[ZMQHandler-{}] new block received via ZMQ".format(self.parent), - block_hash=block_hash)) + logger.info("new block received via ZMQ", + parent=self.parent, + block_hash=block_hash) diff --git a/pisa/watcher.py b/pisa/watcher.py index 3e62f06..8dbaa1c 100644 --- a/pisa/watcher.py +++ b/pisa/watcher.py @@ -2,7 +2,7 @@ from uuid import uuid4 from queue import Queue from threading import Thread -from pisa import logging, M +from pisa import Logger from pisa.cleaner import Cleaner from pisa.conf import EXPIRY_DELTA from pisa.responder import Responder @@ -10,6 +10,7 @@ from pisa.conf import MAX_APPOINTMENTS from pisa.block_processor import BlockProcessor from pisa.utils.zmq_subscriber import ZMQHandler +logging = Logger("Watcher") class Watcher: def __init__(self, max_appointments=MAX_APPOINTMENTS): @@ -52,17 +53,16 @@ class Watcher: zmq_thread.start() watcher.start() - logging.info(M("[Watcher] waking up!")) + logger.info("waking up!") appointment_added = True - logging.info(M("[Watcher] new appointment accepted.", locator=appointment.locator)) + logger.info("new appointment accepted.", locator=appointment.locator) else: appointment_added = False - logging.info(M("[Watcher] maximum appointments reached, appointment rejected.", - locator=appointment.locator)) + logger.info("maximum appointments reached, appointment rejected.", locator=appointment.locator) return appointment_added @@ -73,14 +73,14 @@ class Watcher: def do_watch(self): while len(self.appointments) > 0: block_hash = self.block_queue.get() - logging.info(M("[Watcher] new block received", block_hash=block_hash)) + logger.info("new block received", block_hash=block_hash) block = BlockProcessor.get_block(block_hash) if block is not None: txids = block.get('tx') - logging.info(M("[Watcher] 
list of transactions.", txids=txids)) + logger.info("list of transactions.", txids=txids) expired_appointments = [uuid for uuid, appointment in self.appointments.items() if block["height"] > appointment.end_time + EXPIRY_DELTA] @@ -91,8 +91,8 @@ class Watcher: matches = BlockProcessor.get_matches(potential_matches, self.locator_uuid_map, self.appointments) for locator, uuid, dispute_txid, justice_txid, justice_rawtx in matches: - logging.info(M("[Watcher] notifying responder and deleting appointment.", - justice_txid=justice_txid, locator=locator, uuid=uuid)) + logger.info("notifying responder and deleting appointment.", + justice_txid=justice_txid, locator=locator, uuid=uuid) self.responder.add_response(uuid, dispute_txid, justice_txid, justice_rawtx, self.appointments[uuid].end_time) @@ -113,4 +113,4 @@ class Watcher: self.asleep = True self.zmq_subscriber.terminate = True - logging.error(M("[Watcher] no more pending appointments, going back to sleep")) + logger.error("no more pending appointments, going back to sleep") From 4bcc8e20a0c0af38030fae153e2554e881bfa6d9 Mon Sep 17 00:00:00 2001 From: Salvatore Ingala <6681844+bigspider@users.noreply.github.com> Date: Wed, 9 Oct 2019 08:29:47 +0700 Subject: [PATCH 06/13] Refactored check_tx_in_chain with new log format --- pisa/pisad.py | 5 ++--- pisa/responder.py | 4 ++-- pisa/tools.py | 12 ++++++------ 3 files changed, 10 insertions(+), 11 deletions(-) diff --git a/pisa/pisad.py b/pisa/pisad.py index c769201..406df70 100644 --- a/pisa/pisad.py +++ b/pisa/pisad.py @@ -20,8 +20,7 @@ if __name__ == '__main__': start_api() else: - logger.error("[Pisad] bitcoind is running on a different network, check conf.py and bitcoin.conf. " - "Shutting down") + logger.error("bitcoind is running on a different network, check conf.py and bitcoin.conf. Shutting down") else: - logging.error("[Pisad] can't connect to bitcoind. Shutting down") + logging.error("can't connect to bitcoind. Shutting down") diff --git a/pisa/responder.py b/pisa/responder.py index 6e76904..6855e5c 100644 --- a/pisa/responder.py +++ b/pisa/responder.py @@ -183,11 +183,11 @@ class Responder: for uuid, job in self.jobs.items(): # First we check if the dispute transaction is still in the blockchain. 
If not, the justice can not be # there either, so we'll need to call the reorg manager straight away - dispute_in_chain, _ = check_tx_in_chain(job.dispute_txid, parent='Responder', tx_label='dispute tx') + dispute_in_chain, _ = check_tx_in_chain(job.dispute_txid, logger=logger, tx_label='dispute tx') # If the dispute is there, we can check the justice tx if dispute_in_chain: - justice_in_chain, justice_confirmations = check_tx_in_chain(job.justice_txid, parent='Responder', + justice_in_chain, justice_confirmations = check_tx_in_chain(job.justice_txid, logger=logger, tx_label='justice tx') # If both transactions are there, we only need to update the justice tx confirmation count diff --git a/pisa/tools.py b/pisa/tools.py index 909c64c..a5fbe9c 100644 --- a/pisa/tools.py +++ b/pisa/tools.py @@ -2,12 +2,12 @@ import re from http.client import HTTPException import pisa.conf as conf -from pisa import logging, bitcoin_cli +from pisa import bitcoin_cli, Logger from pisa.utils.auth_proxy import JSONRPCException from pisa.rpc_errors import RPC_INVALID_ADDRESS_OR_KEY -def check_tx_in_chain(tx_id, parent='', tx_label='transaction'): +def check_tx_in_chain(tx_id, logger=Logger(), tx_label='transaction'): tx_in_chain = False confirmations = 0 @@ -17,18 +17,18 @@ def check_tx_in_chain(tx_id, parent='', tx_label='transaction'): if tx_info.get("confirmations"): confirmations = int(tx_info.get("confirmations")) tx_in_chain = True - logging.error("[{}] {} found in the blockchain (txid: {}) ".format(parent, tx_label, tx_id)) + logger.error("{} found in the blockchain (txid: {}) ".format(tx_label), txid=tx_id) else: - logging.error("[{}] {} found in mempool (txid: {}) ".format(parent, tx_label, tx_id)) + logger.error("{} found in mempool (txid: {}) ".format(tx_label), txid=tx_id) except JSONRPCException as e: if e.error.get('code') == RPC_INVALID_ADDRESS_OR_KEY: - logging.error("[{}] {} not found in mempool nor blockchain (txid: {}) ".format(parent, tx_label, tx_id)) + logger.error("{} not found in mempool nor blockchain (txid: {}) ".format(tx_label), txid=tx_id) else: # ToDO: Unhandled errors, check this properly - logging.error("[{}] JSONRPCException. 
Error code {}".format(parent, e)) + logger.error("JSONRPCException.", error_code=e) return tx_in_chain, confirmations From a53e7a82e38b3a2288f8daccc960022c727e4ef9 Mon Sep 17 00:00:00 2001 From: Salvatore Ingala <6681844+bigspider@users.noreply.github.com> Date: Wed, 9 Oct 2019 09:16:35 +0700 Subject: [PATCH 07/13] Added 'actor' among the fields of the structured log messages; minor other fixes --- pisa/__init__.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pisa/__init__.py b/pisa/__init__.py index 77ae6b2..2dcde22 100644 --- a/pisa/__init__.py +++ b/pisa/__init__.py @@ -26,14 +26,14 @@ class Logger(object): def _add_prefix(self, msg): return msg if self.actor is None else "[{}] {}".format(self.actor, msg) - def info(msg, **kwargs): - logging.info(StructuredMessage(self._add_prefix(msg), **kwargs)) + def info(self, msg, **kwargs): + logging.info(StructuredMessage(self._add_prefix(msg), actor=self.actor, **kwargs)) - def debug(msg, **kwargs): - logging.debug(StructuredMessage(self._add_prefix(msg), **kwargs)) + def debug(self, msg, **kwargs): + logging.debug(StructuredMessage(self._add_prefix(msg), actor=self.actor, **kwargs)) - def error(msg, **kwargs): - logging.error(StructuredMessage(self._add_prefix(msg), **kwargs)) + def error(self, msg, **kwargs): + logging.error(StructuredMessage(self._add_prefix(msg), actor=self.actor, **kwargs)) # Configure logging logging.basicConfig(format='%(message)s', level=logging.INFO, handlers=[ From c524319027d5ae2b2df99f4c90a2f76aaf8f39aa Mon Sep 17 00:00:00 2001 From: Salvatore Ingala <6681844+bigspider@users.noreply.github.com> Date: Wed, 9 Oct 2019 09:30:32 +0700 Subject: [PATCH 08/13] Several fixes and improvements --- pisa/block_processor.py | 2 -- pisa/inspector.py | 21 ++++++++++++++------- pisa/logger.py | 30 ++++++++++++++++++++++++++++++ pisa/pisad.py | 2 +- pisa/tools.py | 6 +++--- pisa/utils/zmq_subscriber.py | 9 +++------ pisa/watcher.py | 2 +- 7 files changed, 52 insertions(+), 20 deletions(-) create mode 100644 pisa/logger.py diff --git a/pisa/block_processor.py b/pisa/block_processor.py index ae9530b..7c544ea 100644 --- a/pisa/block_processor.py +++ b/pisa/block_processor.py @@ -63,8 +63,6 @@ class BlockProcessor: return potential_matches - return potential_matches - @staticmethod def get_matches(potential_matches, locator_uuid_map, appointments): matches = [] diff --git a/pisa/inspector.py b/pisa/inspector.py index 0bffa75..ecbf688 100644 --- a/pisa/inspector.py +++ b/pisa/inspector.py @@ -72,7 +72,8 @@ class Inspector: rcode = errors.APPOINTMENT_WRONG_FIELD_FORMAT message = "wrong locator format ({})".format(locator) - logger.error(message) + if message is not None: + logger.error(message) return rcode, message @@ -99,7 +100,8 @@ class Inspector: else: message = "start_time is too close to current height" - logger.error(message) + if message is not None: + logger.error(message) return rcode, message @@ -132,7 +134,8 @@ class Inspector: else: message = 'end_time is too close to current height' - logger.error(message) + if message is not None: + logger.error(message) return rcode, message @@ -154,7 +157,8 @@ class Inspector: message = "dispute delta too small. 
The dispute delta should be at least {} (current: {})".format( conf.MIN_DISPUTE_DELTA, dispute_delta) - logger.error(message) + if message is not None: + logger.error(message) return rcode, message @@ -176,7 +180,8 @@ class Inspector: rcode = errors.APPOINTMENT_WRONG_FIELD_FORMAT message = "wrong encrypted_blob format ({})".format(encrypted_blob) - logger.error(message) + if message is not None: + logger.error(message) return rcode, message @@ -197,7 +202,8 @@ class Inspector: rcode = errors.APPOINTMENT_CIPHER_NOT_SUPPORTED message = "cipher not supported: {}".format(cipher) - logger.error(message) + if message is not None: + logger.error(message) return rcode, message @@ -218,6 +224,7 @@ class Inspector: rcode = errors.APPOINTMENT_HASH_FUNCTION_NOT_SUPPORTED message = "hash_function not supported {}".format(hash_function) - logger.error(message) + if message is not None: + logger.error(message) return rcode, message diff --git a/pisa/logger.py b/pisa/logger.py new file mode 100644 index 0000000..3dad0e0 --- /dev/null +++ b/pisa/logger.py @@ -0,0 +1,30 @@ +import logging +import time +import json + + +class StructuredMessage(object): + def __init__(self, message, **kwargs): + self.message = message + self.time = time.asctime() + self.kwargs = kwargs + + def __str__(self): + return json.dumps({**self.kwargs, "message": self.message, "time": self.time}) + + +class Logger(object): + def __init__(self, actor=None): + self.actor = actor + + def _add_prefix(self, msg): + return msg if self.actor is None else "[{}] {}".format(self.actor, msg) + + def info(self, msg, **kwargs): + logging.info(StructuredMessage(self._add_prefix(msg), actor=self.actor, **kwargs)) + + def debug(self, msg, **kwargs): + logging.debug(StructuredMessage(self._add_prefix(msg), actor=self.actor, **kwargs)) + + def error(self, msg, **kwargs): + logging.error(StructuredMessage(self._add_prefix(msg), actor=self.actor, **kwargs)) \ No newline at end of file diff --git a/pisa/pisad.py b/pisa/pisad.py index 406df70..305f156 100644 --- a/pisa/pisad.py +++ b/pisa/pisad.py @@ -23,4 +23,4 @@ if __name__ == '__main__': logger.error("bitcoind is running on a different network, check conf.py and bitcoin.conf. Shutting down") else: - logging.error("can't connect to bitcoind. Shutting down") + logger.error("can't connect to bitcoind. 
Shutting down") diff --git a/pisa/tools.py b/pisa/tools.py index a5fbe9c..6e24716 100644 --- a/pisa/tools.py +++ b/pisa/tools.py @@ -17,14 +17,14 @@ def check_tx_in_chain(tx_id, logger=Logger(), tx_label='transaction'): if tx_info.get("confirmations"): confirmations = int(tx_info.get("confirmations")) tx_in_chain = True - logger.error("{} found in the blockchain (txid: {}) ".format(tx_label), txid=tx_id) + logger.error("{} found in the blockchain".format(tx_label), txid=tx_id) else: - logger.error("{} found in mempool (txid: {}) ".format(tx_label), txid=tx_id) + logger.error("{} found in mempool".format(tx_label), txid=tx_id) except JSONRPCException as e: if e.error.get('code') == RPC_INVALID_ADDRESS_OR_KEY: - logger.error("{} not found in mempool nor blockchain (txid: {}) ".format(tx_label), txid=tx_id) + logger.error("{} not found in mempool nor blockchain".format(tx_label), txid=tx_id) else: # ToDO: Unhandled errors, check this properly diff --git a/pisa/utils/zmq_subscriber.py b/pisa/utils/zmq_subscriber.py index d28ae64..ded5922 100644 --- a/pisa/utils/zmq_subscriber.py +++ b/pisa/utils/zmq_subscriber.py @@ -3,8 +3,6 @@ import binascii from pisa import Logger from pisa.conf import FEED_PROTOCOL, FEED_ADDR, FEED_PORT -logger = Logger("ZMQHandler") - # ToDo: #7-add-async-back-to-zmq class ZMQHandler: @@ -15,7 +13,8 @@ class ZMQHandler: self.zmqSubSocket.setsockopt(zmq.RCVHWM, 0) self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashblock") self.zmqSubSocket.connect("%s://%s:%s" % (FEED_PROTOCOL, FEED_ADDR, FEED_PORT)) - self.parent = parent + self.logger = Logger("ZMQHandler-{}".format(parent)) + self.terminate = False def handle(self, block_queue): @@ -31,6 +30,4 @@ class ZMQHandler: block_hash = binascii.hexlify(body).decode('UTF-8') block_queue.put(block_hash) - logger.info("new block received via ZMQ", - parent=self.parent, - block_hash=block_hash) + self.logger.info("new block received via ZMQ", block_hash=block_hash) diff --git a/pisa/watcher.py b/pisa/watcher.py index 8dbaa1c..4184e58 100644 --- a/pisa/watcher.py +++ b/pisa/watcher.py @@ -10,7 +10,7 @@ from pisa.conf import MAX_APPOINTMENTS from pisa.block_processor import BlockProcessor from pisa.utils.zmq_subscriber import ZMQHandler -logging = Logger("Watcher") +logger = Logger("Watcher") class Watcher: def __init__(self, max_appointments=MAX_APPOINTMENTS): From dee93e5c62d8e871e6e2b0c91a319d0bfd052569 Mon Sep 17 00:00:00 2001 From: Salvatore Ingala <6681844+bigspider@users.noreply.github.com> Date: Wed, 9 Oct 2019 10:20:39 +0700 Subject: [PATCH 09/13] Moved StructuredMessage and Logger to separate file; refactored pisa-cli logging using new format --- apps/cli/__init__.py | 2 +- apps/cli/blob.py | 13 ++++++++----- pisa/__init__.py | 29 ----------------------------- pisa/api.py | 3 ++- pisa/block_processor.py | 3 ++- pisa/carrier.py | 3 ++- pisa/cleaner.py | 2 +- pisa/encrypted_blob.py | 2 +- pisa/inspector.py | 3 ++- pisa/pisad.py | 3 ++- pisa/responder.py | 2 +- pisa/tools.py | 3 ++- pisa/utils/zmq_subscriber.py | 2 +- pisa/watcher.py | 2 +- 14 files changed, 26 insertions(+), 46 deletions(-) diff --git a/apps/cli/__init__.py b/apps/cli/__init__.py index 40e495c..20bb9fb 100644 --- a/apps/cli/__init__.py +++ b/apps/cli/__init__.py @@ -12,7 +12,7 @@ SUPPORTED_HASH_FUNCTIONS = ["SHA256"] SUPPORTED_CIPHERS = ["AES-GCM-128"] # Configure logging -logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO, handlers=[ +logging.basicConfig(format='%(message)s', level=logging.INFO, handlers=[ 
logging.FileHandler(CLIENT_LOG_FILE), logging.StreamHandler() ]) diff --git a/apps/cli/blob.py b/apps/cli/blob.py index 6041050..204ccd7 100644 --- a/apps/cli/blob.py +++ b/apps/cli/blob.py @@ -5,6 +5,9 @@ from cryptography.hazmat.primitives.ciphers.aead import AESGCM from apps.cli import logging from apps.cli import SUPPORTED_HASH_FUNCTIONS, SUPPORTED_CIPHERS +from pisa.logger import Logger + +logger = Logger("Client") class Blob: @@ -50,10 +53,10 @@ class Blob: encrypted_blob = aesgcm.encrypt(nonce=nonce, data=tx, associated_data=None) encrypted_blob = hexlify(encrypted_blob).decode() - logging.info("[Client] creating new blob") - logging.info("[Client] master key: {}".format(hexlify(master_key).decode())) - logging.info("[Client] sk: {}".format(hexlify(sk).decode())) - logging.info("[Client] nonce: {}".format(hexlify(nonce).decode())) - logging.info("[Client] encrypted_blob: {}".format(encrypted_blob)) + logger.info("creating new blob", + master_key=hexlify(master_key).decode(), + sk=hexlify(sk).decode(), + nonce=hexlify(nonce).decode(), + encrypted_blob=encrypted_blob) return encrypted_blob diff --git a/pisa/__init__.py b/pisa/__init__.py index 2dcde22..5d03345 100644 --- a/pisa/__init__.py +++ b/pisa/__init__.py @@ -1,6 +1,4 @@ import logging -import json -import time from pisa.utils.auth_proxy import AuthServiceProxy import pisa.conf as conf @@ -8,33 +6,6 @@ import pisa.conf as conf HOST = 'localhost' PORT = 9814 - -class StructuredMessage(object): - def __init__(self, message, **kwargs): - self.message = message - self.time = time.asctime() - self.kwargs = kwargs - - def __str__(self): - return json.dumps({**self.kwargs, "message": self.message, "time": self.time}) - - -class Logger(object): - def __init__(self, actor=None): - self.actor = actor - - def _add_prefix(self, msg): - return msg if self.actor is None else "[{}] {}".format(self.actor, msg) - - def info(self, msg, **kwargs): - logging.info(StructuredMessage(self._add_prefix(msg), actor=self.actor, **kwargs)) - - def debug(self, msg, **kwargs): - logging.debug(StructuredMessage(self._add_prefix(msg), actor=self.actor, **kwargs)) - - def error(self, msg, **kwargs): - logging.error(StructuredMessage(self._add_prefix(msg), actor=self.actor, **kwargs)) - # Configure logging logging.basicConfig(format='%(message)s', level=logging.INFO, handlers=[ logging.FileHandler(conf.SERVER_LOG_FILE), diff --git a/pisa/api.py b/pisa/api.py index 301dfa5..1ab768d 100644 --- a/pisa/api.py +++ b/pisa/api.py @@ -1,7 +1,8 @@ import json from flask import Flask, request, Response, abort, jsonify -from pisa import HOST, PORT, logging, bitcoin_cli, Logger +from pisa import HOST, PORT, logging, bitcoin_cli +from pisa.logger import Logger from pisa.watcher import Watcher from pisa.inspector import Inspector from pisa import HOST, PORT, logging diff --git a/pisa/block_processor.py b/pisa/block_processor.py index 7c544ea..c139f86 100644 --- a/pisa/block_processor.py +++ b/pisa/block_processor.py @@ -1,7 +1,8 @@ import binascii from hashlib import sha256 -from pisa import bitcoin_cli, Logger +from pisa import bitcoin_cli +from pisa.logger import Logger from pisa.utils.auth_proxy import JSONRPCException logger = Logger("BlockProcessor") diff --git a/pisa/carrier.py b/pisa/carrier.py index a82fb3f..3e11028 100644 --- a/pisa/carrier.py +++ b/pisa/carrier.py @@ -1,5 +1,6 @@ from pisa.rpc_errors import * -from pisa import bitcoin_cli, Logger +from pisa import bitcoin_cli +from pisa.logger import Logger from pisa.utils.auth_proxy import JSONRPCException from 
pisa.errors import UNKNOWN_JSON_RPC_EXCEPTION diff --git a/pisa/cleaner.py b/pisa/cleaner.py index 39af1d5..0dad0da 100644 --- a/pisa/cleaner.py +++ b/pisa/cleaner.py @@ -1,4 +1,4 @@ -from pisa import Logger +from pisa.logger import Logger logger = Logger("Cleaner") diff --git a/pisa/encrypted_blob.py b/pisa/encrypted_blob.py index a772241..d9a02ba 100644 --- a/pisa/encrypted_blob.py +++ b/pisa/encrypted_blob.py @@ -1,7 +1,7 @@ from hashlib import sha256 from binascii import unhexlify, hexlify from cryptography.hazmat.primitives.ciphers.aead import AESGCM -from pisa import Logger +from pisa.logger import Logger logger = Logger("Watcher") diff --git a/pisa/inspector.py b/pisa/inspector.py index ecbf688..53055b8 100644 --- a/pisa/inspector.py +++ b/pisa/inspector.py @@ -2,7 +2,8 @@ import re from pisa import errors import pisa.conf as conf -from pisa import bitcoin_cli, Logger +from pisa import bitcoin_cli +from pisa.logger import Logger from pisa.appointment import Appointment from pisa.block_processor import BlockProcessor diff --git a/pisa/pisad.py b/pisa/pisad.py index 305f156..c990a2e 100644 --- a/pisa/pisad.py +++ b/pisa/pisad.py @@ -1,7 +1,8 @@ from sys import argv from getopt import getopt -from pisa import logging, Logger +from pisa import logging +from pisa.logger import Logger from pisa.api import start_api from pisa.tools import can_connect_to_bitcoind, in_correct_network diff --git a/pisa/responder.py b/pisa/responder.py index 6855e5c..9b2b1f8 100644 --- a/pisa/responder.py +++ b/pisa/responder.py @@ -3,7 +3,7 @@ from threading import Thread from hashlib import sha256 from binascii import unhexlify -from pisa import Logger +from pisa.logger import Logger from pisa.cleaner import Cleaner from pisa.carrier import Carrier from pisa.tools import check_tx_in_chain diff --git a/pisa/tools.py b/pisa/tools.py index 6e24716..ddc4ab5 100644 --- a/pisa/tools.py +++ b/pisa/tools.py @@ -2,7 +2,8 @@ import re from http.client import HTTPException import pisa.conf as conf -from pisa import bitcoin_cli, Logger +from pisa import bitcoin_cli +from pisa.logger import Logger from pisa.utils.auth_proxy import JSONRPCException from pisa.rpc_errors import RPC_INVALID_ADDRESS_OR_KEY diff --git a/pisa/utils/zmq_subscriber.py b/pisa/utils/zmq_subscriber.py index ded5922..f0ac469 100644 --- a/pisa/utils/zmq_subscriber.py +++ b/pisa/utils/zmq_subscriber.py @@ -1,6 +1,6 @@ import zmq import binascii -from pisa import Logger +from pisa.logger import Logger from pisa.conf import FEED_PROTOCOL, FEED_ADDR, FEED_PORT diff --git a/pisa/watcher.py b/pisa/watcher.py index 4184e58..6ec6d30 100644 --- a/pisa/watcher.py +++ b/pisa/watcher.py @@ -2,7 +2,7 @@ from uuid import uuid4 from queue import Queue from threading import Thread -from pisa import Logger +from pisa.logger import Logger from pisa.cleaner import Cleaner from pisa.conf import EXPIRY_DELTA from pisa.responder import Responder From 831545ef2c0b15527ccc4974eb20178ae4d65f05 Mon Sep 17 00:00:00 2001 From: Salvatore Ingala <6681844+bigspider@users.noreply.github.com> Date: Thu, 10 Oct 2019 09:54:50 +0700 Subject: [PATCH 10/13] Added simple tests for check_txid_format --- test/unit/test_tools.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 test/unit/test_tools.py diff --git a/test/unit/test_tools.py b/test/unit/test_tools.py new file mode 100644 index 0000000..ba5b834 --- /dev/null +++ b/test/unit/test_tools.py @@ -0,0 +1,16 @@ +from pisa.tools import check_txid_format +from pisa import logging + +logging.getLogger().disabled = 
True + + +def test_check_txid_format(): + assert(check_txid_format(None) is False) + assert(check_txid_format("") is False) + assert(check_txid_format(0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef) is False) # wrong type + assert(check_txid_format("0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef") is True) # lowercase + assert(check_txid_format("0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF") is True) # uppercase + assert(check_txid_format("0123456789abcdef0123456789ABCDEF0123456789abcdef0123456789ABCDEF") is True) # mixed case + assert(check_txid_format("0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdf") is False) # too short + assert(check_txid_format("0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0") is False) # too long + assert(check_txid_format("g123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef") is False) # non-hex From aea1d1f1e02dc03ca4675f37c188fb772aa43c48 Mon Sep 17 00:00:00 2001 From: Salvatore Ingala <6681844+bigspider@users.noreply.github.com> Date: Thu, 10 Oct 2019 10:34:03 +0700 Subject: [PATCH 11/13] Fixes from PR review --- apps/cli/blob.py | 3 +-- pisa/api.py | 4 ++-- pisa/block_processor.py | 18 +++++++++--------- pisa/carrier.py | 4 ++-- pisa/cleaner.py | 6 +++--- pisa/encrypted_blob.py | 2 +- pisa/pisad.py | 1 - pisa/responder.py | 30 +++++++++++++++--------------- pisa/tools.py | 3 ++- pisa/utils/zmq_subscriber.py | 2 +- pisa/watcher.py | 14 +++++++------- 11 files changed, 43 insertions(+), 44 deletions(-) diff --git a/apps/cli/blob.py b/apps/cli/blob.py index 204ccd7..5e6f9da 100644 --- a/apps/cli/blob.py +++ b/apps/cli/blob.py @@ -3,7 +3,6 @@ from hashlib import sha256 from binascii import hexlify, unhexlify from cryptography.hazmat.primitives.ciphers.aead import AESGCM -from apps.cli import logging from apps.cli import SUPPORTED_HASH_FUNCTIONS, SUPPORTED_CIPHERS from pisa.logger import Logger @@ -53,7 +52,7 @@ class Blob: encrypted_blob = aesgcm.encrypt(nonce=nonce, data=tx, associated_data=None) encrypted_blob = hexlify(encrypted_blob).decode() - logger.info("creating new blob", + logger.info("Creating new blob", master_key=hexlify(master_key).decode(), sk=hexlify(sk).decode(), nonce=hexlify(nonce).decode(), diff --git a/pisa/api.py b/pisa/api.py index 1ab768d..48b6c46 100644 --- a/pisa/api.py +++ b/pisa/api.py @@ -24,7 +24,7 @@ def add_appointment(): remote_addr = request.environ.get('REMOTE_ADDR') remote_port = request.environ.get('REMOTE_PORT') - logger.info('connection accepted', from_addr_port='{}:{}'.format(remote_addr, remote_port)) + logger.info('Connection accepted', from_addr_port='{}:{}'.format(remote_addr, remote_port)) # Check content type once if properly defined request_data = json.loads(request.get_json()) @@ -50,7 +50,7 @@ def add_appointment(): rcode = HTTP_BAD_REQUEST response = "appointment rejected. 
Request does not match the standard" - logger.info('sending response and disconnecting', + logger.info('Sending response and disconnecting', from_addr_port='{}:{}'.format(remote_addr, remote_port), response=response) return Response(response, status=rcode, mimetype='text/plain') diff --git a/pisa/block_processor.py b/pisa/block_processor.py index c139f86..fdd71a8 100644 --- a/pisa/block_processor.py +++ b/pisa/block_processor.py @@ -17,7 +17,7 @@ class BlockProcessor: except JSONRPCException as e: block = None - logger.error("couldn't get block from bitcoind.", error_code=e) + logger.error("Couldn't get block from bitcoind.", error_code=e) return block @@ -29,7 +29,7 @@ class BlockProcessor: except JSONRPCException as e: block_hash = None - logger.error("couldn't get block hash.", error_code=e) + logger.error("Couldn't get block hash.", error_code=e) return block_hash @@ -41,7 +41,7 @@ class BlockProcessor: except JSONRPCException as e: block_count = None - logger.error("couldn't get block block count", error_code=e) + logger.error("Couldn't get block count", error_code=e) return block_count @@ -57,10 +57,10 @@ class BlockProcessor: potential_matches = {locator: potential_locators[locator] for locator in intersection} if len(potential_matches) > 0: - logger.info("list of potential matches", potential_matches=potential_matches) + logger.info("List of potential matches", potential_matches=potential_matches) else: - logger.info("no potential matches found") + logger.info("No potential matches found") return potential_matches @@ -76,12 +76,12 @@ class BlockProcessor: justice_txid = bitcoin_cli.decoderawtransaction(justice_rawtx).get('txid') matches.append((locator, uuid, dispute_txid, justice_txid, justice_rawtx)) - logger.info("match found for locator.", locator=locator, uuid=uuid, justice_txid=justice_txid) + logger.info("Match found for locator.", locator=locator, uuid=uuid, justice_txid=justice_txid) except JSONRPCException as e: # Tx decode failed returns error code -22, maybe we should be more strict here. Leaving it simple # for the POC - logger.error("can't build transaction from decoded data.", error_code=e) + logger.error("Can't build transaction from decoded data.", error_code=e) return matches @@ -93,7 +93,7 @@ class BlockProcessor: if tx in tx_job_map and tx in unconfirmed_txs: unconfirmed_txs.remove(tx) - logger.info("confirmation received for transaction", tx=tx) + logger.info("Confirmation received for transaction", tx=tx) elif tx in unconfirmed_txs: if tx in missed_confirmations: @@ -102,6 +102,6 @@ class BlockProcessor: else: missed_confirmations[tx] = 1 - logger.info("transaction missed a confirmation", tx=tx, missed_confirmations=missed_confirmations[tx]) + logger.info("Transaction missed a confirmation", tx=tx, missed_confirmations=missed_confirmations[tx]) return unconfirmed_txs, missed_confirmations diff --git a/pisa/carrier.py b/pisa/carrier.py index 3e11028..5658151 100644 --- a/pisa/carrier.py +++ b/pisa/carrier.py @@ -16,7 +16,7 @@ class Carrier: def send_transaction(self, rawtx, txid): try: - logger.info("pushing transaction to the network", txid=txid, rawtx=rawtx) + logger.info("Pushing transaction to the network", txid=txid, rawtx=rawtx) bitcoin_cli.sendrawtransaction(rawtx) receipt = self.Receipt(delivered=True) @@ -69,7 +69,7 @@ class Carrier: # reorged while we were querying bitcoind to get the confirmation count. 
In such a case we just # restart the job if e.error.get('code') == RPC_INVALID_ADDRESS_OR_KEY: - logger.info("transaction got reorged before obtaining information", txid=txid) + logger.info("Transaction got reorged before obtaining information", txid=txid) # TODO: Check RPC methods to see possible returns and avoid general else # else: diff --git a/pisa/cleaner.py b/pisa/cleaner.py index 0dad0da..00076b4 100644 --- a/pisa/cleaner.py +++ b/pisa/cleaner.py @@ -20,12 +20,12 @@ class Cleaner: else: locator_uuid_map[locator].remove(uuid) - logger.info("end time reached with no match! Deleting appointment.", locator=locator, uuid=uuid) + logger.info("End time reached with no match. Deleting appointment.", locator=locator, uuid=uuid) @staticmethod def delete_completed_jobs(jobs, tx_job_map, completed_jobs, height): for uuid, confirmations in completed_jobs: - logger.info("job completed. Appointment ended after reaching enough confirmations.", + logger.info("Job completed. Appointment ended after reaching enough confirmations.", uuid=uuid, height=height, confirmations=confirmations) # ToDo: #9-add-data-persistence @@ -35,7 +35,7 @@ class Cleaner: if len(tx_job_map[justice_txid]) == 1: tx_job_map.pop(justice_txid) - logger.info("no more jobs for justice transaction.", justice_txid=justice_txid) + logger.info("No more jobs for justice transaction.", justice_txid=justice_txid) else: tx_job_map[justice_txid].remove(uuid) diff --git a/pisa/encrypted_blob.py b/pisa/encrypted_blob.py index d9a02ba..68c1fe5 100644 --- a/pisa/encrypted_blob.py +++ b/pisa/encrypted_blob.py @@ -24,7 +24,7 @@ class EncryptedBlob: sk = master_key[:16] nonce = master_key[16:] - logger.info("[Watcher] creating new blob.", + logger.info("Creating new blob.", master_key=hexlify(master_key).decode(), sk=hexlify(sk).decode(), nonce=hexlify(sk).decode(), diff --git a/pisa/pisad.py b/pisa/pisad.py index c990a2e..918ec76 100644 --- a/pisa/pisad.py +++ b/pisa/pisad.py @@ -1,7 +1,6 @@ from sys import argv from getopt import getopt -from pisa import logging from pisa.logger import Logger from pisa.api import start_api from pisa.tools import can_connect_to_bitcoind, in_correct_network diff --git a/pisa/responder.py b/pisa/responder.py index 9b2b1f8..bbb4d71 100644 --- a/pisa/responder.py +++ b/pisa/responder.py @@ -47,7 +47,7 @@ class Responder: def add_response(self, uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, retry=False): if self.asleep: - logger.info("waking up!") + logger.info("Waking up") carrier = Carrier() receipt = carrier.send_transaction(justice_rawtx, justice_txid) @@ -82,7 +82,7 @@ class Responder: if confirmations == 0: self.unconfirmed_txs.append(justice_txid) - logger.info("new job added.", + logger.info("New job added.", dispute_txid=dispute_txid, justice_txid=justice_txid, appointment_end=appointment_end) if self.asleep: @@ -111,7 +111,7 @@ class Responder: txs = block.get('tx') height = block.get('height') - logger.info("new block received", + logger.info("New block received", block_hash=block_hash, prev_block_hash=block.get('previousblockhash'), txs=txs) # ToDo: #9-add-data-persistence @@ -126,9 +126,9 @@ class Responder: self.rebroadcast(txs_to_rebroadcast) else: - logger.warning("reorg found!", - local_prev_block_hash=prev_block_hash, - remote_prev_block_hash=block.get('previousblockhash')) + logger.warn("Reorg found", + local_prev_block_hash=prev_block_hash, + remote_prev_block_hash=block.get('previousblockhash')) self.handle_reorgs() @@ -138,7 +138,7 @@ class Responder: self.asleep = True 
self.zmq_subscriber.terminate = True - logger.info("no more pending jobs, going back to sleep") + logger.info("No more pending jobs, going back to sleep") def get_txs_to_rebroadcast(self, txs): txs_to_rebroadcast = [] @@ -174,25 +174,25 @@ class Responder: self.add_response(uuid, self.jobs[uuid].dispute_txid, self.jobs[uuid].justice_txid, self.jobs[uuid].justice_rawtx, self.jobs[uuid].appointment_end, retry=True) - logger.warning("Transaction has missed many confirmations. Rebroadcasting.", - justice_txid=self.jobs[uuid].justice_txid, - confirmations_missed=CONFIRMATIONS_BEFORE_RETRY) + logger.warn("Transaction has missed many confirmations. Rebroadcasting.", + justice_txid=self.jobs[uuid].justice_txid, + confirmations_missed=CONFIRMATIONS_BEFORE_RETRY) # FIXME: Legacy code, must be checked and updated/fixed def handle_reorgs(self): for uuid, job in self.jobs.items(): # First we check if the dispute transaction is still in the blockchain. If not, the justice can not be # there either, so we'll need to call the reorg manager straight away - dispute_in_chain, _ = check_tx_in_chain(job.dispute_txid, logger=logger, tx_label='dispute tx') + dispute_in_chain, _ = check_tx_in_chain(job.dispute_txid, logger=logger, tx_label='Dispute tx') # If the dispute is there, we can check the justice tx if dispute_in_chain: justice_in_chain, justice_confirmations = check_tx_in_chain(job.justice_txid, logger=logger, - tx_label='justice tx') + tx_label='Justice tx') # If both transactions are there, we only need to update the justice tx confirmation count if justice_in_chain: - logger.info("updating confirmation count for transaction.", + logger.info("Updating confirmation count for transaction.", justice_txid=job.justice_txid, prev_count=job.confirmations, curr_count=justice_confirmations) @@ -210,5 +210,5 @@ class Responder: # ToDo: #24-properly-handle-reorgs # FIXME: if the dispute is not on chain (either in mempool or not there at all), we need to call the # reorg manager - logger.warning("dispute and justice transaction missing. Calling the reorg manager") - logger.error("reorg manager not yet implemented") + logger.warn("Dispute and justice transaction missing. 
Calling the reorg manager") + logger.error("Reorg manager not yet implemented") diff --git a/pisa/tools.py b/pisa/tools.py index ddc4ab5..db6c33d 100644 --- a/pisa/tools.py +++ b/pisa/tools.py @@ -8,7 +8,8 @@ from pisa.utils.auth_proxy import JSONRPCException from pisa.rpc_errors import RPC_INVALID_ADDRESS_OR_KEY -def check_tx_in_chain(tx_id, logger=Logger(), tx_label='transaction'): +# TODO: currently only used in the Responder; might move there or in the BlockProcessor +def check_tx_in_chain(tx_id, logger=Logger(), tx_label='Transaction'): tx_in_chain = False confirmations = 0 diff --git a/pisa/utils/zmq_subscriber.py b/pisa/utils/zmq_subscriber.py index f0ac469..76f0150 100644 --- a/pisa/utils/zmq_subscriber.py +++ b/pisa/utils/zmq_subscriber.py @@ -30,4 +30,4 @@ class ZMQHandler: block_hash = binascii.hexlify(body).decode('UTF-8') block_queue.put(block_hash) - self.logger.info("new block received via ZMQ", block_hash=block_hash) + self.logger.info("New block received via ZMQ", block_hash=block_hash) diff --git a/pisa/watcher.py b/pisa/watcher.py index 6ec6d30..45ec563 100644 --- a/pisa/watcher.py +++ b/pisa/watcher.py @@ -53,16 +53,16 @@ class Watcher: zmq_thread.start() watcher.start() - logger.info("waking up!") + logger.info("Waking up") appointment_added = True - logger.info("new appointment accepted.", locator=appointment.locator) + logger.info("New appointment accepted.", locator=appointment.locator) else: appointment_added = False - logger.info("maximum appointments reached, appointment rejected.", locator=appointment.locator) + logger.info("Maximum appointments reached, appointment rejected.", locator=appointment.locator) return appointment_added @@ -73,14 +73,14 @@ class Watcher: def do_watch(self): while len(self.appointments) > 0: block_hash = self.block_queue.get() - logger.info("new block received", block_hash=block_hash) + logger.info("New block received", block_hash=block_hash) block = BlockProcessor.get_block(block_hash) if block is not None: txids = block.get('tx') - logger.info("list of transactions.", txids=txids) + logger.info("List of transactions.", txids=txids) expired_appointments = [uuid for uuid, appointment in self.appointments.items() if block["height"] > appointment.end_time + EXPIRY_DELTA] @@ -91,7 +91,7 @@ class Watcher: matches = BlockProcessor.get_matches(potential_matches, self.locator_uuid_map, self.appointments) for locator, uuid, dispute_txid, justice_txid, justice_rawtx in matches: - logger.info("notifying responder and deleting appointment.", + logger.info("Notifying responder and deleting appointment.", justice_txid=justice_txid, locator=locator, uuid=uuid) self.responder.add_response(uuid, dispute_txid, justice_txid, justice_rawtx, @@ -113,4 +113,4 @@ class Watcher: self.asleep = True self.zmq_subscriber.terminate = True - logger.error("no more pending appointments, going back to sleep") + logger.error("No more pending appointments, going back to sleep") From 9f25ef8603790871c142b34efd5eabf0b5c2098d Mon Sep 17 00:00:00 2001 From: Salvatore Ingala <6681844+bigspider@users.noreply.github.com> Date: Thu, 10 Oct 2019 18:22:33 +0700 Subject: [PATCH 12/13] Fixes from PR review --- apps/cli/pisa-cli.py | 41 +++++++++++++++++-------------- pisa/api.py | 3 +-- pisa/logger.py | 5 +++- pisa/pisad.py | 4 +-- pisa/watcher.py | 1 + test/unit/test_api.py | 5 ---- test/unit/test_blob.py | 2 -- test/unit/test_block_processor.py | 1 - test/unit/test_cleaner.py | 1 - test/unit/test_encrypted_blob.py | 3 --- test/unit/test_inspector.py | 1 - test/unit/test_tools.py 
| 5 ++-- 12 files changed, 33 insertions(+), 39 deletions(-) diff --git a/apps/cli/pisa-cli.py b/apps/cli/pisa-cli.py index 6327214..8b181a1 100644 --- a/apps/cli/pisa-cli.py +++ b/apps/cli/pisa-cli.py @@ -2,7 +2,6 @@ import re import os import sys import json -import logging import requests from sys import argv from hashlib import sha256 @@ -10,11 +9,15 @@ from binascii import unhexlify from getopt import getopt, GetoptError from requests import ConnectTimeout, ConnectionError +from pisa.logger import Logger from apps.cli.blob import Blob from apps.cli.help import help_add_appointment, help_get_appointment from apps.cli import DEFAULT_PISA_API_SERVER, DEFAULT_PISA_API_PORT +logger = Logger("Client") + + # FIXME: TESTING ENDPOINT, WON'T BE THERE IN PRODUCTION def generate_dummy_appointment(): get_block_count_end_point = "http://{}:{}/get_block_count".format(pisa_api_server, pisa_api_port) @@ -49,14 +52,14 @@ def add_appointment(args): if os.path.isfile(fin): appointment_data = json.load(open(fin)) else: - logging.error("[Client] can't find file " + fin) + logger.error("Can't find file " + fin) else: - logging.error("[Client] no file provided as appointment. " + use_help) + logger.error("No file provided as appointment. " + use_help) else: appointment_data = json.loads(arg_opt) except json.JSONDecodeError: - logging.error("[Client] non-JSON encoded data provided as appointment. " + use_help) + logger.error("Non-JSON encoded data provided as appointment. " + use_help) if appointment_data: valid_locator = check_txid_format(appointment_data.get('tx_id')) @@ -67,22 +70,22 @@ def add_appointment(args): appointment_data.get('start_time'), appointment_data.get('end_time'), appointment_data.get('dispute_delta')) - logging.info("[Client] sending appointment to PISA") + logger.info("Sending appointment to PISA") try: r = requests.post(url=add_appointment_endpoint, json=json.dumps(appointment), timeout=5) - logging.info("[Client] {} (code: {}).".format(r.text, r.status_code)) + logger.info("{} (code: {}).".format(r.text, r.status_code)) except ConnectTimeout: - logging.error("[Client] can't connect to pisa API. Connection timeout.") + logger.error("Can't connect to pisa API. Connection timeout.") except ConnectionError: - logging.error("[Client] can't connect to pisa API. Server cannot be reached.") + logger.error("Can't connect to pisa API. Server cannot be reached.") else: - logging.error("[Client] the provided locator is not valid.") + logger.error("The provided locator is not valid.") else: - logging.error("[Client] no appointment data provided. " + use_help) + logger.error("No appointment data provided. " + use_help) def get_appointment(args): @@ -104,16 +107,16 @@ def get_appointment(args): print(json.dumps(r.json(), indent=4, sort_keys=True)) except ConnectTimeout: - logging.error("[Client] can't connect to pisa API. Connection timeout.") + logger.error("Can't connect to pisa API. Connection timeout.") except ConnectionError: - logging.error("[Client] can't connect to pisa API. Server cannot be reached.") + logger.error("Can't connect to pisa API. 
Server cannot be reached.") else: - logging.error("[Client] the provided locator is not valid.") + logger.error("The provided locator is not valid.") else: - logging.error("[Client] the provided locator is not valid.") + logger.error("The provided locator is not valid.") def build_appointment(tx, tx_id, start_block, end_block, dispute_delta): @@ -199,7 +202,7 @@ if __name__ == '__main__': sys.exit(help_get_appointment()) else: - logging.error("[Client] unknown command. Use help to check the list of available commands") + logger.error("Unknown command. Use help to check the list of available commands") else: sys.exit(show_usage()) @@ -210,14 +213,14 @@ if __name__ == '__main__': generate_dummy_appointment() else: - logging.error("[Client] unknown command. Use help to check the list of available commands") + logger.error("Unknown command. Use help to check the list of available commands") else: - logging.error("[Client] no command provided. Use help to check the list of available commands.") + logger.error("No command provided. Use help to check the list of available commands.") except GetoptError as e: - logging.error("[Client] {}".format(e)) + logger.error("{}".format(e)) except json.JSONDecodeError as e: - logging.error("[Client] non-JSON encoded appointment passed as parameter.") + logger.error("Non-JSON encoded appointment passed as parameter.") diff --git a/pisa/api.py b/pisa/api.py index 48b6c46..cd05e02 100644 --- a/pisa/api.py +++ b/pisa/api.py @@ -1,11 +1,10 @@ import json from flask import Flask, request, Response, abort, jsonify -from pisa import HOST, PORT, logging, bitcoin_cli +from pisa import HOST, PORT, logging from pisa.logger import Logger from pisa.watcher import Watcher from pisa.inspector import Inspector -from pisa import HOST, PORT, logging from pisa.appointment import Appointment from pisa.block_processor import BlockProcessor diff --git a/pisa/logger.py b/pisa/logger.py index 3dad0e0..95c4733 100644 --- a/pisa/logger.py +++ b/pisa/logger.py @@ -27,4 +27,7 @@ class Logger(object): logging.debug(StructuredMessage(self._add_prefix(msg), actor=self.actor, **kwargs)) def error(self, msg, **kwargs): - logging.error(StructuredMessage(self._add_prefix(msg), actor=self.actor, **kwargs)) \ No newline at end of file + logging.error(StructuredMessage(self._add_prefix(msg), actor=self.actor, **kwargs)) + + def warn(self, msg, **kwargs): + logging.warn(StructuredMessage(self._add_prefix(msg), actor=self.actor, **kwargs)) diff --git a/pisa/pisad.py b/pisa/pisad.py index 918ec76..152d7c9 100644 --- a/pisa/pisad.py +++ b/pisa/pisad.py @@ -5,7 +5,7 @@ from pisa.logger import Logger from pisa.api import start_api from pisa.tools import can_connect_to_bitcoind, in_correct_network -logger = Logger("Pisad") +logger = Logger("Daemon") if __name__ == '__main__': debug = False @@ -23,4 +23,4 @@ if __name__ == '__main__': logger.error("bitcoind is running on a different network, check conf.py and bitcoin.conf. Shutting down") else: - logger.error("can't connect to bitcoind. Shutting down") + logger.error("Can't connect to bitcoind. 
Shutting down") diff --git a/pisa/watcher.py b/pisa/watcher.py index 45ec563..5248b77 100644 --- a/pisa/watcher.py +++ b/pisa/watcher.py @@ -12,6 +12,7 @@ from pisa.utils.zmq_subscriber import ZMQHandler logger = Logger("Watcher") + class Watcher: def __init__(self, max_appointments=MAX_APPOINTMENTS): self.appointments = dict() diff --git a/test/unit/test_api.py b/test/unit/test_api.py index 8505bfa..5e4a6ee 100644 --- a/test/unit/test_api.py +++ b/test/unit/test_api.py @@ -183,8 +183,3 @@ def test_get_all_appointments_responder(): assert (set(responder_jobs) == set(local_locators)) assert (len(received_appointments["watcher_appointments"]) == 0) - - - - - diff --git a/test/unit/test_blob.py b/test/unit/test_blob.py index efd9e1a..7eb3418 100644 --- a/test/unit/test_blob.py +++ b/test/unit/test_blob.py @@ -87,5 +87,3 @@ def test_encrypt(): encrypted_blob2 = blob.encrypt(key) assert(encrypted_blob == encrypted_blob2 and id(encrypted_blob) != id(encrypted_blob2)) - - diff --git a/test/unit/test_block_processor.py b/test/unit/test_block_processor.py index b0a2bba..a57fbb0 100644 --- a/test/unit/test_block_processor.py +++ b/test/unit/test_block_processor.py @@ -73,4 +73,3 @@ def test_potential_matches_random_data(locator_uuid_map): # None of the txids should match assert len(potential_matches) == 0 - diff --git a/test/unit/test_cleaner.py b/test/unit/test_cleaner.py index 5206118..237a2e3 100644 --- a/test/unit/test_cleaner.py +++ b/test/unit/test_cleaner.py @@ -78,4 +78,3 @@ def test_delete_completed_jobs(): Cleaner.delete_completed_jobs(jobs, tx_job_map, completed_jobs, 0) assert not set(completed_jobs).issubset(jobs.keys()) - diff --git a/test/unit/test_encrypted_blob.py b/test/unit/test_encrypted_blob.py index 096c316..67270c2 100644 --- a/test/unit/test_encrypted_blob.py +++ b/test/unit/test_encrypted_blob.py @@ -34,6 +34,3 @@ def test_decrypt(): encrypted_blob = EncryptedBlob(encrypted_data) assert(encrypted_blob.decrypt(key) == data) - - - diff --git a/test/unit/test_inspector.py b/test/unit/test_inspector.py index 3aa68f6..551cfe3 100644 --- a/test/unit/test_inspector.py +++ b/test/unit/test_inspector.py @@ -230,4 +230,3 @@ def test_inspect(): and appointment.end_time == end_time and appointment.dispute_delta == dispute_delta and appointment.encrypted_blob.data == encrypted_blob and appointment.cipher == cipher and appointment.hash_function == hash_function) - diff --git a/test/unit/test_tools.py b/test/unit/test_tools.py index ba5b834..0e96b7c 100644 --- a/test/unit/test_tools.py +++ b/test/unit/test_tools.py @@ -8,9 +8,10 @@ def test_check_txid_format(): assert(check_txid_format(None) is False) assert(check_txid_format("") is False) assert(check_txid_format(0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef) is False) # wrong type - assert(check_txid_format("0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef") is True) # lowercase - assert(check_txid_format("0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF") is True) # uppercase + assert(check_txid_format("abcdefabcdefabcdefabcdefabcdefabcdefabcdefabcdefabcdefabcdefabcd") is True) # lowercase + assert(check_txid_format("ABCDEFABCDEFABCDEFABCDEFABCDEFABCDEFABCDEFABCDEFABCDEFABCDEFABCD") is True) # uppercase assert(check_txid_format("0123456789abcdef0123456789ABCDEF0123456789abcdef0123456789ABCDEF") is True) # mixed case + assert(check_txid_format("0123456789012345678901234567890123456789012345678901234567890123") is True) # only nums 
assert(check_txid_format("0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdf") is False) # too short assert(check_txid_format("0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0") is False) # too long assert(check_txid_format("g123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef") is False) # non-hex From d05c2c21b785af3a72f5ccf38a76563f058e6f6f Mon Sep 17 00:00:00 2001 From: Salvatore Ingala <6681844+bigspider@users.noreply.github.com> Date: Thu, 10 Oct 2019 18:35:31 +0700 Subject: [PATCH 13/13] warn ==> warning --- pisa/logger.py | 4 ++-- pisa/responder.py | 14 +++++++------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/pisa/logger.py b/pisa/logger.py index 95c4733..e6a1f4e 100644 --- a/pisa/logger.py +++ b/pisa/logger.py @@ -29,5 +29,5 @@ class Logger(object): def error(self, msg, **kwargs): logging.error(StructuredMessage(self._add_prefix(msg), actor=self.actor, **kwargs)) - def warn(self, msg, **kwargs): - logging.warn(StructuredMessage(self._add_prefix(msg), actor=self.actor, **kwargs)) + def warning(self, msg, **kwargs): + logging.warning(StructuredMessage(self._add_prefix(msg), actor=self.actor, **kwargs)) diff --git a/pisa/responder.py b/pisa/responder.py index bbb4d71..87f64ec 100644 --- a/pisa/responder.py +++ b/pisa/responder.py @@ -126,9 +126,9 @@ class Responder: self.rebroadcast(txs_to_rebroadcast) else: - logger.warn("Reorg found", - local_prev_block_hash=prev_block_hash, - remote_prev_block_hash=block.get('previousblockhash')) + logger.warning("Reorg found", + local_prev_block_hash=prev_block_hash, + remote_prev_block_hash=block.get('previousblockhash')) self.handle_reorgs() @@ -174,9 +174,9 @@ class Responder: self.add_response(uuid, self.jobs[uuid].dispute_txid, self.jobs[uuid].justice_txid, self.jobs[uuid].justice_rawtx, self.jobs[uuid].appointment_end, retry=True) - logger.warn("Transaction has missed many confirmations. Rebroadcasting.", - justice_txid=self.jobs[uuid].justice_txid, - confirmations_missed=CONFIRMATIONS_BEFORE_RETRY) + logger.warning("Transaction has missed many confirmations. Rebroadcasting.", + justice_txid=self.jobs[uuid].justice_txid, + confirmations_missed=CONFIRMATIONS_BEFORE_RETRY) # FIXME: Legacy code, must be checked and updated/fixed def handle_reorgs(self): @@ -210,5 +210,5 @@ class Responder: # ToDo: #24-properly-handle-reorgs # FIXME: if the dispute is not on chain (either in mempool or not there at all), we need to call the # reorg manager - logger.warn("Dispute and justice transaction missing. Calling the reorg manager") + logger.warning("Dispute and justice transaction missing. Calling the reorg manager") logger.error("Reorg manager not yet implemented")