Code clean up

Deletes the debug/logging parameter pair. Defines logging and bitcoin_cli as system-wide variables.
Sergi Delgado Segura
2019-10-02 17:03:43 +01:00
parent 9bb3b38b3f
commit 93e23e769f
10 changed files with 158 additions and 214 deletions
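
For context, a minimal sketch of the package-level definitions that the new import style (from pisa import logging, bitcoin_cli) implies. The actual pisa/__init__.py is among the other changed files and is not shown in this diff, so the exact contents below are an assumption, not the committed code:

# pisa/__init__.py -- hypothetical sketch, not the file from this commit
import logging

from pisa.conf import BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT
from pisa.utils.auth_proxy import AuthServiceProxy  # assumes AuthServiceProxy lives next to JSONRPCException

# System-wide logger, imported elsewhere as: from pisa import logging
logging.basicConfig(level=logging.INFO)

# Shared RPC connection to bitcoind, imported elsewhere as: from pisa import bitcoin_cli
bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT))

A setup along these lines keeps a single bitcoind connection and logger configuration instead of rebuilding them inside every Responder method, which is what the diff below removes.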


@@ -2,11 +2,11 @@ from queue import Queue
 from threading import Thread
 from hashlib import sha256
 from binascii import unhexlify
-from pisa.zmq_subscriber import ZMQHandler
+from pisa import logging, bitcoin_cli
 from pisa.rpc_errors import *
 from pisa.tools import check_tx_in_chain
-from pisa.utils.authproxy import AuthServiceProxy, JSONRPCException
-from pisa.conf import BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT
+from pisa.utils.zmq_subscriber import ZMQHandler
+from pisa.utils.auth_proxy import JSONRPCException
 
 CONFIRMATIONS_BEFORE_RETRY = 6
 MIN_CONFIRMATIONS = 6
@@ -42,31 +42,24 @@ class Responder:
         self.asleep = True
         self.zmq_subscriber = None
 
-    def add_response(self, uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, debug, logging,
-                     retry=False):
-        bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST,
-                                                               BTC_RPC_PORT))
-
+    def add_response(self, uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, retry=False):
         try:
-            if debug:
-                if self.asleep:
-                    logging.info("[Responder] waking up!")
+            if self.asleep:
+                logging.info("[Responder] waking up!")
 
-                logging.info("[Responder] pushing transaction to the network (txid: {})".format(justice_txid))
+            logging.info("[Responder] pushing transaction to the network (txid: {})".format(justice_txid))
 
             bitcoin_cli.sendrawtransaction(justice_rawtx)
 
             # handle_responses can call add_response recursively if a broadcast transaction does not get confirmations
             # retry holds such information.
-            self.create_job(uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, debug, logging,
-                            retry=retry)
+            self.create_job(uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, retry=retry)
 
         except JSONRPCException as e:
-            self.handle_send_failures(e, bitcoin_cli, uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end,
-                                      debug, logging, retry)
+            self.handle_send_failures(e, uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, retry)
 
-    def create_job(self, uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, debug, logging,
-                   confirmations=0, retry=False):
+    def create_job(self, uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, confirmations=0,
+                   retry=False):
 
         # ToDo: #23-define-behaviour-approaching-end
         if retry:
@@ -81,25 +74,22 @@ class Responder:
         else:
             self.tx_job_map[justice_txid] = [uuid]
 
-        if debug:
-            logging.info('[Responder] new job added (dispute txid = {}, justice txid = {}, appointment end = {})'.
-                         format(dispute_txid, justice_txid, appointment_end))
+        logging.info('[Responder] new job added (dispute txid = {}, justice txid = {}, appointment end = {})'
+                     .format(dispute_txid, justice_txid, appointment_end))
 
         if self.asleep:
             self.asleep = False
             self.block_queue = Queue()
-            zmq_thread = Thread(target=self.do_subscribe, args=[self.block_queue, debug, logging])
-            responder = Thread(target=self.handle_responses, args=[debug, logging])
+            zmq_thread = Thread(target=self.do_subscribe, args=[self.block_queue])
+            responder = Thread(target=self.handle_responses)
             zmq_thread.start()
             responder.start()
 
-    def do_subscribe(self, block_queue, debug, logging):
+    def do_subscribe(self, block_queue):
         self.zmq_subscriber = ZMQHandler(parent='Responder')
-        self.zmq_subscriber.handle(block_queue, debug, logging)
+        self.zmq_subscriber.handle(block_queue)
 
-    def handle_responses(self, debug, logging):
-        bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST,
-                                                               BTC_RPC_PORT))
+    def handle_responses(self):
         prev_block_hash = 0
         while len(self.jobs) > 0:
             # We get notified for every new received block
@@ -110,14 +100,12 @@ class Responder:
                 txs = block.get('tx')
                 height = block.get('height')
 
-                if debug:
-                    logging.info("[Responder] new block received {}".format(block_hash))
-                    logging.info("[Responder] prev. block hash {}".format(block.get('previousblockhash')))
-                    logging.info("[Responder] list of transactions: {}".format(txs))
+                logging.info("[Responder] new block received {}".format(block_hash))
+                logging.info("[Responder] prev. block hash {}".format(block.get('previousblockhash')))
+                logging.info("[Responder] list of transactions: {}".format(txs))
 
             except JSONRPCException as e:
-                if debug:
-                    logging.error("[Responder] couldn't get block from bitcoind. Error code {}".format(e))
+                logging.error("[Responder] couldn't get block from bitcoind. Error code {}".format(e))
 
                 continue
@@ -129,20 +117,19 @@ class Responder:
                         if justice_txid in txs or self.jobs[uuid].confirmations > 0:
                             self.jobs[uuid].confirmations += 1
 
-                            if debug:
-                                logging.info("[Responder] new confirmation received for job = {}, txid = {}".format(
-                                    uuid, justice_txid))
+                            logging.info("[Responder] new confirmation received for job = {}, txid = {}".format(
+                                uuid, justice_txid))
 
                         elif self.jobs[uuid].missed_confirmations >= CONFIRMATIONS_BEFORE_RETRY:
                             # If a transactions has missed too many confirmations for a while we'll try to rebroadcast
                             # ToDO: #22-discuss-confirmations-before-retry
                             # ToDo: #23-define-behaviour-approaching-end
                             self.add_response(uuid, self.jobs[uuid].dispute_txid, justice_txid,
-                                              self.jobs[uuid].justice_rawtx, self.jobs[uuid].appointment_end, debug,
-                                              logging, retry=True)
-                            if debug:
-                                logging.warning("[Responder] txid = {} has missed {} confirmations. Rebroadcasting"
-                                                .format(justice_txid, CONFIRMATIONS_BEFORE_RETRY))
+                                              self.jobs[uuid].justice_rawtx, self.jobs[uuid].appointment_end,
+                                              retry=True)
+
+                            logging.warning("[Responder] txid = {} has missed {} confirmations. Rebroadcasting"
+                                            .format(justice_txid, CONFIRMATIONS_BEFORE_RETRY))
 
                         else:
                             # Otherwise we increase the number of missed confirmations
@@ -153,14 +140,13 @@ class Responder:
                             # The end of the appointment has been reached
                            completed_jobs.append(uuid)
 
-                self.remove_completed_jobs(completed_jobs, height, debug, logging)
+                self.remove_completed_jobs(completed_jobs, height)
 
             else:
-                if debug:
-                    logging.warning("[Responder] reorg found! local prev. block id = {}, remote prev. block id = {}"
-                                    .format(prev_block_hash, block.get('previousblockhash')))
+                logging.warning("[Responder] reorg found! local prev. block id = {}, remote prev. block id = {}"
+                                .format(prev_block_hash, block.get('previousblockhash')))
 
-                self.handle_reorgs(bitcoin_cli, debug, logging)
+                self.handle_reorgs()
 
             prev_block_hash = block.get('hash')
@@ -168,11 +154,9 @@ class Responder:
         self.asleep = True
         self.zmq_subscriber.terminate = True
 
-        if debug:
-            logging.info("[Responder] no more pending jobs, going back to sleep")
+        logging.info("[Responder] no more pending jobs, going back to sleep")
 
-    def handle_send_failures(self, e, bitcoin_cli, uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end,
-                             debug, logging, retry):
+    def handle_send_failures(self, e, uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, retry):
         # Since we're pushing a raw transaction to the network we can get two kind of rejections:
         # RPC_VERIFY_REJECTED and RPC_VERIFY_ALREADY_IN_CHAIN. The former implies that the transaction is rejected
         # due to network rules, whereas the later implies that the transaction is already in the blockchain.
@@ -185,38 +169,36 @@ class Responder:
         elif e.error.get('code') == RPC_VERIFY_ALREADY_IN_CHAIN:
             try:
-                if debug:
-                    logging.info("[Responder] {} is already in the blockchain. Getting the confirmation count and "
-                                 "start monitoring the transaction".format(justice_txid))
+                logging.info("[Responder] {} is already in the blockchain. Getting the confirmation count and start "
+                             "monitoring the transaction".format(justice_txid))
 
                 # If the transaction is already in the chain, we get the number of confirmations and watch the job
                 # until the end of the appointment
                 tx_info = bitcoin_cli.getrawtransaction(justice_txid, 1)
                 confirmations = int(tx_info.get("confirmations"))
-                self.create_job(uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, debug, logging,
-                                retry=retry, confirmations=confirmations)
+                self.create_job(uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, retry=retry,
+                                confirmations=confirmations)
 
             except JSONRPCException as e:
                 # While it's quite unlikely, the transaction that was already in the blockchain could have been
                 # reorged while we were querying bitcoind to get the confirmation count. In such a case we just
                 # restart the job
                 if e.error.get('code') == RPC_INVALID_ADDRESS_OR_KEY:
-                    self.add_response(uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, debug,
-                                      logging, retry=retry)
-                elif debug:
+                    self.add_response(uuid, dispute_txid, justice_txid, justice_rawtx, appointment_end, retry=retry)
+                else:
                     # If something else happens (unlikely but possible) log it so we can treat it in future releases
                     logging.error("[Responder] JSONRPCException. Error {}".format(e))
 
-        elif debug:
+        else:
             # If something else happens (unlikely but possible) log it so we can treat it in future releases
             logging.error("[Responder] JSONRPCException. Error {}".format(e))
 
-    def remove_completed_jobs(self, completed_jobs, height, debug, logging):
+    def remove_completed_jobs(self, completed_jobs, height):
         for uuid in completed_jobs:
-            if debug:
-                logging.info("[Responder] job completed (uuid = {}, justice_txid = {}). Appointment ended at "
-                             "block {} after {} confirmations".format(uuid, self.jobs[uuid].justice_txid, height,
-                                                                      self.jobs[uuid].confirmations))
+            logging.info("[Responder] job completed (uuid = {}, justice_txid = {}). Appointment ended at "
+                         "block {} after {} confirmations".format(uuid, self.jobs[uuid].justice_txid, height,
+                                                                  self.jobs[uuid].confirmations))
 
             # ToDo: #9-add-data-persistency
             justice_txid = self.jobs[uuid].justice_txid
@@ -225,30 +207,25 @@ class Responder:
             if len(self.tx_job_map[justice_txid]) == 1:
                 self.tx_job_map.pop(justice_txid)
 
-                if debug:
-                    logging.info("[Responder] no more jobs for justice_txid {}".format(justice_txid))
+                logging.info("[Responder] no more jobs for justice_txid {}".format(justice_txid))
 
             else:
                 self.tx_job_map[justice_txid].remove(uuid)
 
-    def handle_reorgs(self, bitcoin_cli, debug, logging):
+    def handle_reorgs(self):
         for uuid, job in self.jobs.items():
             # First we check if the dispute transaction is still in the blockchain. If not, the justice can not be
             # there either, so we'll need to call the reorg manager straight away
-            dispute_in_chain, _ = check_tx_in_chain(bitcoin_cli, job.dispute_txid, debug, logging,
-                                                    parent='Responder',
-                                                    tx_label='dispute tx')
+            dispute_in_chain, _ = check_tx_in_chain(job.dispute_txid, parent='Responder', tx_label='dispute tx')
 
             # If the dispute is there, we can check the justice tx
             if dispute_in_chain:
-                justice_in_chain, justice_confirmations = check_tx_in_chain(bitcoin_cli, job.justice_txid, debug,
-                                                                            logging, parent='Responder',
+                justice_in_chain, justice_confirmations = check_tx_in_chain(job.justice_txid, parent='Responder',
                                                                             tx_label='justice tx')
 
                 # If both transactions are there, we only need to update the justice tx confirmation count
                 if justice_in_chain:
-                    if debug:
-                        logging.info("[Responder] updating confirmation count for {}: prev. {}, current {}".format(
+                    logging.info("[Responder] updating confirmation count for {}: prev. {}, current {}".format(
                         job.justice_txid, job.confirmations, justice_confirmations))
 
                     job.confirmations = justice_confirmations
@@ -258,9 +235,7 @@ class Responder:
                     # DISCUSS: Adding job back, should we flag it as retried?
                     # FIXME: Whether we decide to increase the retried counter or not, the current counter should be
                     # maintained. There is no way of doing so with the current approach. Update if required
-                    self.add_response(uuid, job.dispute_txid, job.justice_txid, job.justice_rawtx,
-                                      job.appointment_end,
-                                      debug, logging)
+                    self.add_response(uuid, job.dispute_txid, job.justice_txid, job.justice_rawtx, job.appointment_end)
 
             else:
                 # ToDo: #24-properly-handle-reorgs
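
The reorg handling above now calls check_tx_in_chain without passing bitcoin_cli, debug or logging, so pisa.tools presumably picks them up from the package as well. A hedged sketch of the signature implied by the call sites in this diff (the tools.py hunk is not shown here, so the body below is an assumption, not the project's implementation):

# pisa/tools.py -- hypothetical sketch matching the call sites above
from pisa import logging, bitcoin_cli
from pisa.utils.auth_proxy import JSONRPCException


def check_tx_in_chain(txid, parent='', tx_label='tx'):
    # Returns (tx_in_chain, confirmations) for txid, queried through the shared bitcoin_cli connection
    tx_in_chain = False
    confirmations = 0

    try:
        tx_info = bitcoin_cli.getrawtransaction(txid, 1)
        confirmations = int(tx_info.get("confirmations", 0))
        tx_in_chain = confirmations > 0

        if tx_in_chain:
            logging.info("[{}] {} found in the blockchain ({} confirmations)".format(parent, tx_label, confirmations))

    except JSONRPCException:
        logging.error("[{}] {} not found in the blockchain".format(parent, tx_label))

    return tx_in_chain, confirmations

Usage matching the Responder code above: dispute_in_chain, _ = check_tx_in_chain(job.dispute_txid, parent='Responder', tx_label='dispute tx').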