mirror of
https://github.com/aljazceru/python-teos.git
synced 2025-12-18 14:44:21 +01:00
refactors project structure
This commit is contained in:
8
pisa/TODO.md
Normal file
8
pisa/TODO.md
Normal file
@@ -0,0 +1,8 @@
|
||||
- Start jobs according to the start time, jobs are now started when they are received
|
||||
- Add DB
|
||||
- Store jobs in DB until start time?
|
||||
- Handle failures in the underlying system (i.e. bitcoind crashes)
|
||||
- Add checks related to OP_CSV in the justice tx and the dispute_delta provided once the blob is decrypted
|
||||
- Do not accept new appointments if the locator has already been used
|
||||
- <s> Check all the interactions with core, figure out the edge cases and error codes i.e: The justice transaction can already be in the blockchain the first time we push it </s>
|
||||
- <s> Handle reconnection with ZMQ in case of broken pipe. The current version of the code fails if it does happen </s>
|
||||
2
pisa/__init__.py
Normal file
2
pisa/__init__.py
Normal file
@@ -0,0 +1,2 @@
|
||||
# Default bind address and port for the PISA API (consumed by app.run in pisa/api.py).
HOST = 'localhost'
PORT = 9814
|
||||
134
pisa/api.py
Normal file
134
pisa/api.py
Normal file
@@ -0,0 +1,134 @@
|
||||
from pisa import *
|
||||
from pisa.watcher import Watcher
|
||||
from pisa.inspector import Inspector
|
||||
from pisa.appointment import Appointment
|
||||
from flask import Flask, request, Response, abort, jsonify
|
||||
import json
|
||||
|
||||
|
||||
# FIXME: HERE FOR TESTING (get_block_count). REMOVE WHEN REMOVING THE FUNCTION
|
||||
from pisa.utils.authproxy import AuthServiceProxy
|
||||
from pisa.conf import BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT
|
||||
|
||||
# ToDo: #5-add-async-to-api
|
||||
app = Flask(__name__)
|
||||
HTTP_OK = 200
|
||||
HTTP_BAD_REQUEST = 400
|
||||
|
||||
|
||||
@app.route('/', methods=['POST'])
def add_appointment():
    """Accept a new appointment posted as JSON.

    The request body is validated by the module-level Inspector; valid
    appointments are handed to the module-level Watcher. Responds with a
    plain-text message and HTTP 200 (accepted) or 400 (rejected).
    """
    remote_addr = request.environ.get('REMOTE_ADDR')
    remote_port = request.environ.get('REMOTE_PORT')

    if debug:
        logging.info('[API] connection accepted from {}:{}'.format(remote_addr, remote_port))

    # Check content type once if properly defined
    # NOTE(review): request.get_json() is expected to yield a JSON *string* here
    # (hence the extra json.loads) — confirm against what the client sends.
    request_data = json.loads(request.get_json())
    appointment = inspector.inspect(request_data)

    if type(appointment) == Appointment:
        # Every check passed: hand the appointment over to the watcher.
        appointment_added = watcher.add_appointment(appointment, debug, logging)
        rcode = HTTP_OK

        # FIXME: Response should be signed receipt (created and signed by the API)
        if appointment_added:
            response = "appointment accepted. locator: {}".format(appointment.locator)
        else:
            response = "appointment rejected"
            # FIXME: change the response code maybe?

    elif type(appointment) == tuple:
        # inspect() returned an (error_code, message) pair describing the failed check.
        rcode = HTTP_BAD_REQUEST
        response = "appointment rejected. Error {}: {}".format(appointment[0], appointment[1])
    else:
        # inspect() returned something else (e.g. None when bitcoind could not be queried).
        rcode = HTTP_BAD_REQUEST
        response = "appointment rejected. Request does not match the standard"

    # Send response back. Change multiprocessing.connection for an http based connection
    if debug:
        logging.info('[API] sending response and disconnecting: {} --> {}:{}'.format(response, remote_addr,
                                                                                     remote_port))

    return Response(response, status=rcode, mimetype='text/plain')
|
||||
|
||||
|
||||
# FIXME: THE NEXT THREE API ENDPOINTS ARE FOR TESTING AND SHOULD BE REMOVED / PROPERLY MANAGED BEFORE PRODUCTION!
@app.route('/get_appointment', methods=['GET'])
def get_appointment():
    """Return the status of every appointment/job matching the `locator` query arg.

    An appointment can show up both as "being_watched" (still in the watcher)
    and as "dispute_responded" (already handed to the responder). If nothing
    matches, a single "not found" entry is returned.
    """
    locator = request.args.get('locator')
    response = []

    # Appointments still waiting for the dispute transaction to show up.
    appointment_in_watcher = watcher.appointments.get(locator)

    if appointment_in_watcher:
        for appointment in appointment_in_watcher:
            appointment_data = appointment.to_json()
            appointment_data['status'] = "being_watched"
            response.append(appointment_data)

    # Jobs whose dispute has been seen and whose justice tx is being tracked.
    if watcher.responder:
        responder_jobs = watcher.responder.jobs

        for job_id, job in responder_jobs.items():
            if job.locator == locator:
                job_data = job.to_json()
                job_data['status'] = "dispute_responded"
                job_data['confirmations'] = watcher.responder.confirmation_counter.get(job_id)
                response.append(job_data)

    if not response:
        response.append({"locator": locator, "status": "not found"})

    response = jsonify(response)

    return response
|
||||
|
||||
|
||||
@app.route('/get_all_appointments', methods=['GET'])
def get_all_appointments():
    """Dump every watcher appointment and responder job as JSON.

    Only answers requests that appear to come from the local machine; any
    other origin receives a 404.
    """
    watcher_appointments = []
    responder_jobs = []

    # NOTE(review): `remote_addr in request.host` is a loose locality check —
    # confirm it cannot be satisfied via a crafted Host header.
    if request.remote_addr in request.host or request.remote_addr == '127.0.0.1':
        for app_id, appointment in watcher.appointments.items():
            jobs_data = [job.to_json() for job in appointment]

            watcher_appointments.append({app_id: jobs_data})

        if watcher.responder:
            for job_id, job in watcher.responder.jobs.items():
                job_data = job.to_json()
                job_data['confirmations'] = watcher.responder.confirmation_counter.get(job_id)
                responder_jobs.append({job_id: job_data})

        response = jsonify({"watcher_appointments": watcher_appointments, "responder_jobs": responder_jobs})

    else:
        # abort() raises, so the return below is only reached on the local path.
        abort(404)

    return response
|
||||
|
||||
|
||||
@app.route('/get_block_count', methods=['GET'])
def get_block_count():
    """Testing endpoint: proxy bitcoind's getblockcount and return it as JSON."""
    bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST,
                                                           BTC_RPC_PORT))

    return jsonify({"block_count": bitcoin_cli.getblockcount()})
|
||||
|
||||
|
||||
def start_api(d, l):
    """Initialize the module-level API collaborators and run the Flask app.

    :param d: debug flag; enables the info logging in the endpoints above.
    :param l: logging module to use for all API log output.
    """
    # FIXME: Pretty ugly but I haven't found a proper way to pass it to add_appointment
    global debug, logging, watcher, inspector
    debug = d
    logging = l
    watcher = Watcher()
    inspector = Inspector(debug, logging)

    # Set Flask's werkzeug logger to ERROR only so it does not mess with our logging
    logging.getLogger('werkzeug').setLevel(logging.ERROR)

    app.run(host=HOST, port=PORT)
|
||||
25
pisa/appointment.py
Normal file
25
pisa/appointment.py
Normal file
@@ -0,0 +1,25 @@
|
||||
from pisa.encrypted_blob import EncryptedBlob
|
||||
|
||||
|
||||
# Basic appointment structure
class Appointment:
    """A client's request to watch for a channel dispute.

    Wraps the raw encrypted blob in an EncryptedBlob and keeps the remaining
    fields as received.
    """

    def __init__(self, locator, start_time, end_time, dispute_delta, encrypted_blob, cipher, hash_function):
        self.locator = locator
        self.start_time = start_time  # ToDo: #4-standardize-appointment-fields
        self.end_time = end_time  # ToDo: #4-standardize-appointment-fields
        self.dispute_delta = dispute_delta
        self.encrypted_blob = EncryptedBlob(encrypted_blob)
        self.cipher = cipher
        self.hash_function = hash_function

    def to_json(self):
        """Return a plain dict with the appointment fields (blob as raw hex data)."""
        return {
            "locator": self.locator,
            "start_time": self.start_time,
            "end_time": self.end_time,
            "dispute_delta": self.dispute_delta,
            "encrypted_blob": self.encrypted_blob.data,
            "cipher": self.cipher,
            "hash_function": self.hash_function,
        }

    # ToDO: #3-improve-appointment-strcuture
|
||||
|
||||
# ToDO: #3-improve-appointment-strcuture
|
||||
|
||||
|
||||
|
||||
30
pisa/encrypted_blob.py
Normal file
30
pisa/encrypted_blob.py
Normal file
@@ -0,0 +1,30 @@
|
||||
from binascii import unhexlify, hexlify
|
||||
from hashlib import sha256
|
||||
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
|
||||
|
||||
|
||||
class EncryptedBlob:
    """Hex-encoded AES-GCM-128 ciphertext plus the logic to decrypt it."""

    def __init__(self, data):
        # data: hex string holding the ciphertext
        self.data = data

    def decrypt(self, key, debug, logging):
        """Derive the key material from `key` and return the decrypted raw transaction."""
        # master_key = H(tx_id | tx_id)
        master_key = sha256(key + key).digest()

        # The 16 MSB of the master key will serve as the AES GCM 128 secret key. The 16 LSB will serve as the IV.
        sk, nonce = master_key[:16], master_key[16:]

        if debug:
            logging.info("[Watcher] creating new blob")
            logging.info("[Watcher] master key: {}".format(hexlify(master_key).decode()))
            logging.info("[Watcher] sk: {}".format(hexlify(sk).decode()))
            logging.info("[Watcher] nonce: {}".format(hexlify(nonce).decode()))
            logging.info("[Watcher] encrypted_blob: {}".format(self.data))

        # Decrypt
        ciphertext = unhexlify(self.data.encode())
        return AESGCM(sk).decrypt(nonce=nonce, data=ciphertext, associated_data=None)
|
||||
13
pisa/errors.py
Normal file
13
pisa/errors.py
Normal file
@@ -0,0 +1,13 @@
|
||||
# Appointment errors
# Negative return codes produced by the Inspector.check_* methods; a return
# code of 0 means the field passed validation.
APPOINTMENT_EMPTY_FIELD = -1
APPOINTMENT_WRONG_FIELD_TYPE = -2
APPOINTMENT_WRONG_FIELD_SIZE = -3
APPOINTMENT_WRONG_FIELD_FORMAT = -4
APPOINTMENT_FIELD_TOO_SMALL = -5
APPOINTMENT_FIELD_TOO_BIG = -6
APPOINTMENT_WRONG_FIELD = -7
APPOINTMENT_CIPHER_NOT_SUPPORTED = -8
APPOINTMENT_HASH_FUNCTION_NOT_SUPPORTED = -9
|
||||
|
||||
|
||||
|
||||
211
pisa/inspector.py
Normal file
211
pisa/inspector.py
Normal file
@@ -0,0 +1,211 @@
|
||||
import re
|
||||
from pisa.appointment import Appointment
|
||||
from pisa import errors
|
||||
from pisa.utils.authproxy import AuthServiceProxy, JSONRPCException
|
||||
from pisa.conf import BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT, MIN_DISPUTE_DELTA, \
|
||||
SUPPORTED_CIPHERS, SUPPORTED_HASH_FUNCTIONS
|
||||
|
||||
|
||||
class Inspector:
    """Validates incoming appointment requests before the Watcher accepts them.

    Every ``check_*`` method returns an ``(rcode, message)`` tuple where
    ``rcode`` is 0 on success or one of the negative codes from ``pisa.errors``
    on failure (with ``message`` describing the problem).
    """

    def __init__(self, debug=False, logging=None):
        # debug: when True, failed checks are reported via the supplied logging module.
        self.debug = debug
        self.logging = logging

    def inspect(self, data):
        """Run every field check over a request dict, in order.

        :param data: dict parsed from the client's JSON request.
        :returns: an Appointment when all checks pass, an (rcode, message)
            tuple when a check fails, or None if bitcoind could not be queried.
        """
        locator = data.get('locator')
        start_time = data.get('start_time')
        end_time = data.get('end_time')
        dispute_delta = data.get('dispute_delta')
        encrypted_blob = data.get('encrypted_blob')
        cipher = data.get('cipher')
        hash_function = data.get('hash_function')

        bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST,
                                                               BTC_RPC_PORT))

        # BUG FIX: r used to be assigned only inside the try block, so a
        # JSONRPCException from getblockcount() made `return r` raise
        # UnboundLocalError. None now signals "bitcoind unreachable".
        r = None

        try:
            block_height = bitcoin_cli.getblockcount()

            # Run the checks sequentially, stopping at the first failure.
            rcode, message = self.check_locator(locator)
            if rcode == 0:
                rcode, message = self.check_start_time(start_time, block_height)
            if rcode == 0:
                rcode, message = self.check_end_time(end_time, start_time, block_height)
            if rcode == 0:
                rcode, message = self.check_delta(dispute_delta)
            if rcode == 0:
                rcode, message = self.check_blob(encrypted_blob)
            if rcode == 0:
                rcode, message = self.check_cipher(cipher)
            if rcode == 0:
                rcode, message = self.check_hash_function(hash_function)

            if rcode == 0:
                r = Appointment(locator, start_time, end_time, dispute_delta, encrypted_blob, cipher, hash_function)
            else:
                r = (rcode, message)

        except JSONRPCException as e:
            if self.debug:
                self.logging.error("[Inspector] JSONRPCException. Error code {}".format(e))

        return r

    def check_locator(self, locator):
        """Validate the locator: non-empty, str, 64 chars, hex-encoded."""
        message = None
        rcode = 0

        if locator is None:
            rcode = errors.APPOINTMENT_EMPTY_FIELD
            message = "empty locator received"
        elif type(locator) != str:
            rcode = errors.APPOINTMENT_WRONG_FIELD_TYPE
            message = "wrong locator data type ({})".format(type(locator))
        elif len(locator) != 64:
            rcode = errors.APPOINTMENT_WRONG_FIELD_SIZE
            message = "wrong locator size ({})".format(len(locator))
        # TODO: Check this regexp
        elif re.search(r'^[0-9A-Fa-f]+$', locator) is None:
            rcode = errors.APPOINTMENT_WRONG_FIELD_FORMAT
            message = "wrong locator format ({})".format(locator)

        if self.debug and message:
            self.logging.error("[Inspector] {}".format(message))

        return rcode, message

    def check_start_time(self, start_time, block_height):
        """Validate start_time: non-empty, int, strictly after the current block height."""
        message = None
        rcode = 0

        t = type(start_time)

        if start_time is None:
            rcode = errors.APPOINTMENT_EMPTY_FIELD
            message = "empty start_time received"
        elif t != int:
            rcode = errors.APPOINTMENT_WRONG_FIELD_TYPE
            message = "wrong start_time data type ({})".format(t)
        elif start_time <= block_height:
            rcode = errors.APPOINTMENT_FIELD_TOO_SMALL
            if start_time < block_height:
                message = "start_time is in the past"
            else:
                message = "start_time too close to current height"

        if self.debug and message:
            self.logging.error("[Inspector] {}".format(message))

        return rcode, message

    def check_end_time(self, end_time, start_time, block_height):
        """Validate end_time: non-empty, int, after both start_time and the current height."""
        message = None
        rcode = 0

        t = type(end_time)

        if end_time is None:
            rcode = errors.APPOINTMENT_EMPTY_FIELD
            message = "empty end_time received"
        elif t != int:
            rcode = errors.APPOINTMENT_WRONG_FIELD_TYPE
            message = "wrong end_time data type ({})".format(t)
        elif start_time >= end_time:
            rcode = errors.APPOINTMENT_FIELD_TOO_SMALL
            if start_time > end_time:
                message = "end_time is smaller than start_time"
            else:
                message = "end_time is equal to start_time"
        elif block_height > end_time:
            rcode = errors.APPOINTMENT_FIELD_TOO_SMALL
            message = 'end_time is in the past'

        if self.debug and message:
            self.logging.error("[Inspector] {}".format(message))

        return rcode, message

    def check_delta(self, dispute_delta):
        """Validate dispute_delta: non-empty, int, at least MIN_DISPUTE_DELTA."""
        message = None
        rcode = 0

        t = type(dispute_delta)

        if dispute_delta is None:
            rcode = errors.APPOINTMENT_EMPTY_FIELD
            message = "empty dispute_delta received"
        elif t != int:
            rcode = errors.APPOINTMENT_WRONG_FIELD_TYPE
            message = "wrong dispute_delta data type ({})".format(t)
        elif dispute_delta < MIN_DISPUTE_DELTA:
            rcode = errors.APPOINTMENT_FIELD_TOO_SMALL
            message = "dispute delta too small. The dispute delta should be at least {} (current: {})".format(
                MIN_DISPUTE_DELTA, dispute_delta)

        if self.debug and message:
            self.logging.error("[Inspector] {}".format(message))

        return rcode, message

    # ToDo: #5-define-checks-encrypted-blob
    def check_blob(self, encrypted_blob):
        """Validate the encrypted blob: non-empty string (content checks still TODO)."""
        message = None
        rcode = 0

        t = type(encrypted_blob)

        if encrypted_blob is None:
            rcode = errors.APPOINTMENT_EMPTY_FIELD
            message = "empty encrypted_blob received"
        elif t != str:
            rcode = errors.APPOINTMENT_WRONG_FIELD_TYPE
            message = "wrong encrypted_blob data type ({})".format(t)
        elif encrypted_blob == '':
            # ToDo: #5 We may want to define this to be at least as long as one block of the cipher we are using
            rcode = errors.APPOINTMENT_WRONG_FIELD
            message = "wrong encrypted_blob"

        if self.debug and message:
            self.logging.error("[Inspector] {}".format(message))

        return rcode, message

    def check_cipher(self, cipher):
        """Validate the cipher: non-empty string listed in SUPPORTED_CIPHERS."""
        message = None
        rcode = 0

        t = type(cipher)

        if cipher is None:
            rcode = errors.APPOINTMENT_EMPTY_FIELD
            message = "empty cipher received"
        elif t != str:
            rcode = errors.APPOINTMENT_WRONG_FIELD_TYPE
            message = "wrong cipher data type ({})".format(t)
        elif cipher not in SUPPORTED_CIPHERS:
            rcode = errors.APPOINTMENT_CIPHER_NOT_SUPPORTED
            message = "cipher not supported: {}".format(cipher)

        if self.debug and message:
            self.logging.error("[Inspector] {}".format(message))

        return rcode, message

    def check_hash_function(self, hash_function):
        """Validate the hash function: non-empty string listed in SUPPORTED_HASH_FUNCTIONS."""
        message = None
        rcode = 0

        t = type(hash_function)

        if hash_function is None:
            rcode = errors.APPOINTMENT_EMPTY_FIELD
            message = "empty hash_function received"
        elif t != str:
            rcode = errors.APPOINTMENT_WRONG_FIELD_TYPE
            message = "wrong hash_function data type ({})".format(t)
        elif hash_function not in SUPPORTED_HASH_FUNCTIONS:
            rcode = errors.APPOINTMENT_HASH_FUNCTION_NOT_SUPPORTED
            message = "hash_function not supported {}".format(hash_function)

        if self.debug and message:
            self.logging.error("[Inspector] {}".format(message))

        return rcode, message
|
||||
35
pisa/pisad.py
Normal file
35
pisa/pisad.py
Normal file
@@ -0,0 +1,35 @@
|
||||
import logging
|
||||
from sys import argv
|
||||
from getopt import getopt
|
||||
from threading import Thread
|
||||
from pisa.api import start_api
|
||||
from pisa.tools import can_connect_to_bitcoind, in_correct_network
|
||||
from pisa.utils.authproxy import AuthServiceProxy
|
||||
from pisa.conf import BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT, BTC_NETWORK, SERVER_LOG_FILE
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Parse command-line options: -d / --debug enables verbose logging everywhere.
    debug = False
    opts, _ = getopt(argv[1:], 'd', ['debug'])
    for opt, arg in opts:
        if opt in ['-d', '--debug']:
            debug = True

    # Configure logging: mirror every message to the server log file and stdout.
    logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO, handlers=[
        logging.FileHandler(SERVER_LOG_FILE),
        logging.StreamHandler()
    ])

    bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST,
                                                           BTC_RPC_PORT))

    # Sanity-check the bitcoind connection and its network before starting the API.
    if can_connect_to_bitcoind(bitcoin_cli):
        if in_correct_network(bitcoin_cli, BTC_NETWORK):
            api_thread = Thread(target=start_api, args=[debug, logging])
            api_thread.start()
        else:
            logging.error("[Pisad] bitcoind is running on a different network, check conf.py and bitcoin.conf. "
                          "Shutting down")
    else:
        logging.error("[Pisad] can't connect to bitcoind. Shutting down")
|
||||
4
pisa/requirements.txt
Normal file
4
pisa/requirements.txt
Normal file
@@ -0,0 +1,4 @@
|
||||
zmq
|
||||
flask
|
||||
cryptography
|
||||
requests
|
||||
229
pisa/responder.py
Normal file
229
pisa/responder.py
Normal file
@@ -0,0 +1,229 @@
|
||||
from queue import Queue
|
||||
from threading import Thread
|
||||
from hashlib import sha256
|
||||
from binascii import unhexlify
|
||||
from pisa.zmq_subscriber import ZMQHandler
|
||||
from pisa.rpc_errors import *
|
||||
from pisa.tools import check_tx_in_chain
|
||||
from pisa.utils.authproxy import AuthServiceProxy, JSONRPCException
|
||||
from pisa.conf import BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT
|
||||
|
||||
CONFIRMATIONS_BEFORE_RETRY = 6
|
||||
MIN_CONFIRMATIONS = 6
|
||||
|
||||
|
||||
class Job:
    """A justice transaction being tracked by the Responder until its
    appointment ends.
    """

    def __init__(self, dispute_txid, justice_rawtx, appointment_end, retry_counter=0):
        self.dispute_txid = dispute_txid
        # FIXME: locator is here so we can give info about jobs for now. It can be either passed from watcher or info
        # can be directly got from DB
        self.locator = sha256(unhexlify(dispute_txid)).hexdigest()
        self.justice_rawtx = justice_rawtx
        self.appointment_end = appointment_end
        # Blocks seen without this job's justice tx gaining a confirmation.
        self.missed_confirmations = 0
        # Times the justice tx has been rebroadcast.
        self.retry_counter = retry_counter

    def to_json(self):
        """Return a plain dict view of the job.

        BUG FIX: the "locator" key previously carried self.dispute_txid, while
        the rest of the codebase (Appointment.to_json, the get_appointment
        endpoint's job.locator match) uses the derived locator. Export the
        actual locator instead.
        """
        job = {"locator": self.locator, "justice_rawtx": self.justice_rawtx,
               "appointment_end": self.appointment_end}

        return job
|
||||
|
||||
|
||||
class Responder:
    """Broadcasts justice transactions and tracks their confirmations until
    each appointment ends.

    Jobs are keyed by justice txid in ``self.jobs``, with a parallel
    confirmation count per job in ``self.confirmation_counter``. The responder
    is "asleep" (no worker threads running) while there are no jobs; the first
    create_job wakes it up.
    """

    def __init__(self):
        self.jobs = dict()                   # justice_txid -> Job
        self.confirmation_counter = dict()   # justice_txid -> confirmation count
        self.block_queue = None              # fed with block hashes by the ZMQ subscriber
        self.asleep = True                   # True while no worker threads are running
        self.zmq_subscriber = None

    def do_subscribe(self, block_queue, debug, logging):
        """Subscribe to new-block notifications, pushing block hashes to block_queue."""
        self.zmq_subscriber = ZMQHandler(parent='Responder')
        self.zmq_subscriber.handle(block_queue, debug, logging)

    def create_job(self, dispute_txid, justice_txid, justice_rawtx, appointment_end, debug, logging, conf_counter=0,
                   retry=False):
        """Register (or re-register) a job and wake the responder up if asleep.

        When retry is True the existing job is kept and only its counters are
        reset/updated; otherwise a fresh Job is created with conf_counter
        confirmations already accounted for.
        """
        # DISCUSS: Check what to do if the retry counter gets too big
        if retry:
            self.jobs[justice_txid].retry_counter += 1
            self.jobs[justice_txid].missed_confirmations = 0
        else:
            self.confirmation_counter[justice_txid] = conf_counter
            self.jobs[justice_txid] = Job(dispute_txid, justice_rawtx, appointment_end)

        if debug:
            logging.info('[Responder] new job added (dispute txid = {}, justice txid = {}, appointment end = {})'.
                         format(dispute_txid, justice_txid, appointment_end))

        if self.asleep:
            # First job after a sleep period: spin up the ZMQ listener and the
            # block-handling loop.
            self.asleep = False
            self.block_queue = Queue()
            zmq_thread = Thread(target=self.do_subscribe, args=[self.block_queue, debug, logging])
            responder = Thread(target=self.handle_responses, args=[debug, logging])
            zmq_thread.start()
            responder.start()

    def add_response(self, dispute_txid, justice_txid, justice_rawtx, appointment_end, debug, logging, retry=False):
        """Push the justice transaction to the network and start monitoring it."""
        bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST,
                                                               BTC_RPC_PORT))
        try:
            if debug:
                if self.asleep:
                    logging.info("[Responder] waking up!")
                logging.info("[Responder] pushing transaction to the network (txid: {})".format(justice_txid))

            bitcoin_cli.sendrawtransaction(justice_rawtx)

            # handle_responses can call add_response recursively if a broadcast transaction does not get confirmations
            # retry holds such information.
            self.create_job(dispute_txid, justice_txid, justice_rawtx, appointment_end, debug, logging, retry=retry)

        except JSONRPCException as e:
            # Since we're pushing a raw transaction to the network we can get two kind of rejections:
            # RPC_VERIFY_REJECTED and RPC_VERIFY_ALREADY_IN_CHAIN. The former implies that the transaction is rejected
            # due to network rules, whereas the later implies that the transaction is already in the blockchain.
            if e.error.get('code') == RPC_VERIFY_REJECTED:
                # DISCUSS: what to do in this case
                pass
            elif e.error.get('code') == RPC_VERIFY_ALREADY_IN_CHAIN:
                try:
                    if debug:
                        logging.info("[Responder] {} is already in the blockchain. Getting the confirmation count and "
                                     "start monitoring the transaction".format(justice_txid))

                    # If the transaction is already in the chain, we get the number of confirmations and watch the job
                    # until the end of the appointment
                    tx_info = bitcoin_cli.getrawtransaction(justice_txid, 1)
                    confirmations = int(tx_info.get("confirmations"))
                    self.create_job(dispute_txid, justice_txid, justice_rawtx, appointment_end, debug, logging,
                                    retry=retry, conf_counter=confirmations)

                except JSONRPCException as e:
                    # While it's quite unlikely, the transaction that was already in the blockchain could have been
                    # reorged while we were querying bitcoind to get the confirmation count. in such a case we just
                    # restart the job
                    if e.error.get('code') == RPC_INVALID_ADDRESS_OR_KEY:
                        self.add_response(dispute_txid, justice_txid, justice_rawtx, appointment_end, debug, logging,
                                          retry=retry)
                    elif debug:
                        # If something else happens (unlikely but possible) log it so we can treat it in future releases
                        logging.error("[Responder] JSONRPCException. Error code {}".format(e))
            elif debug:
                # If something else happens (unlikely but possible) log it so we can treat it in future releases
                logging.error("[Responder] JSONRPCException. Error code {}".format(e))

    def handle_responses(self, debug, logging):
        """Main worker loop: consume blocks from block_queue and update every
        tracked job until none remain, then go back to sleep.
        """
        bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST,
                                                               BTC_RPC_PORT))
        # 0 marks "no block seen yet" (first iteration after waking up).
        prev_block_hash = 0
        while len(self.jobs) > 0:
            # We get notified for every new received block
            block_hash = self.block_queue.get()

            try:
                block = bitcoin_cli.getblock(block_hash)
                txs = block.get('tx')
                height = block.get('height')

                if debug:
                    logging.info("[Responder] new block received {}".format(block_hash))
                    logging.info("[Responder] prev. block hash {}".format(block.get('previousblockhash')))
                    logging.info("[Responder] list of transactions: {}".format(txs))

            except JSONRPCException as e:
                if debug:
                    logging.error("[Responder] couldn't get block from bitcoind. Error code {}".format(e))

                continue

            jobs_to_delete = []
            # Chain extends our last seen tip (or this is the first block): normal path.
            if prev_block_hash == block.get('previousblockhash') or prev_block_hash == 0:
                # Keep count of the confirmations each tx gets
                for job_id, confirmations in self.confirmation_counter.items():
                    # If we see the transaction for the first time, or MIN_CONFIRMATIONS hasn't been reached
                    # NOTE(review): the second operand tests confirmations > 0, not
                    # confirmations < MIN_CONFIRMATIONS as the comment suggests — confirm intent.
                    if job_id in txs or (0 < confirmations):
                        self.confirmation_counter[job_id] += 1

                        if debug:
                            logging.info("[Responder] new confirmation received for txid = {}".format(job_id))

                    elif self.jobs[job_id].missed_confirmations >= CONFIRMATIONS_BEFORE_RETRY:
                        # If a transactions has missed too many confirmations for a while we'll try to rebroadcast
                        # DISCUSS: How many confirmations before retry
                        # DISCUSS: recursion vs setting confirmations to 0 and rebroadcast here
                        # DISCUSS: how many max retries and what to do if the cap is reached
                        self.add_response(self.jobs[job_id].dispute_txid, job_id, self.jobs[job_id].justice_rawtx,
                                          self.jobs[job_id].appointment_end, debug, logging, retry=True)
                        if debug:
                            logging.warning("[Responder] txid = {} has missed {} confirmations. Rebroadcasting"
                                            .format(job_id, CONFIRMATIONS_BEFORE_RETRY))
                    else:
                        # Otherwise we increase the number of missed confirmations
                        self.jobs[job_id].missed_confirmations += 1

                # Collect jobs whose appointment has ended with enough confirmations.
                for job_id, job in self.jobs.items():
                    if job.appointment_end <= height and self.confirmation_counter[job_id] >= MIN_CONFIRMATIONS:
                        # The end of the appointment has been reached
                        jobs_to_delete.append(job_id)

                for job_id in jobs_to_delete:
                    # Trying to delete directly when iterating the last for causes dictionary changed size error during
                    # iteration in Python3 (can not be solved iterating only trough keys in Python3 either)

                    if debug:
                        logging.info("[Responder] {} completed. Appointment ended at block {} after {} confirmations"
                                     .format(job_id, height, self.confirmation_counter[job_id]))

                    # ToDo: record job in DB
                    del self.jobs[job_id]
                    del self.confirmation_counter[job_id]

            else:
                # The new block does not build on our last tip: a reorg happened.
                if debug:
                    logging.warning("[Responder] reorg found! local prev. block id = {}, remote prev. block id = {}"
                                    .format(prev_block_hash, block.get('previousblockhash')))

                self.handle_reorgs(bitcoin_cli, debug, logging)

            prev_block_hash = block.get('hash')

        # Go back to sleep if there are no more jobs
        self.asleep = True
        self.zmq_subscriber.terminate = True

        if debug:
            logging.info("[Responder] no more pending jobs, going back to sleep")

    def handle_reorgs(self, bitcoin_cli, debug, logging):
        """Re-check every tracked transaction after a reorg has been detected."""
        for job_id, job in self.jobs.items():
            # First we check if the dispute transaction is still in the blockchain. If not, the justice can not be
            # there either, so we'll need to call the reorg manager straight away
            dispute_in_chain, _ = check_tx_in_chain(bitcoin_cli, job.dispute_txid, debug, logging, parent='Responder',
                                                    tx_label='dispute')

            # If the dispute is there, we can check the justice tx
            if dispute_in_chain:
                # NOTE(review): tx_label='dispute' here looks copy-pasted — this call
                # checks the justice tx; confirm whether the label should be 'justice'.
                justice_in_chain, justice_confirmations = check_tx_in_chain(bitcoin_cli, job_id, debug, logging,
                                                                            parent='Responder', tx_label='dispute')

                # If both transactions are there, we only need to update the justice tx confirmation count
                if justice_in_chain:
                    if debug:
                        logging.info("[Responder] updating confirmation count for {}: prev. {}, current {}".format(
                            job_id, self.confirmation_counter[job_id], justice_confirmations))
                    self.confirmation_counter[job_id] = justice_confirmations
                    if debug:
                        # NOTE(review): this log line looks like a leftover copy — the
                        # responder is not actually going to sleep at this point.
                        logging.info("[Responder] no more pending jobs, going back to sleep")

                else:
                    # Otherwise, we will add the job back (implying rebroadcast of the tx) and monitor it again
                    # DISCUSS: Adding job back, should we flag it as retried?
                    self.add_response(job.dispute_txid, job_id, job.justice_rawtx, job.appointment_end, debug, logging)

            else:
                # FIXME: if the dispute is not on chain (either in mempool or not there al all), we need to call the
                # reorg manager
                logging.warning("[Responder] dispute and justice transaction missing. Calling the reorg manager")
                logging.error("[Responder] reorg manager not yet implemented")
                pass
|
||||
42
pisa/rpc_errors.py
Normal file
42
pisa/rpc_errors.py
Normal file
@@ -0,0 +1,42 @@
|
||||
# Ported from https://github.com/bitcoin/bitcoin/blob/0.18/src/rpc/protocol.h
# These mirror bitcoind's JSON-RPC error codes so callers can match on
# e.error.get('code') from JSONRPCException.

# General application defined errors
RPC_MISC_ERROR = -1  # std::exception thrown in command handling
RPC_TYPE_ERROR = -3  # Unexpected type was passed as parameter
RPC_INVALID_ADDRESS_OR_KEY = -5  # Invalid address or key
RPC_OUT_OF_MEMORY = -7  # Ran out of memory during operation
RPC_INVALID_PARAMETER = -8  # Invalid missing or duplicate parameter
RPC_DATABASE_ERROR = -20  # Database error
RPC_DESERIALIZATION_ERROR = -22  # Error parsing or validating structure in raw format
RPC_VERIFY_ERROR = -25  # General error during transaction or block submission
RPC_VERIFY_REJECTED = -26  # Transaction or block was rejected by network rules
RPC_VERIFY_ALREADY_IN_CHAIN = -27  # Transaction already in chain
RPC_IN_WARMUP = -28  # Client still warming up
RPC_METHOD_DEPRECATED = -32  # RPC method is deprecated

# Aliases for backward compatibility
RPC_TRANSACTION_ERROR = RPC_VERIFY_ERROR
RPC_TRANSACTION_REJECTED = RPC_VERIFY_REJECTED
RPC_TRANSACTION_ALREADY_IN_CHAIN= RPC_VERIFY_ALREADY_IN_CHAIN

# P2P client errors
RPC_CLIENT_NOT_CONNECTED = -9  # Bitcoin is not connected
RPC_CLIENT_IN_INITIAL_DOWNLOAD = -10  # Still downloading initial blocks
RPC_CLIENT_NODE_ALREADY_ADDED = -23  # Node is already added
RPC_CLIENT_NODE_NOT_ADDED = -24  # Node has not been added before
RPC_CLIENT_NODE_NOT_CONNECTED = -29  # Node to disconnect not found in connected nodes
RPC_CLIENT_INVALID_IP_OR_SUBNET = -30  # Invalid IP/Subnet
RPC_CLIENT_P2P_DISABLED = -31  # No valid connection manager instance found

# Wallet errors
RPC_WALLET_ERROR = -4  # Unspecified problem with wallet (key not found etc.)
RPC_WALLET_INSUFFICIENT_FUNDS = -6  # Not enough funds in wallet or account
RPC_WALLET_INVALID_LABEL_NAME = -11  # Invalid label name
RPC_WALLET_KEYPOOL_RAN_OUT = -12  # Keypool ran out call keypoolrefill first
RPC_WALLET_UNLOCK_NEEDED = -13  # Enter the wallet passphrase with walletpassphrase first
RPC_WALLET_PASSPHRASE_INCORRECT = -14  # The wallet passphrase entered was incorrect
RPC_WALLET_WRONG_ENC_STATE = -15  # Command given in wrong wallet encryption state (encrypting an encrypted wallet etc.)
RPC_WALLET_ENCRYPTION_FAILED = -16  # Failed to encrypt the wallet
RPC_WALLET_ALREADY_UNLOCKED = -17  # Wallet is already unlocked
RPC_WALLET_NOT_FOUND = -18  # Invalid wallet specified
RPC_WALLET_NOT_SPECIFIED = -19  # No wallet specified (error when there are multiple wallets loaded)
|
||||
26
pisa/sample_conf.py
Normal file
26
pisa/sample_conf.py
Normal file
@@ -0,0 +1,26 @@
|
||||
# bitcoind RPC endpoint and credentials (to be filled in by the operator)
BTC_RPC_USER = None
BTC_RPC_PASSWD = None
BTC_RPC_HOST = None
BTC_RPC_PORT = None
BTC_NETWORK = None


# ZMQ block feed (bitcoind -zmqpubhashblock endpoint)
FEED_PROTOCOL = None
FEED_ADDR = None
FEED_PORT = None

# PISA
MAX_APPOINTMENTS = 100        # maximum number of appointments held at once
EXPIRY_DELTA = 6              # blocks past an appointment's end_time before it is dropped
MIN_DISPUTE_DELTA = 20
SERVER_LOG_FILE = 'pisa.log'
DB_PATH = 'appointments/'

# PISA-CLI
CLIENT_LOG_FILE = 'pisa.log'

# CRYPTO
SUPPORTED_HASH_FUNCTIONS = ["SHA256"]
SUPPORTED_CIPHERS = ["AES-GCM-128"]
57
pisa/tools.py
Normal file
57
pisa/tools.py
Normal file
@@ -0,0 +1,57 @@
|
||||
from pisa.utils.authproxy import JSONRPCException
|
||||
from pisa.rpc_errors import RPC_INVALID_ADDRESS_OR_KEY
|
||||
from http.client import HTTPException
|
||||
|
||||
|
||||
def check_tx_in_chain(bitcoin_cli, tx_id, debug, logging, parent='', tx_label='transaction'):
    """Query bitcoind for ``tx_id`` and report its confirmation state.

    Returns a ``(tx_in_chain, confirmations)`` tuple: ``tx_in_chain`` is True
    only when the transaction has at least one confirmation; ``confirmations``
    is 0 otherwise (unknown transaction, or known but still in mempool).
    """
    in_chain = False
    conf_count = 0

    try:
        tx_info = bitcoin_cli.getrawtransaction(tx_id, 1)
        confirmations = tx_info.get("confirmations")

        if confirmations:
            conf_count = int(confirmations)
            in_chain = True

            if debug:
                logging.error("[{}] {} found in the blockchain (txid: {}) ".format(parent, tx_label, tx_id))

        elif debug:
            logging.error("[{}] {} found in mempool (txid: {}) ".format(parent, tx_label, tx_id))

    except JSONRPCException as e:
        # getrawtransaction raises RPC_INVALID_ADDRESS_OR_KEY for unknown txids
        if e.error.get('code') == RPC_INVALID_ADDRESS_OR_KEY:
            if debug:
                logging.error("[{}] {} not found in mempool nor blockchain (txid: {}) ".format(parent, tx_label, tx_id))
        elif debug:
            # ToDO: Unhandled errors, check this properly
            logging.error("[{}] JSONRPCException. Error code {}".format(parent, e))

    return in_chain, conf_count
||||
|
||||
|
||||
def can_connect_to_bitcoind(bitcoin_cli):
    """Return True when bitcoind answers a trivial RPC (``help``), False otherwise."""
    try:
        bitcoin_cli.help()
    except (ConnectionRefusedError, JSONRPCException, HTTPException):
        return False

    return True
||||
|
||||
|
||||
def in_correct_network(bitcoin_cli, network):
    """Check that the node's genesis block matches the expected ``network``.

    'mainnet'/'testnet' must match their known genesis hashes; 'regtest' is
    accepted for any genesis hash that is neither of the public ones. Any
    other network name yields False.
    """
    mainnet_genesis_block_hash = "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"
    testnet3_genesis_block_hash = "000000000933ea01ad0ee984209779baaec3ced90fa3f408719526f8d77f4943"

    genesis_block_hash = bitcoin_cli.getblockhash(0)

    public_genesis_hashes = {'mainnet': mainnet_genesis_block_hash,
                             'testnet': testnet3_genesis_block_hash}

    if network in public_genesis_hashes:
        return genesis_block_hash == public_genesis_hashes[network]

    if network == 'regtest':
        # Regtest chains have a per-node genesis, so anything but the public hashes is fine.
        return genesis_block_hash not in public_genesis_hashes.values()

    return False
||||
|
||||
0
pisa/utils/__init__.py
Normal file
0
pisa/utils/__init__.py
Normal file
211
pisa/utils/authproxy.py
Normal file
211
pisa/utils/authproxy.py
Normal file
@@ -0,0 +1,211 @@
|
||||
# Copyright (c) 2011 Jeff Garzik
|
||||
#
|
||||
# Previous copyright, from python-jsonrpc/jsonrpc/proxy.py:
|
||||
#
|
||||
# Copyright (c) 2007 Jan-Klaas Kollhof
|
||||
#
|
||||
# This file is part of jsonrpc.
|
||||
#
|
||||
# jsonrpc is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Lesser General Public License as published by
|
||||
# the Free Software Foundation; either version 2.1 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This software is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with this software; if not, write to the Free Software
|
||||
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
|
||||
"""HTTP proxy for opening RPC connection to bitcoind.
|
||||
|
||||
AuthServiceProxy has the following improvements over python-jsonrpc's
|
||||
ServiceProxy class:
|
||||
|
||||
- HTTP connections persist for the life of the AuthServiceProxy object
|
||||
(if server supports HTTP/1.1)
|
||||
- sends protocol 'version', per JSON-RPC 1.1
|
||||
- sends proper, incrementing 'id'
|
||||
- sends Basic HTTP authentication headers
|
||||
- parses all JSON numbers that look like floats as Decimal
|
||||
- uses standard Python json lib
|
||||
"""
|
||||
|
||||
# bitcoin_rpc auth proxy does not handle broken pipes. Using Bitcoin Core's one which is more complete.
|
||||
# Taken from https://github.com/bitcoin/bitcoin/blob/master/test/functional/test_framework/authproxy.py
|
||||
|
||||
import base64
|
||||
import decimal
|
||||
from http import HTTPStatus
|
||||
import http.client
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import socket
|
||||
import time
|
||||
import urllib.parse
|
||||
|
||||
# Timeout (seconds) applied to every HTTP request against the RPC server.
HTTP_TIMEOUT = 30
# User-Agent header sent with every RPC request.
USER_AGENT = "AuthServiceProxy/0.1"

# Module-level logger shared by all proxy instances.
log = logging.getLogger("BitcoinRPC")
||||
|
||||
|
||||
class JSONRPCException(Exception):
    """Raised when the RPC server returns a JSON-RPC error object."""

    def __init__(self, rpc_error, http_status=None):
        # Build a readable "message (code)" string; fall back to '' when
        # rpc_error is not a dict carrying 'message'/'code' keys.
        try:
            message = '%(message)s (%(code)i)' % rpc_error
        except (KeyError, TypeError):
            message = ''

        super().__init__(message)
        self.error = rpc_error          # raw JSON-RPC error object
        self.http_status = http_status  # HTTP status code, when available
||||
|
||||
|
||||
def EncodeDecimal(o):
    """``json.dumps`` default hook: serialize Decimal values as strings."""
    if not isinstance(o, decimal.Decimal):
        raise TypeError(repr(o) + " is not JSON serializable")
    return str(o)
||||
|
||||
|
||||
class AuthServiceProxy():
    """Callable JSON-RPC proxy over HTTP with Basic authentication.

    ``proxy.method(*args)`` issues a JSON-RPC call named after the attribute;
    attribute access returns a child proxy bound to that method name, so
    dotted service names (``proxy.a.b``) compose naturally.
    """

    # Class-wide counter used to produce incrementing JSON-RPC request ids.
    __id_count = 0

    # ensure_ascii: escape unicode as \uXXXX, passed to json.dumps
    def __init__(self, service_url, service_name=None, timeout=HTTP_TIMEOUT, connection=None, ensure_ascii=True):
        """Create a proxy for *service_url*, optionally bound to *service_name*.

        An existing *connection* may be supplied to share one HTTP(S)
        connection (used when child proxies are spawned via attribute access).
        """
        self.__service_url = service_url
        self._service_name = service_name
        self.ensure_ascii = ensure_ascii  # can be toggled on the fly by tests
        self.__url = urllib.parse.urlparse(service_url)
        user = None if self.__url.username is None else self.__url.username.encode('utf8')
        passwd = None if self.__url.password is None else self.__url.password.encode('utf8')
        authpair = user + b':' + passwd
        self.__auth_header = b'Basic ' + base64.b64encode(authpair)
        self.timeout = timeout
        self._set_conn(connection)

    def __getattr__(self, name):
        """Return a child proxy for RPC method *name*, reusing this connection."""
        if name.startswith('__') and name.endswith('__'):
            # Python internal stuff
            raise AttributeError
        if self._service_name is not None:
            name = "%s.%s" % (self._service_name, name)
        return AuthServiceProxy(self.__service_url, name, connection=self.__conn)

    def _request(self, method, path, postdata):
        '''
        Do a HTTP request, with retry if we get disconnected (e.g. due to a timeout).
        This is a workaround for https://bugs.python.org/issue3566 which is fixed in Python 3.5.
        '''
        headers = {'Host': self.__url.hostname,
                   'User-Agent': USER_AGENT,
                   'Authorization': self.__auth_header,
                   'Content-type': 'application/json'}
        if os.name == 'nt':
            # Windows somehow does not like to re-use connections
            # TODO: Find out why the connection would disconnect occasionally and make it reusable on Windows
            self._set_conn()
        try:
            self.__conn.request(method, path, postdata, headers)
            return self._get_response()
        except http.client.BadStatusLine as e:
            if e.line == "''":  # if connection was closed, try again
                self.__conn.close()
                self.__conn.request(method, path, postdata, headers)
                return self._get_response()
            else:
                raise
        except (BrokenPipeError, ConnectionResetError):
            # Python 3.5+ raises BrokenPipeError instead of BadStatusLine when the connection was reset
            # ConnectionResetError happens on FreeBSD with Python 3.4
            self.__conn.close()
            self.__conn.request(method, path, postdata, headers)
            return self._get_response()

    def get_request(self, *args, **argsn):
        """Build the JSON-RPC request dict (version/method/params/id) for this call."""
        AuthServiceProxy.__id_count += 1

        log.debug("-{}-> {} {}".format(
            AuthServiceProxy.__id_count,
            self._service_name,
            json.dumps(args or argsn, default=EncodeDecimal, ensure_ascii=self.ensure_ascii),
        ))
        # JSON-RPC 1.1 has no mixed positional/named params
        if args and argsn:
            raise ValueError('Cannot handle both named and positional arguments')
        return {'version': '1.1',
                'method': self._service_name,
                'params': args or argsn,
                'id': AuthServiceProxy.__id_count}

    def __call__(self, *args, **argsn):
        """Execute the RPC call; return its 'result' or raise JSONRPCException."""
        postdata = json.dumps(self.get_request(*args, **argsn), default=EncodeDecimal, ensure_ascii=self.ensure_ascii)
        response, status = self._request('POST', self.__url.path, postdata.encode('utf-8'))
        if response['error'] is not None:
            raise JSONRPCException(response['error'], status)
        elif 'result' not in response:
            raise JSONRPCException({
                'code': -343, 'message': 'missing JSON-RPC result'}, status)
        elif status != HTTPStatus.OK:
            raise JSONRPCException({
                'code': -342, 'message': 'non-200 HTTP status code but no JSON-RPC error'}, status)
        else:
            return response['result']

    def batch(self, rpc_call_list):
        """Send a list of pre-built request dicts as one JSON-RPC batch.

        Returns the decoded batch response list; per-call errors are NOT
        raised here, only a non-200 HTTP status is.
        """
        postdata = json.dumps(list(rpc_call_list), default=EncodeDecimal, ensure_ascii=self.ensure_ascii)
        log.debug("--> " + postdata)
        response, status = self._request('POST', self.__url.path, postdata.encode('utf-8'))
        if status != HTTPStatus.OK:
            raise JSONRPCException({
                'code': -342, 'message': 'non-200 HTTP status code but no JSON-RPC error'}, status)
        return response

    def _get_response(self):
        """Read and JSON-decode the pending HTTP response.

        Returns a ``(response_dict, http_status)`` tuple. Floats are parsed as
        Decimal to avoid precision loss on monetary amounts.
        """
        req_start_time = time.time()
        try:
            http_response = self.__conn.getresponse()
        except socket.timeout:
            raise JSONRPCException({
                'code': -344,
                'message': '%r RPC took longer than %f seconds. Consider '
                           'using larger timeout for calls that take '
                           'longer to return.' % (self._service_name,
                                                  self.__conn.timeout)})
        if http_response is None:
            raise JSONRPCException({
                'code': -342, 'message': 'missing HTTP response from server'})

        content_type = http_response.getheader('Content-Type')
        if content_type != 'application/json':
            raise JSONRPCException(
                {'code': -342, 'message': 'non-JSON HTTP response with \'%i %s\' from server' % (
                    http_response.status, http_response.reason)},
                http_response.status)

        responsedata = http_response.read().decode('utf8')
        response = json.loads(responsedata, parse_float=decimal.Decimal)
        elapsed = time.time() - req_start_time
        if "error" in response and response["error"] is None:
            log.debug("<-%s- [%.6f] %s" % (response["id"], elapsed,
                                           json.dumps(response["result"], default=EncodeDecimal,
                                                      ensure_ascii=self.ensure_ascii)))
        else:
            log.debug("<-- [%.6f] %s" % (elapsed, responsedata))
        return response, http_response.status

    def __truediv__(self, relative_uri):
        """Support ``proxy / "wallet/name"`` to target a sub-path of the service URL."""
        return AuthServiceProxy("{}/{}".format(self.__service_url, relative_uri), self._service_name,
                                connection=self.__conn)

    def _set_conn(self, connection=None):
        """(Re)create or adopt the underlying HTTP(S) connection."""
        port = 80 if self.__url.port is None else self.__url.port
        if connection:
            self.__conn = connection
            self.timeout = connection.timeout
        elif self.__url.scheme == 'https':
            self.__conn = http.client.HTTPSConnection(self.__url.hostname, port, timeout=self.timeout)
        else:
            self.__conn = http.client.HTTPConnection(self.__url.hostname, port, timeout=self.timeout)
||||
177
pisa/watcher.py
Normal file
177
pisa/watcher.py
Normal file
@@ -0,0 +1,177 @@
|
||||
from binascii import hexlify, unhexlify
|
||||
from queue import Queue
|
||||
from threading import Thread
|
||||
from pisa.responder import Responder
|
||||
from pisa.zmq_subscriber import ZMQHandler
|
||||
from pisa.utils.authproxy import AuthServiceProxy, JSONRPCException
|
||||
from hashlib import sha256
|
||||
from pisa.conf import BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT, MAX_APPOINTMENTS, EXPIRY_DELTA
|
||||
|
||||
|
||||
class Watcher:
    """Watches incoming blocks for transactions matching stored appointments.

    Appointments are stored as ``locator -> [appointment, ...]`` where the
    locator is the sha256 of the commitment txid. The watcher sleeps while it
    holds no appointments; the first accepted one spawns a ZMQ subscriber
    thread plus a watching thread, and both shut down again once every
    appointment has been matched or has expired.
    """

    def __init__(self, max_appointments=MAX_APPOINTMENTS):
        self.appointments = dict()   # locator -> list of appointments
        self.block_queue = None      # filled with block hashes by the ZMQ thread
        self.asleep = True
        self.max_appointments = max_appointments
        self.zmq_subscriber = None
        self.responder = Responder()

    def add_appointment(self, appointment, debug, logging):
        """Store an appointment and wake the watcher if it was asleep.

        Returns True when accepted, False when max_appointments is reached.
        """
        # DISCUSS: about validation of input data

        # Rationale:
        # The Watcher will analyze every received block looking for appointment matches. If there is no work
        # to do the watcher can go sleep (if appointments = {} then asleep = True) otherwise for every received block
        # the watcher will get the list of transactions and compare it with the list of appointments.
        # If the watcher is awake, every new appointment will just be added to the appointment list until
        # max_appointments is reached.

        if len(self.appointments) < self.max_appointments:
            # Appointments are identified by the locator: the sha256 of commitment txid (H(tx_id)).
            # Two different nodes may ask for appointments using the same commitment txid, what will result in a
            # collision in our appointments structure (and may be an attack surface), we use lists to avoid that.
            if not self.appointments.get(appointment.locator):
                self.appointments[appointment.locator] = []

            self.appointments[appointment.locator].append(appointment)

            if self.asleep:
                self.asleep = False
                self.block_queue = Queue()
                zmq_thread = Thread(target=self.do_subscribe, args=[self.block_queue, debug, logging])
                watcher = Thread(target=self.do_watch, args=[debug, logging])
                zmq_thread.start()
                watcher.start()

                if debug:
                    logging.info("[Watcher] waking up!")

            appointment_added = True

            if debug:
                logging.info('[Watcher] new appointment accepted (locator = {})'.format(appointment.locator))

        else:
            appointment_added = False

            if debug:
                logging.info('[Watcher] maximum appointments reached, appointment rejected (locator = {})'
                             .format(appointment.locator))

        return appointment_added

    def do_subscribe(self, block_queue, debug, logging):
        """Run the ZMQ block subscriber (blocking); meant for its own thread."""
        self.zmq_subscriber = ZMQHandler(parent='Watcher')
        self.zmq_subscriber.handle(block_queue, debug, logging)

    def do_watch(self, debug, logging):
        """Main watching loop: process blocks until the appointment list drains."""
        bitcoin_cli = AuthServiceProxy("http://%s:%s@%s:%d" % (BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST,
                                                               BTC_RPC_PORT))

        while len(self.appointments) > 0:
            block_hash = self.block_queue.get()

            try:
                block = bitcoin_cli.getblock(block_hash)
                txids = block.get('tx')

                # locator (sha256 of txid) -> txid, for every tx in the block
                potential_locators = {sha256(unhexlify(txid)).hexdigest(): txid for txid in txids}

                if debug:
                    logging.info("[Watcher] new block received {}".format(block_hash))
                    logging.info("[Watcher] list of transactions: {}".format(txids))

                # Delete expired appointments
                to_delete = {}
                for locator in self.appointments:
                    # enumerate (instead of list.index) keeps positions unique even when two
                    # equal appointments share a locator
                    for appointment_pos, appointment in enumerate(self.appointments[locator]):
                        if block["height"] > appointment.end_time + EXPIRY_DELTA:
                            if locator in to_delete:
                                to_delete[locator].append(appointment_pos)
                            else:
                                to_delete[locator] = [appointment_pos]

                for locator, indexes in to_delete.items():
                    if len(indexes) == len(self.appointments[locator]):
                        if debug:
                            logging.info("[Watcher] end time reached with no match! Deleting appointment {}"
                                         .format(locator))

                        del self.appointments[locator]
                    else:
                        # Delete from the highest position downwards so the recorded positions of
                        # the remaining to-be-deleted entries stay valid after each deletion.
                        for i in sorted(indexes, reverse=True):
                            if debug:
                                logging.info("[Watcher] end time reached with no match! Deleting appointment {}:{}"
                                             .format(locator, i))

                            del self.appointments[locator][i]

                # Check is any of the tx_ids in the received block is an actual match
                potential_matches = {}
                for locator in self.appointments.keys():
                    if locator in potential_locators:
                        # This is locator:txid
                        potential_matches[locator] = potential_locators[locator]

                if debug:
                    if len(potential_matches) > 0:
                        logging.info("[Watcher] list of potential matches: {}".format(potential_matches))
                    else:
                        logging.info("[Watcher] no potential matches found")

                matches = self.check_potential_matches(potential_matches, bitcoin_cli, debug, logging)

                for locator, appointment_pos, dispute_txid, justice_txid, justice_rawtx in matches:
                    if debug:
                        logging.info("[Watcher] notifying responder about {} and deleting appointment {}:{}".format(
                            justice_txid, locator, appointment_pos))

                    self.responder.add_response(dispute_txid, justice_txid, justice_rawtx,
                                                self.appointments[locator][appointment_pos].end_time, debug, logging)

                    # If there was only one appointment that matches the locator we can delete the whole list
                    # DISCUSS: We may want to use locks before adding / removing appointment
                    # NOTE(review): if several appointments under the same locator match in a single block, the
                    # earlier deletion shifts positions of the later matches — confirm whether that can happen.
                    if len(self.appointments[locator]) == 1:
                        del self.appointments[locator]
                    else:
                        # Otherwise we just delete the appointment that matches locator:appointment_pos
                        del self.appointments[locator][appointment_pos]

            except JSONRPCException as e:
                if debug:
                    logging.error("[Watcher] JSONRPCException. Error code {}".format(e))
                continue

        # Go back to sleep if there are no more appointments
        self.asleep = True
        self.zmq_subscriber.terminate = True

        if debug:
            logging.error("[Watcher] no more pending appointments, going back to sleep")

    def check_potential_matches(self, potential_matches, bitcoin_cli, debug, logging):
        """Try to decrypt each potential match's blob with its dispute txid.

        Returns a list of (locator, appointment_pos, dispute_txid, justice_txid,
        justice_rawtx) tuples for blobs that decrypt to a valid transaction.
        """
        matches = []

        for locator, dispute_txid in potential_matches.items():
            for appointment_pos, appointment in enumerate(self.appointments.get(locator)):
                try:
                    justice_rawtx = appointment.encrypted_blob.decrypt(unhexlify(dispute_txid), debug, logging)
                    justice_rawtx = hexlify(justice_rawtx).decode()
                    justice_txid = bitcoin_cli.decoderawtransaction(justice_rawtx).get('txid')
                    matches.append((locator, appointment_pos, dispute_txid, justice_txid, justice_rawtx))

                    if debug:
                        logging.info("[Watcher] match found for {}:{}! {}".format(locator, appointment_pos,
                                                                                  justice_txid))
                except JSONRPCException as e:
                    # Tx decode failed returns error code -22, maybe we should be more strict here. Leaving it simple
                    # for the POC
                    if debug:
                        logging.error("[Watcher] can't build transaction from decoded data. Error code {}".format(e))
                    continue

        return matches
|
||||
31
pisa/zmq_subscriber.py
Normal file
31
pisa/zmq_subscriber.py
Normal file
@@ -0,0 +1,31 @@
|
||||
import zmq
|
||||
import binascii
|
||||
from pisa.conf import FEED_PROTOCOL, FEED_ADDR, FEED_PORT
|
||||
|
||||
# ToDo: #7-add-async-back-to-zmq
|
||||
class ZMQHandler:
    """Subscribes to bitcoind's ZMQ 'hashblock' feed and queues block hashes.

    Adapted from https://github.com/bitcoin/bitcoin/blob/master/contrib/zmq/zmq_sub.py
    """

    def __init__(self, parent):
        self.zmqContext = zmq.Context()
        self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)
        self.zmqSubSocket.setsockopt(zmq.RCVHWM, 0)
        self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashblock")
        self.zmqSubSocket.connect("%s://%s:%s" % (FEED_PROTOCOL, FEED_ADDR, FEED_PORT))
        self.parent = parent      # tag used in log messages (e.g. 'Watcher')
        self.terminate = False    # set by the owner to stop the handle loop

    def handle(self, block_queue, debug, logging):
        """Blocking receive loop: push every new block hash into block_queue."""
        while not self.terminate:
            msg = self.zmqSubSocket.recv_multipart()

            # Terminate could have been set while the thread was blocked in recv
            if not self.terminate:
                topic = msg[0]
                body = msg[1]

                if topic == b"hashblock":
                    block_hash = binascii.hexlify(body).decode('UTF-8')
                    block_queue.put(block_hash)

                    if debug:
                        # FIX: the original format string had a single placeholder, silently
                        # dropping block_hash from the log line.
                        logging.info("[ZMQHandler-{}] new block received via ZMQ: {}".format(self.parent, block_hash))
|
||||
Reference in New Issue
Block a user