Fixes several bugs related to the encrypted blob

Uses a proper KDF (HKDF) to extend the LSB of the txid. Standardizes the representation of the keys / hashes on both sides (encrypt/decrypt now deal with bytes instead of strings). Adds further logging.
This commit is contained in:
Sergi Delgado Segura
2019-06-06 15:44:30 +01:00
parent 552a3622ac
commit 9aea751307
6 changed files with 100 additions and 54 deletions

View File

@@ -1,9 +1,12 @@
import hashlib
from binascii import hexlify
from binascii import hexlify, unhexlify
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
from cryptography.hazmat.primitives.kdf.hkdf import HKDF
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.backends import default_backend
SUPPORTED_HASH_FUNCTIONS = ['SHA256']
SUPPORTED_CYPHERS = ['AES-GCM-128']
SUPPORTED_HASH_FUNCTIONS = ["SHA256"]
SUPPORTED_CYPHERS = ["AES-GCM-128"]
SALT = "lightningwatcher"
class Blob:
@@ -14,23 +17,32 @@ class Blob:
# FIXME: We only support SHA256 for now
if self.hash_function.upper() not in SUPPORTED_HASH_FUNCTIONS:
raise Exception('Hash function not supported ({}). Supported Hash functions: {}'
raise Exception("Hash function not supported ({}). Supported Hash functions: {}"
.format(self.hash_function, SUPPORTED_HASH_FUNCTIONS))
# FIXME: We only support SHA256 for now
if self.cypher.upper() not in SUPPORTED_CYPHERS:
raise Exception('Cypher not supported ({}). Supported cyphers: {}'.format(self.hash_function,
raise Exception("Cypher not supported ({}). Supported cyphers: {}".format(self.hash_function,
SUPPORTED_CYPHERS))
def encrypt(self, tx_id):
def encrypt(self, tx_id, debug, logging):
# Transaction to be encrypted
# FIXME: The blob data should contain more things that just the transaction. Leaving like this for now.
tx = self.data.encode()
tx = unhexlify(self.data)
# FIXME: tx_id should not be necessary (can be derived from tx SegWit-like). Passing it for now
# Extend the key using SHA256 as a KDF
tx_id = tx_id.encode()
extended_key = hashlib.sha256(tx_id[:16]).digest()
# Extend the key using HKDF
tx_id = unhexlify(tx_id)
hkdf = HKDF(
algorithm=hashes.SHA256(),
length=32,
salt=SALT.encode(),
info=None,
backend=default_backend()
)
extended_key = hkdf.derive(tx_id[16:])
# The 16 MSB of the extended key will serve as the AES GCM 128 secret key. The 16 LSB will serve as the IV.
sk = extended_key[:16]
@@ -38,6 +50,14 @@ class Blob:
# Encrypt the data
aesgcm = AESGCM(sk)
encrypted_blob = hexlify(aesgcm.encrypt(nonce=nonce, data=tx, associated_data=None)).decode()
encrypted_blob = aesgcm.encrypt(nonce=nonce, data=tx, associated_data=None)
encrypted_blob = hexlify(encrypted_blob).decode()
if debug:
logging.info("[Client] creating new blob")
logging.info("[Client] master key: {}".format(hexlify(tx_id[16:]).decode()))
logging.info("[Client] sk: {}".format(hexlify(sk).decode()))
logging.info("[Client] nonce: {}".format(hexlify(nonce).decode()))
logging.info("[Client] encrypted_blob: {}".format(encrypted_blob))
return encrypted_blob

View File

@@ -14,8 +14,8 @@ from apps import PISA_API_SERVER, PISA_API_PORT
commands = ['add_appointment']
def build_appointment(tx, tx_id, start_block, end_block, dispute_delta):
locator = tx_id[:16]
def build_appointment(tx, tx_id, start_block, end_block, dispute_delta, debug, logging):
locator = tx_id[:32]
cipher = "AES-GCM-128"
hash_function = "SHA256"
@@ -24,7 +24,7 @@ def build_appointment(tx, tx_id, start_block, end_block, dispute_delta):
blob = Blob(tx, cipher, hash_function)
# FIXME: tx_id should not be necessary (can be derived from tx SegWit-like). Passing it for now
encrypted_blob = blob.encrypt(tx_id)
encrypted_blob = blob.encrypt(tx_id, debug, logging)
appointment = {"locator": locator, "start_block": start_block, "end_block": end_block,
"dispute_delta": dispute_delta, "encrypted_blob": encrypted_blob, "cipher": cipher, "hash_function":
@@ -48,7 +48,22 @@ def show_usage():
if __name__ == '__main__':
opts, args = getopt(argv[1:], '', commands)
debug = False
command = None
opts, args = getopt(argv[1:], 'a:d', ['add_appointment, debug'])
for opt, arg in opts:
if opt in ['-a', '--add_appointment']:
if arg:
if not os.path.isfile(arg):
raise Exception("Can't find file " + arg)
else:
command = 'add_appointment'
json_file = arg
else:
raise Exception("Path to appointment_data.json missing.")
if opt in ['-d', '--debug']:
debug = True
# Configure logging
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO, handlers=[
@@ -56,33 +71,24 @@ if __name__ == '__main__':
logging.StreamHandler()
])
# Get args
if len(args) > 0:
command = args[0]
else:
raise Exception("Argument missing. Use help for usage information.")
if command in commands:
if command in commands:
if len(args) != 2:
raise Exception("Path to appointment_data.json missing.")
if not os.path.isfile(args[1]):
raise Exception("Can't find file " + args[1])
appointment_data = json.load(open(args[1]))
if command == 'add_appointment':
appointment_data = json.load(open(json_file))
valid_locator = check_txid_format(appointment_data.get('tx_id'))
if valid_locator:
pisa_url = "http://{}:{}".format(PISA_API_SERVER, PISA_API_PORT)
appointment = build_appointment(appointment_data.get('tx'), appointment_data.get('tx_id'),
appointment_data.get('start_time'), appointment_data.get('end_time'),
appointment_data.get('dispute_delta'))
appointment_data.get('dispute_delta'), debug, logging)
if debug:
logging.info("[Client] sending appointment to PISA")
r = requests.post(url=pisa_url, json=json.dumps(appointment))
logging.info("[Client] {} (code: {})".format(r.text, r.status_code))
if debug:
logging.info("[Client] {} (code: {})".format(r.text, r.status_code))
else:
raise ValueError("The provided locator is not valid.")

View File

@@ -27,14 +27,14 @@ def add_appointment():
# FIXME: Response should be signed receipt (created and signed by the API)
if appointment_added:
response = "Appointment accepted"
response = "appointment accepted"
else:
response = "Appointment rejected"
response = "appointment rejected"
# FIXME: change the response code maybe?
else:
rcode = HTTP_BAD_REQUEST
response = "Appointment rejected. Request does not match the standard"
response = "appointment rejected. Request does not match the standard"
# Send response back. Change multiprocessing.connection for an http based connection
if debug:

View File

@@ -1,23 +1,42 @@
import hashlib
from binascii import unhexlify
from binascii import unhexlify, hexlify
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
from cryptography.hazmat.primitives.kdf.hkdf import HKDF
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.backends import default_backend
SALT = 'lightningwatcher'
class EncryptedBlob:
def __init__(self, data):
self.data = data
def decrypt(self, key):
# Extend the key using SHA256 as a KDF
extended_key = hashlib.sha256(key).digest()
def decrypt(self, key, debug, logging):
# Extend the key using HKDF
hkdf = HKDF(
algorithm=hashes.SHA256(),
length=32,
salt=SALT.encode(),
info=None,
backend=default_backend()
)
extended_key = hkdf.derive(key)
# The 16 MSB of the extended key will serve as the AES GCM 128 secret key. The 16 LSB will serve as the IV.
sk = extended_key[:16]
nonce = extended_key[16:]
if debug:
logging.info("[Watcher] creating new blob")
logging.info("[Watcher] master key: {}".format(hexlify(key).decode()))
logging.info("[Watcher] sk: {}".format(hexlify(sk).decode()))
logging.info("[Watcher] nonce: {}".format(hexlify(nonce).decode()))
logging.info("[Watcher] encrypted_blob: {}".format(self.data))
# Decrypt
aesgcm = AESGCM(sk)
data = unhexlify(self.data.encode)
data = unhexlify(self.data.encode())
raw_tx = aesgcm.decrypt(nonce=nonce, data=data, associated_data=None)
return raw_tx

View File

@@ -5,7 +5,6 @@ from pisa.errors import *
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from conf import BTC_RPC_USER, BTC_RPC_PASSWD, BTC_RPC_HOST, BTC_RPC_PORT
CONFIRMATIONS_BEFORE_RETRY = 6
MIN_CONFIRMATIONS = 6
@@ -74,7 +73,7 @@ class Responder:
try:
if debug:
logging.info("[Responder] {} is already in the blockchain. Getting the confirmation count"
"and start monitoring the transaction".format(txid))
"and start monitoring the transaction".format(txid))
# If the transaction is already in the chain, we get the number of confirmations and watch the job
# until the end of the appointment
@@ -140,7 +139,7 @@ class Responder:
self.jobs[job_id].appointment_end, debug, logging, retry=True)
if debug:
logging.warning("[Responder] txid = {} has missed {} confirmations. Rebroadcasting"
.format(job_id, CONFIRMATIONS_BEFORE_RETRY))
.format(job_id, CONFIRMATIONS_BEFORE_RETRY))
else:
# Otherwise we increase the number of missed confirmations
self.jobs[job_id].missed_confirmations += 1
@@ -165,7 +164,7 @@ class Responder:
else:
if debug:
logging.warning("[Responder] reorg found! local prev. block id = {}, remote prev. block id = {}"
.format(prev_block_hash, block.get('previousblockhash')))
.format(prev_block_hash, block.get('previousblockhash')))
self.handle_reorgs(bitcoin_cli, debug, logging)
@@ -176,7 +175,7 @@ class Responder:
self.zmq_subscriber.terminate = True
if debug:
logging.info("[Responder] no more pending jobs, going back to sleep.")
logging.info("[Responder] no more pending jobs, going back to sleep")
def handle_reorgs(self, bitcoin_cli, debug, logging):
for job_id, job in self.jobs.items():
@@ -197,7 +196,7 @@ class Responder:
# FIXME: It should be safe but check Exception code anyway
if debug:
logging.warning("[Responder] dispute transaction (txid = {}) not found either!"
.format(job.dispute_txid))
.format(job.dispute_txid))
# ToDO: Dispute transaction is not there either, call reorg manager
pass

View File

@@ -1,3 +1,4 @@
from binascii import hexlify, unhexlify
from queue import Queue
from threading import Thread
from pisa.responder import Responder
@@ -54,7 +55,7 @@ class Watcher:
appointment_added = False
if debug:
logging.info('[Watcher] maximum appointments reached, appointment rejected (locator = {}).'
logging.info('[Watcher] maximum appointments reached, appointment rejected (locator = {})'
.format(appointment.locator))
return appointment_added
@@ -72,16 +73,16 @@ class Watcher:
try:
block = bitcoin_cli.getblock(block_hash)
txs = block.get('tx')
tx_ids = block.get('tx')
if debug:
logging.info("[Watcher] new block received {}".format(block_hash))
logging.info("[Watcher] list of transactions: {}".format(txs))
logging.info("[Watcher] list of transactions: {}".format(tx_ids))
potential_matches = []
for locator in self.appointments.keys():
potential_matches += [(locator, tx[32:]) for tx in txs if tx.startswith(locator)]
potential_matches += [(locator, txid[32:]) for txid in tx_ids if txid.startswith(locator)]
if debug:
if len(potential_matches) > 0:
@@ -117,7 +118,7 @@ class Watcher:
self.zmq_subscriber.terminate = True
if debug:
logging.error("[Watcher] no more pending appointments, going back to sleep.")
logging.error("[Watcher] no more pending appointments, going back to sleep")
def check_potential_matches(self, potential_matches, bitcoin_cli, debug, logging):
matches = []
@@ -126,7 +127,8 @@ class Watcher:
for appointment_pos, appointment in enumerate(self.appointments.get(locator)):
try:
dispute_txid = locator + k
raw_tx = appointment.encrypted_blob.decrypt(k)
raw_tx = appointment.encrypted_blob.decrypt(unhexlify(k), debug, logging)
raw_tx = hexlify(raw_tx).decode()
txid = bitcoin_cli.decoderawtransaction(raw_tx).get('txid')
matches.append((locator, appointment_pos, dispute_txid, txid, raw_tx))