mirror of https://github.com/aljazceru/mutiny-net.git (synced 2025-12-17 05:54:22 +01:00)

Commit: init commit of 24.0.1 custom signet image
Dockerfile (Normal file, 65 lines)
@@ -0,0 +1,65 @@
FROM debian:buster-slim as builder

ARG BITCOIN_VERSION=${BITCOIN_VERSION:-24.0.1}
ARG TRIPLET=${TRIPLET:-"x86_64-linux-gnu"}

RUN apt-get update && \
    apt-get install -qq --no-install-recommends ca-certificates dirmngr gosu wget libc6 procps python3

WORKDIR /tmp

# install bitcoin binaries
RUN BITCOIN_URL="https://bitcoincore.org/bin/bitcoin-core-${BITCOIN_VERSION}/bitcoin-${BITCOIN_VERSION}-${TRIPLET}.tar.gz" && \
    BITCOIN_FILE="bitcoin-${BITCOIN_VERSION}-${TRIPLET}.tar.gz" && \
    wget -qO "${BITCOIN_FILE}" "${BITCOIN_URL}" && \
    mkdir -p bin && \
    tar -xzvf "${BITCOIN_FILE}" -C /tmp/bin --strip-components=2 "bitcoin-${BITCOIN_VERSION}/bin/bitcoin-cli" "bitcoin-${BITCOIN_VERSION}/bin/bitcoind" "bitcoin-${BITCOIN_VERSION}/bin/bitcoin-wallet" "bitcoin-${BITCOIN_VERSION}/bin/bitcoin-util"

FROM debian:buster-slim as custom-signet-bitcoin

LABEL org.opencontainers.image.authors="NBD"
LABEL org.opencontainers.image.licenses=MIT
LABEL org.opencontainers.image.source="https://github.com/nbd-wtf/bitcoin_signet"

ENV BITCOIN_DIR /root/.bitcoin

ENV NBITS=${NBITS}
ENV SIGNETCHALLENGE=${SIGNETCHALLENGE}
ENV PRIVKEY=${PRIVKEY}

ENV RPCUSER=${RPCUSER:-"bitcoin"}
ENV RPCPASSWORD=${RPCPASSWORD:-"bitcoin"}
ENV COOKIEFILE=${COOKIEFILE:-"false"}
ENV ONIONPROXY=${ONIONPROXY:-""}
ENV TORPASSWORD=${TORPASSWORD:-""}
ENV TORCONTROL=${TORCONTROL:-""}
ENV I2PSAM=${I2PSAM:-""}

ENV UACOMMENT=${UACOMMENT:-"CustomSignet"}
ENV ZMQPUBRAWBLOCK=${ZMQPUBRAWBLOCK:-"tcp://0.0.0.0:28332"}
ENV ZMQPUBRAWTX=${ZMQPUBRAWTX:-"tcp://0.0.0.0:28333"}
ENV ZMQPUBHASHBLOCK=${ZMQPUBHASHBLOCK:-"tcp://0.0.0.0:28334"}

ENV RPCBIND=${RPCBIND:-"0.0.0.0:38332"}
ENV RPCALLOWIP=${RPCALLOWIP:-"0.0.0.0/0"}
ENV WHITELIST=${WHITELIST:-"0.0.0.0/0"}
ENV ADDNODE=${ADDNODE:-""}
ENV BLOCKPRODUCTIONDELAY=${BLOCKPRODUCTIONDELAY:-""}
ENV MINERENABLED=${MINERENABLED:-"1"}
ENV MINETO=${MINETO:-""}
ENV EXTERNAL_IP=${EXTERNAL_IP:-""}

VOLUME $BITCOIN_DIR
EXPOSE 28332 28333 28334 38332 38333 38334
RUN apt-get update && \
    apt-get install -qq --no-install-recommends procps python3 python3-pip jq && \
    apt-get clean
COPY --from=builder "/tmp/bin" /usr/local/bin
COPY docker-entrypoint.sh /usr/local/bin/entrypoint.sh
COPY miner_imports /usr/local/bin
COPY miner /usr/local/bin/miner
COPY *.sh /usr/local/bin/
COPY rpcauth.py /usr/local/bin/rpcauth.py
RUN pip3 install setuptools

ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]

CMD ["run.sh"]
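As a build sketch (not part of the commit; the tag and triplet values are illustrative), the two build args above select which Bitcoin Core release tarball is fetched and for which platform:

    docker build -t custom-signet-bitcoin \
      --build-arg BITCOIN_VERSION=24.0.1 \
      --build-arg TRIPLET=aarch64-linux-gnu \
      .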
README.md (changed, +33 -2)
@@ -1,2 +1,33 @@
-# bitcoin_signet
-Generic Signet Dockerfiles
+# Bitcoin Signet Docker Image
+
+## ENV Variables
+
+* `BLOCKPRODUCTIONDELAY` - default sleep period between mining blocks (**mining mode only**)
+  * if `~/.bitcoin/BLOCKPRODUCTIONDELAY.txt` is present, its value is used instead, allowing the delay to be changed dynamically.
+* `MINERENABLED` - flag for enabling the mining chain
+* `NBITS` - sets minimum mining difficulty (**mining mode only**)
+* `PRIVKEY` - private key of the signet signer (**mining mode only**)
+  * if `MINERENABLED=1` and not provided, one will be generated
+* `MINETO` - mine to a static address; if not provided, a new address is made for each block (**mining mode only**)
+* `SIGNETCHALLENGE` - sets the valid block producer for this signet
+  * if `MINERENABLED=1` and not provided, one will be generated; if provided, `PRIVKEY` must also be populated
+  * required for client mode
+*
+* `RPCUSER` - bitcoind RPC user
+* `RPCPASSWORD` - bitcoind RPC password
+*
+* `ONIONPROXY` - Tor SOCKS5 endpoint
+* `TORPASSWORD` - Tor control port password
+* `TORCONTROL` - Tor control port endpoint
+* `I2PSAM` - I2P SAM proxy endpoint
+* `UACOMMENT` - user-agent comment shown in `bitcoin-cli -netinfo` output
+*
+* `ZMQPUBRAWBLOCK` - bitcoind setting
+* `ZMQPUBRAWTX` - bitcoind setting
+* `ZMQPUBHASHBLOCK` - bitcoind setting
+*
+* `RPCBIND` - bitcoind setting
+* `RPCALLOWIP` - bitcoind setting
+* `WHITELIST` - bitcoind setting
+* `ADDNODE` - seed node location(s); comma-separate multiple nodes (needed for client mode)
+* `EXTERNAL_IP` - public IP/onion endpoint information; comma-separated for multiple IPs.
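A hypothetical run sketch tying the variables together (the image name, volume name, and peer values are illustrative, not defined by this commit):

    # mining node: generates PRIVKEY/SIGNETCHALLENGE on first run, one block every ~30s
    docker run -d --name signet-miner \
      -e MINERENABLED=1 \
      -e BLOCKPRODUCTIONDELAY=30 \
      -e RPCUSER=bitcoin -e RPCPASSWORD=bitcoin \
      -p 38332:38332 \
      -v signet-data:/root/.bitcoin \
      custom-signet-bitcoin

    # client node: joins the signet announced by the miner
    docker run -d --name signet-client \
      -e MINERENABLED=0 \
      -e SIGNETCHALLENGE=<challenge hex written by the miner> \
      -e ADDNODE=<miner-host>:38333 \
      custom-signet-bitcoin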
docker-entrypoint.sh (Executable file, 28 lines)
@@ -0,0 +1,28 @@
#!/bin/bash
set -eo pipefail

shutdown_gracefully() {

    echo "Container is shutting down, let's make sure bitcoind flushes the db."
    bitcoin-cli stop
    sleep 5
}
trap shutdown_gracefully SIGTERM SIGHUP SIGQUIT SIGINT

mkdir -p "${BITCOIN_DIR}"
# check if this is the first run; if so, run install.sh
if [[ ! -f "${BITCOIN_DIR}/install_done" ]]; then
    echo "install_done file not found, running install.sh."
    install.sh # configures based on args passed in, for a mining node or a peer
else
    echo "install_done file exists, skipping setup process."
    echo "rewrite bitcoin.conf"
    gen-bitcoind-conf.sh >~/.bitcoin/bitcoin.conf
fi

"$@" &
echo "Infinite loop"
while true
do
    tail -f /dev/null & wait ${!}
done
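A note on the closing loop (not part of the commit): bash cannot run a trap while it is blocked on a foreground child, so the script launches the workload with `"$@" &` and parks PID 1 in `wait`, which is interruptible. A minimal standalone sketch of the same pattern:

    #!/bin/bash
    trap 'echo "caught SIGTERM, cleaning up"; exit 0' SIGTERM
    sleep infinity &            # stand-in for the real workload
    tail -f /dev/null & wait $! # wait is interruptible, so the trap fires promptly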
gen-bitcoind-conf.sh (Executable file, 63 lines)
@@ -0,0 +1,63 @@
#!/bin/bash
SIGNETCHALLENGE=${SIGNETCHALLENGE:-$(cat ~/.bitcoin/SIGNETCHALLENGE.txt)}

RPCAUTH=$(/usr/local/bin/rpcauth.py $RPCUSER $RPCPASSWORD | tr -d '\n')
echo "signet=1"

if [[ "$COOKIEFILE" == "true" ]]; then
    echo "rpccookiefile=/root/.bitcoin/.cookie
rpcauth=$RPCAUTH"
else
    echo "rpcauth=$RPCAUTH
rpcuser=$RPCUSER
rpcpassword=$RPCPASSWORD"
fi

echo "txindex=1
blockfilterindex=1
peerblockfilters=1
coinstatsindex=1
dnsseed=0
persistmempool=1
uacomment=$UACOMMENT"

if [[ "$EXTERNAL_IP" != "" ]]; then
    echo $EXTERNAL_IP | tr ',' '\n' | while read ip; do
        echo "externalip=$ip"
    done
fi

echo "[signet]
daemon=1
listen=1
server=1
discover=1
signetchallenge=$SIGNETCHALLENGE
zmqpubrawblock=$ZMQPUBRAWBLOCK
zmqpubrawtx=$ZMQPUBRAWTX
zmqpubhashblock=$ZMQPUBHASHBLOCK
rpcbind=$RPCBIND
rpcallowip=$RPCALLOWIP
whitelist=$WHITELIST
fallbackfee=0.0002"

if [[ "$ADDNODE" != "" ]]; then
    echo $ADDNODE | tr ',' '\n' | while read node; do
        echo "addnode=$node"
    done
fi

if [[ "$I2PSAM" != "" ]]; then
    echo "i2psam=$I2PSAM"
fi
if [[ "$ONIONPROXY" != "" ]]; then
    echo "onion=$ONIONPROXY" # without a static IP the control port won't resolve as a domain
fi

if [[ "$TORPASSWORD" != "" ]]; then
    echo "torpassword=$TORPASSWORD"
fi

if [[ "$TORCONTROL" != "" ]]; then
    echo "torcontrol=$TORCONTROL"
fi
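For orientation (not part of the commit), with the image defaults (`RPCUSER`/`RPCPASSWORD` of bitcoin, `COOKIEFILE=false`, no Tor/I2P/addnode settings) the script emits a bitcoin.conf along these lines; the rpcauth salt and hash vary per run:

    signet=1
    rpcauth=bitcoin:<salt>$<hmac-sha256 hash>
    rpcuser=bitcoin
    rpcpassword=bitcoin
    txindex=1
    blockfilterindex=1
    peerblockfilters=1
    coinstatsindex=1
    dnsseed=0
    persistmempool=1
    uacomment=CustomSignet
    [signet]
    daemon=1
    listen=1
    server=1
    discover=1
    signetchallenge=<hex from SIGNETCHALLENGE.txt>
    zmqpubrawblock=tcp://0.0.0.0:28332
    zmqpubrawtx=tcp://0.0.0.0:28333
    zmqpubhashblock=tcp://0.0.0.0:28334
    rpcbind=0.0.0.0:38332
    rpcallowip=0.0.0.0/0
    whitelist=0.0.0.0/0
    fallbackfee=0.0002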
gen-signet-keys.sh (Executable file, 52 lines)
@@ -0,0 +1,52 @@
#!/bin/bash
DATADIR=${DATADIR:-"regtest-temp"}
BITCOINCLI=${BITCOINCLI:-"bitcoin-cli -regtest -datadir=$DATADIR "}
BITCOIND=${BITCOIND:-"bitcoind -datadir=$DATADIR -regtest -daemon "}

write_files() {
    # echo "ADDR=" $ADDR
    echo "PRIVKEY=" $PRIVKEY
    # echo "PUBKEY=" $PUBKEY
    echo "SIGNETCHALLENGE=" $SIGNETCHALLENGE
    # echo $ADDR > ~/.bitcoin/ADDR.txt
    echo $PRIVKEY >~/.bitcoin/PRIVKEY.txt
    # echo $PUBKEY > ~/.bitcoin/PUBKEY.txt
    echo $SIGNETCHALLENGE >~/.bitcoin/SIGNETCHALLENGE.txt
}

if [[ "$MINERENABLED" == "1" && ("$SIGNETCHALLENGE" == "" || "$PRIVKEY" == "") ]]; then
    echo "Generating new signetchallenge and privkey."
    # clean up if it exists
    rm -rf $DATADIR
    # make it fresh
    mkdir $DATADIR
    # kill any running daemon
    pkill bitcoind
    # minimal config file (hardcode bitcoin:bitcoin for rpc)
    echo "
regtest=1
server=1
rpcauth=bitcoin:c8c8b9740a470454255b7a38d4f38a52\$e8530d1c739a3bb0ec6e9513290def11651afbfd2b979f38c16ec2cf76cf348a
rpcuser=bitcoin
rpcpassword=bitcoin
" >$DATADIR/bitcoin.conf
    # start daemon
    $BITCOIND -wallet="temp"
    # wait a bit for startup
    sleep 5s
    # create wallet
    $BITCOINCLI createwallet "temp"
    # export future signet seeding key data
    ADDR=$($BITCOINCLI getnewaddress)
    PRIVKEY=$($BITCOINCLI dumpprivkey $ADDR)
    PUBKEY=$($BITCOINCLI getaddressinfo $ADDR | jq .pubkey | tr -d '""')
    # don't need regtest anymore
    $BITCOINCLI stop
    SIGNETCHALLENGE=$(echo '5121'$PUBKEY'51ae')

    # cleanup
    rm -rf $DATADIR
else
    echo "Imported signetchallenge and privkey being used."
fi

write_files
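The `'5121'$PUBKEY'51ae'` concatenation above is the raw hex of a 1-of-1 OP_CHECKMULTISIG script, which signet uses as its block-signing challenge. Decoded byte by byte (the pubkey shown as a placeholder):

    51                   OP_1               # 1 signature required
    21 <33-byte pubkey>  push the signer's compressed public key (0x21 = push 33 bytes)
    51                   OP_1               # 1 key total
    ae                   OP_CHECKMULTISIG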
install.sh (Executable file, 13 lines)
@@ -0,0 +1,13 @@
#!/bin/bash
echo "Generate or import keyset"
gen-signet-keys.sh
echo "Generate bitcoind configuration"
gen-bitcoind-conf.sh >~/.bitcoin/bitcoin.conf
echo "Setup Signet"
setup-signet.sh

if [[ "$MINE_GENESIS" == "1" ]]; then
    echo "Mine Genesis Block"
    mine-genesis.sh
fi

touch ~/.bitcoin/install_done
mine-genesis.sh (Executable file, 6 lines)
@@ -0,0 +1,6 @@
#!/bin/bash
ADDR=${ADDR:-$(bitcoin-cli getnewaddress)}
NBITS=${NBITS:-"1e0377ae"} # minimum difficulty on signet
miner --cli="bitcoin-cli" generate --address=$ADDR --grind-cmd="bitcoin-util grind" --nbits=$NBITS --set-block-time=$(date +%s)
mine.sh (Executable file, 19 lines)
@@ -0,0 +1,19 @@
#!/bin/bash
NBITS=${NBITS:-"1e0377ae"} # minimum difficulty on signet

while true; do
    ADDR=${MINETO:-$(bitcoin-cli getnewaddress)}
    if [[ -f "${BITCOIN_DIR}/BLOCKPRODUCTIONDELAY.txt" ]]; then
        BLOCKPRODUCTIONDELAY_OVERRIDE=$(cat ~/.bitcoin/BLOCKPRODUCTIONDELAY.txt)
        echo "Delay OVERRIDE before next block" $BLOCKPRODUCTIONDELAY_OVERRIDE "seconds."
        sleep $BLOCKPRODUCTIONDELAY_OVERRIDE
    else
        BLOCKPRODUCTIONDELAY=${BLOCKPRODUCTIONDELAY:="0"}
        if [[ $BLOCKPRODUCTIONDELAY -gt 0 ]]; then
            echo "Delay before next block" $BLOCKPRODUCTIONDELAY "seconds."
            sleep $BLOCKPRODUCTIONDELAY
        fi
    fi
    echo "Mine To:" $ADDR
    miner --cli="bitcoin-cli" generate --grind-cmd="bitcoin-util grind" --address=$ADDR --nbits=$NBITS --set-block-time=$(date +%s)
done
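Both mining scripts wrap the `miner` helper included below. Two further invocations it supports, a sketch based on the flags the helper itself defines (the reward address is a placeholder):

    # estimate an nbits value that averages ~30 seconds of grinding per block
    miner --cli="bitcoin-cli" calibrate --grind-cmd="bitcoin-util grind" --seconds=30

    # mine continuously at minimum signet difficulty to a fixed reward address
    miner --cli="bitcoin-cli" generate --ongoing --min-nbits \
          --grind-cmd="bitcoin-util grind" --address=<reward address>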
miner (Executable file, 608 lines)
@@ -0,0 +1,608 @@
#!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Modified for quick blocks and to run in a leaner docker container
import argparse
import base64
import json
import logging
import math
import os
import re
import struct
import sys
import time
import subprocess

from io import BytesIO

PATH_BASE_CONTRIB_SIGNET = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
PATH_BASE_TEST_FUNCTIONAL = os.path.abspath(os.path.join(PATH_BASE_CONTRIB_SIGNET, "miner_imports"))
sys.path.insert(0, PATH_BASE_TEST_FUNCTIONAL)

from test_framework.blocktools import WITNESS_COMMITMENT_HEADER, script_BIP34_coinbase_height # noqa: E402
from test_framework.messages import CBlock, CBlockHeader, COutPoint, CTransaction, CTxIn, CTxInWitness, CTxOut, from_hex, deser_string, hash256, ser_compact_size, ser_string, ser_uint256, tx_from_hex, uint256_from_str # noqa: E402
from test_framework.script import CScriptOp # noqa: E402

logging.basicConfig(
    format='%(asctime)s %(levelname)s %(message)s',
    level=logging.INFO,
    datefmt='%Y-%m-%d %H:%M:%S')

SIGNET_HEADER = b"\xec\xc7\xda\xa2"
PSBT_SIGNET_BLOCK = b"\xfc\x06signetb" # proprietary PSBT global field holding the block being signed
RE_MULTIMINER = re.compile("^(\d+)(-(\d+))?/(\d+)$")

# #### some helpers that could go into test_framework

# like from_hex, but without the hex part
def FromBinary(cls, stream):
    """deserialize a binary stream (or bytes object) into an object"""
    # handle bytes object by turning it into a stream
    was_bytes = isinstance(stream, bytes)
    if was_bytes:
        stream = BytesIO(stream)
    obj = cls()
    obj.deserialize(stream)
    if was_bytes:
        assert len(stream.read()) == 0
    return obj

class PSBTMap:
    """Class for serializing and deserializing PSBT maps"""

    def __init__(self, map=None):
        self.map = map if map is not None else {}

    def deserialize(self, f):
        m = {}
        while True:
            k = deser_string(f)
            if len(k) == 0:
                break
            v = deser_string(f)
            if len(k) == 1:
                k = k[0]
            assert k not in m
            m[k] = v
        self.map = m

    def serialize(self):
        m = b""
        for k,v in self.map.items():
            if isinstance(k, int) and 0 <= k and k <= 255:
                k = bytes([k])
            m += ser_compact_size(len(k)) + k
            m += ser_compact_size(len(v)) + v
        m += b"\x00"
        return m

class PSBT:
    """Class for serializing and deserializing PSBTs"""

    def __init__(self):
        self.g = PSBTMap()
        self.i = []
        self.o = []
        self.tx = None

    def deserialize(self, f):
        assert f.read(5) == b"psbt\xff"
        self.g = FromBinary(PSBTMap, f)
        assert 0 in self.g.map
        self.tx = FromBinary(CTransaction, self.g.map[0])
        self.i = [FromBinary(PSBTMap, f) for _ in self.tx.vin]
        self.o = [FromBinary(PSBTMap, f) for _ in self.tx.vout]
        return self

    def serialize(self):
        assert isinstance(self.g, PSBTMap)
        assert isinstance(self.i, list) and all(isinstance(x, PSBTMap) for x in self.i)
        assert isinstance(self.o, list) and all(isinstance(x, PSBTMap) for x in self.o)
        assert 0 in self.g.map
        tx = FromBinary(CTransaction, self.g.map[0])
        assert len(tx.vin) == len(self.i)
        assert len(tx.vout) == len(self.o)

        psbt = [x.serialize() for x in [self.g] + self.i + self.o]
        return b"psbt\xff" + b"".join(psbt)

    def to_base64(self):
        return base64.b64encode(self.serialize()).decode("utf8")

    @classmethod
    def from_base64(cls, b64psbt):
        return FromBinary(cls, base64.b64decode(b64psbt))

# #####

def create_coinbase(height, value, spk):
    cb = CTransaction()
    cb.vin = [CTxIn(COutPoint(0, 0xffffffff), script_BIP34_coinbase_height(height), 0xffffffff)]
    cb.vout = [CTxOut(value, spk)]
    return cb

def get_witness_script(witness_root, witness_nonce):
    commitment = uint256_from_str(hash256(ser_uint256(witness_root) + ser_uint256(witness_nonce)))
    return b"\x6a" + CScriptOp.encode_op_pushdata(WITNESS_COMMITMENT_HEADER + ser_uint256(commitment))

def signet_txs(block, challenge):
    # assumes signet solution has not been added yet so does not need
    # to be removed

    txs = block.vtx[:]
    txs[0] = CTransaction(txs[0])
    txs[0].vout[-1].scriptPubKey += CScriptOp.encode_op_pushdata(SIGNET_HEADER)
    hashes = []
    for tx in txs:
        tx.rehash()
        hashes.append(ser_uint256(tx.sha256))
    mroot = block.get_merkle_root(hashes)

    sd = b""
    sd += struct.pack("<i", block.nVersion)
    sd += ser_uint256(block.hashPrevBlock)
    sd += ser_uint256(mroot)
    sd += struct.pack("<I", block.nTime)

    to_spend = CTransaction()
    to_spend.nVersion = 0
    to_spend.nLockTime = 0
    to_spend.vin = [CTxIn(COutPoint(0, 0xFFFFFFFF), b"\x00" + CScriptOp.encode_op_pushdata(sd), 0)]
    to_spend.vout = [CTxOut(0, challenge)]
    to_spend.rehash()

    spend = CTransaction()
    spend.nVersion = 0
    spend.nLockTime = 0
    spend.vin = [CTxIn(COutPoint(to_spend.sha256, 0), b"", 0)]
    spend.vout = [CTxOut(0, b"\x6a")]

    return spend, to_spend

def do_createpsbt(block, signme, spendme):
    psbt = PSBT()
    psbt.g = PSBTMap( {0: signme.serialize(),
                       PSBT_SIGNET_BLOCK: block.serialize()
                      } )
    psbt.i = [ PSBTMap( {0: spendme.serialize(),
                         3: bytes([1,0,0,0])})
             ]
    psbt.o = [ PSBTMap() ]
    return psbt.to_base64()

def do_decode_psbt(b64psbt):
    psbt = PSBT.from_base64(b64psbt)

    assert len(psbt.tx.vin) == 1
    assert len(psbt.tx.vout) == 1
    assert PSBT_SIGNET_BLOCK in psbt.g.map

    scriptSig = psbt.i[0].map.get(7, b"")
    scriptWitness = psbt.i[0].map.get(8, b"\x00")

    return FromBinary(CBlock, psbt.g.map[PSBT_SIGNET_BLOCK]), ser_string(scriptSig) + scriptWitness

def finish_block(block, signet_solution, grind_cmd):
    block.vtx[0].vout[-1].scriptPubKey += CScriptOp.encode_op_pushdata(SIGNET_HEADER + signet_solution)
    block.vtx[0].rehash()
    block.hashMerkleRoot = block.calc_merkle_root()
    if grind_cmd is None:
        block.solve()
    else:
        headhex = CBlockHeader.serialize(block).hex()
        cmd = grind_cmd.split(" ") + [headhex]
        newheadhex = subprocess.run(cmd, stdout=subprocess.PIPE, input=b"", check=True).stdout.strip()
        newhead = from_hex(CBlockHeader(), newheadhex.decode('utf8'))
        block.nNonce = newhead.nNonce
        block.rehash()
    return block

def generate_psbt(tmpl, reward_spk, *, blocktime=None):
    signet_spk = tmpl["signet_challenge"]
    signet_spk_bin = bytes.fromhex(signet_spk)

    cbtx = create_coinbase(height=tmpl["height"], value=tmpl["coinbasevalue"], spk=reward_spk)
    cbtx.vin[0].nSequence = 2**32-2
    cbtx.rehash()

    block = CBlock()
    block.nVersion = tmpl["version"]
    block.hashPrevBlock = int(tmpl["previousblockhash"], 16)
    block.nTime = tmpl["curtime"] if blocktime is None else blocktime
    if block.nTime < tmpl["mintime"]:
        block.nTime = tmpl["mintime"]
    block.nBits = int(tmpl["bits"], 16)
    block.nNonce = 0
    block.vtx = [cbtx] + [tx_from_hex(t["data"]) for t in tmpl["transactions"]]

    witnonce = 0
    witroot = block.calc_witness_merkle_root()
    cbwit = CTxInWitness()
    cbwit.scriptWitness.stack = [ser_uint256(witnonce)]
    block.vtx[0].wit.vtxinwit = [cbwit]
    block.vtx[0].vout.append(CTxOut(0, get_witness_script(witroot, witnonce)))

    signme, spendme = signet_txs(block, signet_spk_bin)

    return do_createpsbt(block, signme, spendme)

def get_reward_address(args, height):
    if args.address is not None:
        return args.address

    if '*' not in args.descriptor:
        addr = json.loads(args.bcli("deriveaddresses", args.descriptor))[0]
        args.address = addr
        return addr

    remove = [k for k in args.derived_addresses.keys() if k+20 <= height]
    for k in remove:
        del args.derived_addresses[k]

    addr = args.derived_addresses.get(height, None)
    if addr is None:
        addrs = json.loads(args.bcli("deriveaddresses", args.descriptor, "[%d,%d]" % (height, height+20)))
        addr = addrs[0]
        for k, a in enumerate(addrs):
            args.derived_addresses[height+k] = a

    return addr

def get_reward_addr_spk(args, height):
    assert args.address is not None or args.descriptor is not None

    if hasattr(args, "reward_spk"):
        return args.address, args.reward_spk

    reward_addr = get_reward_address(args, height)
    reward_spk = bytes.fromhex(json.loads(args.bcli("getaddressinfo", reward_addr))["scriptPubKey"])
    if args.address is not None:
        # will always be the same, so cache
        args.reward_spk = reward_spk

    return reward_addr, reward_spk

def do_genpsbt(args):
    tmpl = json.load(sys.stdin)
    _, reward_spk = get_reward_addr_spk(args, tmpl["height"])
    psbt = generate_psbt(tmpl, reward_spk)
    print(psbt)

def do_solvepsbt(args):
    block, signet_solution = do_decode_psbt(sys.stdin.read())
    block = finish_block(block, signet_solution, args.grind_cmd)
    print(block.serialize().hex())

def nbits_to_target(nbits):
    shift = (nbits >> 24) & 0xff
    return (nbits & 0x00ffffff) * 2**(8*(shift - 3))

def target_to_nbits(target):
    tstr = "{0:x}".format(target)
    if len(tstr) < 6:
        tstr = ("000000"+tstr)[-6:]
    if len(tstr) % 2 != 0:
        tstr = "0" + tstr
    if int(tstr[0],16) >= 0x8:
        # avoid "negative"
        tstr = "00" + tstr
    fix = int(tstr[:6], 16)
    sz = len(tstr)//2
    if tstr[6:] != "0"*(sz*2-6):
        fix += 1

    return int("%02x%06x" % (sz,fix), 16)

def seconds_to_hms(s):
    if s == 0:
        return "0s"
    neg = (s < 0)
    if neg:
        s = -s
    out = ""
    if s % 60 > 0:
        out = "%ds" % (s % 60)
    s //= 60
    if s % 60 > 0:
        out = "%dm%s" % (s % 60, out)
    s //= 60
    if s > 0:
        out = "%dh%s" % (s, out)
    if neg:
        out = "-" + out
    return out

def next_block_delta(last_nbits, last_hash, ultimate_target, do_poisson):
    this_interval = 0.000001
    return this_interval

def next_block_is_mine(last_hash, my_blocks):
    det_rand = int(last_hash[-16:-8], 16)
    return my_blocks[0] <= (det_rand % my_blocks[2]) < my_blocks[1]

def do_generate(args):
    if args.max_blocks is not None:
        if args.ongoing:
            logging.error("Cannot specify both --ongoing and --max-blocks")
            return 1
        if args.max_blocks < 1:
            logging.error("N must be a positive integer")
            return 1
        max_blocks = args.max_blocks
    elif args.ongoing:
        max_blocks = None
    else:
        max_blocks = 1

    if args.set_block_time is not None and max_blocks != 1:
        logging.error("Cannot specify --ongoing or --max-blocks > 1 when using --set-block-time")
        return 1
    if args.set_block_time is not None and args.set_block_time < 0:
        args.set_block_time = time.time()
        logging.info("Treating negative block time as current time (%d)" % (args.set_block_time))

    if args.min_nbits:
        if args.nbits is not None:
            logging.error("Cannot specify --nbits and --min-nbits")
            return 1
        args.nbits = "1e0377ae"
        logging.info("Using nbits=%s" % (args.nbits))

    if args.set_block_time is None:
        if args.nbits is None or len(args.nbits) != 8:
            logging.error("Must specify --nbits (use calibrate command to determine value)")
            return 1

    if args.multiminer is None:
        my_blocks = (0,1,1)
    else:
        if not args.ongoing:
            logging.error("Cannot specify --multiminer without --ongoing")
            return 1
        m = RE_MULTIMINER.match(args.multiminer)
        if m is None:
            logging.error("--multiminer argument must be k/m or j-k/m")
            return 1
        start,_,stop,total = m.groups()
        if stop is None:
            stop = start
        start, stop, total = map(int, (start, stop, total))
        if stop < start or start <= 0 or total < stop or total == 0:
            logging.error("Inconsistent values for --multiminer")
            return 1
        my_blocks = (start-1, stop, total)

    ultimate_target = nbits_to_target(int(args.nbits,16))

    mined_blocks = 0
    bestheader = {"hash": None}
    lastheader = None
    while max_blocks is None or mined_blocks < max_blocks:

        # current status?
        bci = json.loads(args.bcli("getblockchaininfo"))

        if bestheader["hash"] != bci["bestblockhash"]:
            bestheader = json.loads(args.bcli("getblockheader", bci["bestblockhash"]))

        if lastheader is None:
            lastheader = bestheader["hash"]
        elif bestheader["hash"] != lastheader:
            next_delta = next_block_delta(int(bestheader["bits"], 16), bestheader["hash"], ultimate_target, args.poisson)
            next_delta += bestheader["time"] - time.time()
            next_is_mine = next_block_is_mine(bestheader["hash"], my_blocks)
            logging.info("Received new block at height %d; next in %s (%s)", bestheader["height"], seconds_to_hms(next_delta), ("mine" if next_is_mine else "backup"))
            lastheader = bestheader["hash"]

        # when is the next block due to be mined?
        now = time.time()
        if args.set_block_time is not None:
            logging.debug("Setting start time to %d", args.set_block_time)
            mine_time = args.set_block_time
            action_time = now
            is_mine = True
        elif bestheader["height"] == 0:
            time_delta = next_block_delta(int(bestheader["bits"], 16), bci["bestblockhash"], ultimate_target, args.poisson)
            time_delta *= 100 # 100 blocks
            logging.info("Backdating time for first block to %d minutes ago" % (time_delta/60))
            mine_time = now - time_delta
            action_time = now
            is_mine = True
        else:
            time_delta = next_block_delta(int(bestheader["bits"], 16), bci["bestblockhash"], ultimate_target, args.poisson)
            mine_time = bestheader["time"] + time_delta

            is_mine = next_block_is_mine(bci["bestblockhash"], my_blocks)

            action_time = mine_time
            if not is_mine:
                action_time += args.backup_delay

            if args.standby_delay > 0:
                action_time += args.standby_delay
            elif mined_blocks == 0:
                # for non-standby, always mine immediately on startup,
                # even if the next block shouldn't be ours
                action_time = now

        # don't want fractional times so round down
        mine_time = int(mine_time)
        action_time = int(action_time)

        # can't mine a block 2h in the future; 1h55m for some safety
        action_time = max(action_time, mine_time - 6900)

        # ready to go? otherwise sleep and check for new block
        if now < action_time:
            sleep_for = min(action_time - now, 60)
            if mine_time < now:
                # someone else might have mined the block,
                # so check frequently, so we don't end up late
                # mining the next block if it's ours
                sleep_for = min(20, sleep_for)
            minestr = "mine" if is_mine else "backup"
            logging.debug("Sleeping for %s, next block due in %s (%s)" % (seconds_to_hms(sleep_for), seconds_to_hms(mine_time - now), minestr))
            time.sleep(sleep_for)
            continue

        # gbt
        tmpl = json.loads(args.bcli("getblocktemplate", '{"rules":["signet","segwit"]}'))
        if tmpl["previousblockhash"] != bci["bestblockhash"]:
            logging.warning("GBT based off unexpected block (%s not %s), retrying", tmpl["previousblockhash"], bci["bestblockhash"])
            time.sleep(1)
            continue

        logging.debug("GBT template: %s", tmpl)

        if tmpl["mintime"] > mine_time:
            logging.info("Updating block time from %d to %d", mine_time, tmpl["mintime"])
            mine_time = tmpl["mintime"]
            if mine_time > now:
                logging.error("GBT mintime is in the future: %d is %d seconds later than %d", mine_time, (mine_time-now), now)
                return 1

        # address for reward
        reward_addr, reward_spk = get_reward_addr_spk(args, tmpl["height"])

        # mine block
        logging.debug("Mining block delta=%s start=%s mine=%s", seconds_to_hms(mine_time-bestheader["time"]), mine_time, is_mine)
        mined_blocks += 1
        psbt = generate_psbt(tmpl, reward_spk, blocktime=mine_time)
        input_stream = os.linesep.join([psbt, "true", "ALL"]).encode('utf8')
        psbt_signed = json.loads(args.bcli("-stdin", "walletprocesspsbt", input=input_stream))
        if not psbt_signed.get("complete",False):
            logging.debug("Generated PSBT: %s" % (psbt,))
            sys.stderr.write("PSBT signing failed\n")
            return 1
        block, signet_solution = do_decode_psbt(psbt_signed["psbt"])
        block = finish_block(block, signet_solution, args.grind_cmd)

        # submit block
        r = args.bcli("-stdin", "submitblock", input=block.serialize().hex().encode('utf8'))

        # report
        bstr = "block" if is_mine else "backup block"

        next_delta = next_block_delta(block.nBits, block.hash, ultimate_target, args.poisson)
        next_delta += block.nTime - time.time()
        next_is_mine = next_block_is_mine(block.hash, my_blocks)

        logging.debug("Block hash %s payout to %s", block.hash, reward_addr)
        logging.info("Mined %s at height %d; next in %s (%s)", bstr, tmpl["height"], seconds_to_hms(next_delta), ("mine" if next_is_mine else "backup"))
        if r != "":
            logging.warning("submitblock returned %s for height %d hash %s", r, tmpl["height"], block.hash)
        lastheader = block.hash

def do_calibrate(args):
    if args.nbits is not None and args.seconds is not None:
        sys.stderr.write("Can only specify one of --nbits or --seconds\n")
        return 1
    if args.nbits is not None and len(args.nbits) != 8:
        sys.stderr.write("Must specify 8 hex digits for --nbits\n")
        return 1

    TRIALS = 600 # gets variance down pretty low
    TRIAL_BITS = 0x1e3ea75f # takes about 5m to do 600 trials

    header = CBlockHeader()
    header.nBits = TRIAL_BITS
    targ = nbits_to_target(header.nBits)

    start = time.time()
    count = 0
    for i in range(TRIALS):
        header.nTime = i
        header.nNonce = 0
        headhex = header.serialize().hex()
        cmd = args.grind_cmd.split(" ") + [headhex]
        newheadhex = subprocess.run(cmd, stdout=subprocess.PIPE, input=b"", check=True).stdout.strip()

    avg = (time.time() - start) * 1.0 / TRIALS

    if args.nbits is not None:
        want_targ = nbits_to_target(int(args.nbits,16))
        want_time = avg*targ/want_targ
    else:
        want_time = args.seconds if args.seconds is not None else 25
        want_targ = int(targ*(avg/want_time))

    print("nbits=%08x for %ds average mining time" % (target_to_nbits(want_targ), want_time))
    return 0

def bitcoin_cli(basecmd, args, **kwargs):
    cmd = basecmd + ["-signet"] + args
    logging.debug("Calling bitcoin-cli: %r", cmd)
    out = subprocess.run(cmd, stdout=subprocess.PIPE, **kwargs, check=True).stdout
    if isinstance(out, bytes):
        out = out.decode('utf8')
    return out.strip()

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--cli", default="bitcoin-cli", type=str, help="bitcoin-cli command")
    parser.add_argument("--debug", action="store_true", help="Print debugging info")
    parser.add_argument("--quiet", action="store_true", help="Only print warnings/errors")

    cmds = parser.add_subparsers(help="sub-commands")
    genpsbt = cmds.add_parser("genpsbt", help="Generate a block PSBT for signing")
    genpsbt.set_defaults(fn=do_genpsbt)

    solvepsbt = cmds.add_parser("solvepsbt", help="Solve a signed block PSBT")
    solvepsbt.set_defaults(fn=do_solvepsbt)

    generate = cmds.add_parser("generate", help="Mine blocks")
    generate.set_defaults(fn=do_generate)
    generate.add_argument("--ongoing", action="store_true", help="Keep mining blocks")
    generate.add_argument("--max-blocks", default=None, type=int, help="Max blocks to mine (default=1)")
    generate.add_argument("--set-block-time", default=None, type=int, help="Set block time (unix timestamp)")
    generate.add_argument("--nbits", default=None, type=str, help="Target nBits (specify difficulty)")
    generate.add_argument("--min-nbits", action="store_true", help="Target minimum nBits (use min difficulty)")
    generate.add_argument("--poisson", action="store_true", help="Simulate randomised block times")
    generate.add_argument("--multiminer", default=None, type=str, help="Specify which set of blocks to mine (eg: 1-40/100 for the first 40%%, 2/3 for the second 3rd)")
    generate.add_argument("--backup-delay", default=300, type=int, help="Seconds to delay before mining blocks reserved for other miners (default=300)")
    generate.add_argument("--standby-delay", default=0, type=int, help="Seconds to delay before mining blocks (default=0)")

    calibrate = cmds.add_parser("calibrate", help="Calibrate difficulty")
    calibrate.set_defaults(fn=do_calibrate)
    calibrate.add_argument("--nbits", type=str, default=None)
    calibrate.add_argument("--seconds", type=int, default=None)

    for sp in [genpsbt, generate]:
        sp.add_argument("--address", default=None, type=str, help="Address for block reward payment")
        sp.add_argument("--descriptor", default=None, type=str, help="Descriptor for block reward payment")

    for sp in [solvepsbt, generate, calibrate]:
        sp.add_argument("--grind-cmd", default=None, type=str, required=(sp==calibrate), help="Command to grind a block header for proof-of-work")

    args = parser.parse_args(sys.argv[1:])

    args.bcli = lambda *a, input=b"", **kwargs: bitcoin_cli(args.cli.split(" "), list(a), input=input, **kwargs)

    if hasattr(args, "address") and hasattr(args, "descriptor"):
        if args.address is None and args.descriptor is None:
            sys.stderr.write("Must specify --address or --descriptor\n")
            return 1
        elif args.address is not None and args.descriptor is not None:
            sys.stderr.write("Only specify one of --address or --descriptor\n")
            return 1
        args.derived_addresses = {}

    if args.debug:
        logging.getLogger().setLevel(logging.DEBUG)
    elif args.quiet:
        logging.getLogger().setLevel(logging.WARNING)
    else:
        logging.getLogger().setLevel(logging.INFO)

    if hasattr(args, "fn"):
        return args.fn(args)
    else:
        logging.error("Must specify command")
        return 1

if __name__ == "__main__":
    main()
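For reference, nbits_to_target above unpacks Bitcoin's compact difficulty encoding: the high byte is a base-256 exponent and the low three bytes a mantissa, i.e. target = mantissa * 256^(exponent - 3). Working through the minimum-difficulty value 1e0377ae used throughout these scripts:

    nbits    = 0x1e0377ae
    exponent = 0x1e = 30
    mantissa = 0x0377ae
    target   = 0x0377ae * 256^(30 - 3) = 0x0377ae * 2^216
             (the mantissa followed by 27 zero bytes, as a 256-bit target)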
miner_imports/test_framework/__init__.py (Normal file, 0 lines)
miner_imports/test_framework/address.py (Normal file, 175 lines)
@@ -0,0 +1,175 @@
#!/usr/bin/env python3
# Copyright (c) 2016-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Encode and decode Bitcoin addresses.

- base58 P2PKH and P2SH addresses.
- bech32 segwit v0 P2WPKH and P2WSH addresses.
- bech32m segwit v1 P2TR addresses."""

import enum
import unittest

from .script import (
    CScript,
    OP_0,
    OP_TRUE,
    hash160,
    hash256,
    sha256,
    taproot_construct,
)
from .segwit_addr import encode_segwit_address
from .util import assert_equal

ADDRESS_BCRT1_UNSPENDABLE = 'bcrt1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq3xueyj'
ADDRESS_BCRT1_UNSPENDABLE_DESCRIPTOR = 'addr(bcrt1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq3xueyj)#juyq9d97'
# Coins sent to this address can be spent with a witness stack of just OP_TRUE
ADDRESS_BCRT1_P2WSH_OP_TRUE = 'bcrt1qft5p2uhsdcdc3l2ua4ap5qqfg4pjaqlp250x7us7a8qqhrxrxfsqseac85'


class AddressType(enum.Enum):
    bech32 = 'bech32'
    p2sh_segwit = 'p2sh-segwit'
    legacy = 'legacy'  # P2PKH


chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'


def create_deterministic_address_bcrt1_p2tr_op_true():
    """
    Generates a deterministic bech32m address (segwit v1 output) that
    can be spent with a witness stack of OP_TRUE and the control block
    with internal public key (script-path spending).

    Returns a tuple with the generated address and the internal key.
    """
    internal_key = (1).to_bytes(32, 'big')
    scriptPubKey = taproot_construct(internal_key, [(None, CScript([OP_TRUE]))]).scriptPubKey
    address = encode_segwit_address("bcrt", 1, scriptPubKey[2:])
    assert_equal(address, 'bcrt1p9yfmy5h72durp7zrhlw9lf7jpwjgvwdg0jr0lqmmjtgg83266lqsekaqka')
    return (address, internal_key)


def byte_to_base58(b, version):
    result = ''
    b = bytes([version]) + b  # prepend version
    b += hash256(b)[:4]       # append checksum
    value = int.from_bytes(b, 'big')
    while value > 0:
        result = chars[value % 58] + result
        value //= 58
    while b[0] == 0:
        result = chars[0] + result
        b = b[1:]
    return result


def base58_to_byte(s):
    """Converts a base58-encoded string to its data and version.

    Throws if the base58 checksum is invalid."""
    if not s:
        return b''
    n = 0
    for c in s:
        n *= 58
        assert c in chars
        digit = chars.index(c)
        n += digit
    h = '%x' % n
    if len(h) % 2:
        h = '0' + h
    res = n.to_bytes((n.bit_length() + 7) // 8, 'big')
    pad = 0
    for c in s:
        if c == chars[0]:
            pad += 1
        else:
            break
    res = b'\x00' * pad + res

    # Assert if the checksum is invalid
    assert_equal(hash256(res[:-4])[:4], res[-4:])

    return res[1:-4], int(res[0])


def keyhash_to_p2pkh(hash, main=False):
    assert len(hash) == 20
    version = 0 if main else 111
    return byte_to_base58(hash, version)

def scripthash_to_p2sh(hash, main=False):
    assert len(hash) == 20
    version = 5 if main else 196
    return byte_to_base58(hash, version)

def key_to_p2pkh(key, main=False):
    key = check_key(key)
    return keyhash_to_p2pkh(hash160(key), main)

def script_to_p2sh(script, main=False):
    script = check_script(script)
    return scripthash_to_p2sh(hash160(script), main)

def key_to_p2sh_p2wpkh(key, main=False):
    key = check_key(key)
    p2shscript = CScript([OP_0, hash160(key)])
    return script_to_p2sh(p2shscript, main)

def program_to_witness(version, program, main=False):
    if (type(program) is str):
        program = bytes.fromhex(program)
    assert 0 <= version <= 16
    assert 2 <= len(program) <= 40
    assert version > 0 or len(program) in [20, 32]
    return encode_segwit_address("bc" if main else "bcrt", version, program)

def script_to_p2wsh(script, main=False):
    script = check_script(script)
    return program_to_witness(0, sha256(script), main)

def key_to_p2wpkh(key, main=False):
    key = check_key(key)
    return program_to_witness(0, hash160(key), main)

def script_to_p2sh_p2wsh(script, main=False):
    script = check_script(script)
    p2shscript = CScript([OP_0, sha256(script)])
    return script_to_p2sh(p2shscript, main)

def check_key(key):
    if (type(key) is str):
        key = bytes.fromhex(key)  # Assuming this is hex string
    if (type(key) is bytes and (len(key) == 33 or len(key) == 65)):
        return key
    assert False

def check_script(script):
    if (type(script) is str):
        script = bytes.fromhex(script)  # Assuming this is hex string
    if (type(script) is bytes or type(script) is CScript):
        return script
    assert False


class TestFrameworkScript(unittest.TestCase):
    def test_base58encodedecode(self):
        def check_base58(data, version):
            self.assertEqual(base58_to_byte(byte_to_base58(data, version)), (data, version))

        check_base58(bytes.fromhex('1f8ea1702a7bd4941bca0941b852c4bbfedb2e05'), 111)
        check_base58(bytes.fromhex('3a0b05f4d7f66c3ba7009f453530296c845cc9cf'), 111)
        check_base58(bytes.fromhex('41c1eaf111802559bad61b60d62b1f897c63928a'), 111)
        check_base58(bytes.fromhex('0041c1eaf111802559bad61b60d62b1f897c63928a'), 111)
        check_base58(bytes.fromhex('000041c1eaf111802559bad61b60d62b1f897c63928a'), 111)
        check_base58(bytes.fromhex('00000041c1eaf111802559bad61b60d62b1f897c63928a'), 111)
        check_base58(bytes.fromhex('1f8ea1702a7bd4941bca0941b852c4bbfedb2e05'), 0)
        check_base58(bytes.fromhex('3a0b05f4d7f66c3ba7009f453530296c845cc9cf'), 0)
        check_base58(bytes.fromhex('41c1eaf111802559bad61b60d62b1f897c63928a'), 0)
        check_base58(bytes.fromhex('0041c1eaf111802559bad61b60d62b1f897c63928a'), 0)
        check_base58(bytes.fromhex('000041c1eaf111802559bad61b60d62b1f897c63928a'), 0)
        check_base58(bytes.fromhex('00000041c1eaf111802559bad61b60d62b1f897c63928a'), 0)
miner_imports/test_framework/authproxy.py (Normal file, 204 lines)
@@ -0,0 +1,204 @@
# Copyright (c) 2011 Jeff Garzik
#
# Previous copyright, from python-jsonrpc/jsonrpc/proxy.py:
#
# Copyright (c) 2007 Jan-Klaas Kollhof
#
# This file is part of jsonrpc.
#
# jsonrpc is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this software; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""HTTP proxy for opening RPC connection to bitcoind.

AuthServiceProxy has the following improvements over python-jsonrpc's
ServiceProxy class:

- HTTP connections persist for the life of the AuthServiceProxy object
  (if server supports HTTP/1.1)
- sends protocol 'version', per JSON-RPC 1.1
- sends proper, incrementing 'id'
- sends Basic HTTP authentication headers
- parses all JSON numbers that look like floats as Decimal
- uses standard Python json lib
"""

import base64
import decimal
from http import HTTPStatus
import http.client
import json
import logging
import os
import socket
import time
import urllib.parse

HTTP_TIMEOUT = 30
USER_AGENT = "AuthServiceProxy/0.1"

log = logging.getLogger("BitcoinRPC")

class JSONRPCException(Exception):
    def __init__(self, rpc_error, http_status=None):
        try:
            errmsg = '%(message)s (%(code)i)' % rpc_error
        except (KeyError, TypeError):
            errmsg = ''
        super().__init__(errmsg)
        self.error = rpc_error
        self.http_status = http_status


def EncodeDecimal(o):
    if isinstance(o, decimal.Decimal):
        return str(o)
    raise TypeError(repr(o) + " is not JSON serializable")

class AuthServiceProxy():
    __id_count = 0

    # ensure_ascii: escape unicode as \uXXXX, passed to json.dumps
    def __init__(self, service_url, service_name=None, timeout=HTTP_TIMEOUT, connection=None, ensure_ascii=True):
        self.__service_url = service_url
        self._service_name = service_name
        self.ensure_ascii = ensure_ascii  # can be toggled on the fly by tests
        self.__url = urllib.parse.urlparse(service_url)
        user = None if self.__url.username is None else self.__url.username.encode('utf8')
        passwd = None if self.__url.password is None else self.__url.password.encode('utf8')
        authpair = user + b':' + passwd
        self.__auth_header = b'Basic ' + base64.b64encode(authpair)
        self.timeout = timeout
        self._set_conn(connection)

    def __getattr__(self, name):
        if name.startswith('__') and name.endswith('__'):
            # Python internal stuff
            raise AttributeError
        if self._service_name is not None:
            name = "%s.%s" % (self._service_name, name)
        return AuthServiceProxy(self.__service_url, name, connection=self.__conn)

    def _request(self, method, path, postdata):
        '''
        Do a HTTP request, with retry if we get disconnected (e.g. due to a timeout).
        This is a workaround for https://bugs.python.org/issue3566 which is fixed in Python 3.5.
        '''
        headers = {'Host': self.__url.hostname,
                   'User-Agent': USER_AGENT,
                   'Authorization': self.__auth_header,
                   'Content-type': 'application/json'}
        if os.name == 'nt':
            # Windows somehow does not like to re-use connections
            # TODO: Find out why the connection would disconnect occasionally and make it reusable on Windows
            # Avoid "ConnectionAbortedError: [WinError 10053] An established connection was aborted by the software in your host machine"
            self._set_conn()
        try:
            self.__conn.request(method, path, postdata, headers)
            return self._get_response()
        except (BrokenPipeError, ConnectionResetError):
            # Python 3.5+ raises BrokenPipeError when the connection was reset
            # ConnectionResetError happens on FreeBSD
            self.__conn.close()
            self.__conn.request(method, path, postdata, headers)
            return self._get_response()
        except OSError as e:
            # Workaround for a bug on macOS. See https://bugs.python.org/issue33450
            retry = '[Errno 41] Protocol wrong type for socket' in str(e)
            if retry:
                self.__conn.close()
                self.__conn.request(method, path, postdata, headers)
                return self._get_response()
            else:
                raise

    def get_request(self, *args, **argsn):
        AuthServiceProxy.__id_count += 1

        log.debug("-{}-> {} {}".format(
            AuthServiceProxy.__id_count,
            self._service_name,
            json.dumps(args or argsn, default=EncodeDecimal, ensure_ascii=self.ensure_ascii),
        ))
        if args and argsn:
            raise ValueError('Cannot handle both named and positional arguments')
        return {'version': '1.1',
                'method': self._service_name,
                'params': args or argsn,
                'id': AuthServiceProxy.__id_count}

    def __call__(self, *args, **argsn):
        postdata = json.dumps(self.get_request(*args, **argsn), default=EncodeDecimal, ensure_ascii=self.ensure_ascii)
        response, status = self._request('POST', self.__url.path, postdata.encode('utf-8'))
        if response['error'] is not None:
            raise JSONRPCException(response['error'], status)
        elif 'result' not in response:
            raise JSONRPCException({
                'code': -343, 'message': 'missing JSON-RPC result'}, status)
        elif status != HTTPStatus.OK:
            raise JSONRPCException({
                'code': -342, 'message': 'non-200 HTTP status code but no JSON-RPC error'}, status)
        else:
            return response['result']

    def batch(self, rpc_call_list):
        postdata = json.dumps(list(rpc_call_list), default=EncodeDecimal, ensure_ascii=self.ensure_ascii)
        log.debug("--> " + postdata)
        response, status = self._request('POST', self.__url.path, postdata.encode('utf-8'))
        if status != HTTPStatus.OK:
            raise JSONRPCException({
                'code': -342, 'message': 'non-200 HTTP status code but no JSON-RPC error'}, status)
        return response

    def _get_response(self):
        req_start_time = time.time()
        try:
            http_response = self.__conn.getresponse()
        except socket.timeout:
            raise JSONRPCException({
                'code': -344,
                'message': '%r RPC took longer than %f seconds. Consider '
                           'using larger timeout for calls that take '
                           'longer to return.' % (self._service_name,
                                                  self.__conn.timeout)})
        if http_response is None:
            raise JSONRPCException({
                'code': -342, 'message': 'missing HTTP response from server'})

        content_type = http_response.getheader('Content-Type')
        if content_type != 'application/json':
            raise JSONRPCException(
                {'code': -342, 'message': 'non-JSON HTTP response with \'%i %s\' from server' % (http_response.status, http_response.reason)},
                http_response.status)

        responsedata = http_response.read().decode('utf8')
        response = json.loads(responsedata, parse_float=decimal.Decimal)
        elapsed = time.time() - req_start_time
        if "error" in response and response["error"] is None:
            log.debug("<-%s- [%.6f] %s" % (response["id"], elapsed, json.dumps(response["result"], default=EncodeDecimal, ensure_ascii=self.ensure_ascii)))
        else:
            log.debug("<-- [%.6f] %s" % (elapsed, responsedata))
        return response, http_response.status

    def __truediv__(self, relative_uri):
        return AuthServiceProxy("{}/{}".format(self.__service_url, relative_uri), self._service_name, connection=self.__conn)

    def _set_conn(self, connection=None):
        port = 80 if self.__url.port is None else self.__url.port
        if connection:
            self.__conn = connection
            self.timeout = connection.timeout
        elif self.__url.scheme == 'https':
            self.__conn = http.client.HTTPSConnection(self.__url.hostname, port, timeout=self.timeout)
        else:
            self.__conn = http.client.HTTPConnection(self.__url.hostname, port, timeout=self.timeout)
151
miner_imports/test_framework/bdb.py
Normal file
151
miner_imports/test_framework/bdb.py
Normal file
@@ -0,0 +1,151 @@
#!/usr/bin/env python3
# Copyright (c) 2020-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Utilities for working directly with the wallet's BDB database file

This is specific to the configuration of BDB used in this project:
- pagesize: 4096 bytes
- Outer database contains single subdatabase named 'main'
- btree
- btree leaf pages

Each key-value pair is two entries in a btree leaf. The first is the key, the one that follows
is the value. And so on. Note that the entry data is itself not in the correct order. Instead
entry offsets are stored in the correct order and those offsets are needed to then retrieve
the data itself.

Page format can be found in BDB source code dbinc/db_page.h
This only implements the deserialization of btree metadata pages and normal btree pages. Overflow
pages are not implemented but may be needed in the future if dealing with wallets with large
transactions.

`db_dump -da wallet.dat` is useful to see the data in a wallet.dat BDB file
"""

import struct

# Important constants
PAGESIZE = 4096
OUTER_META_PAGE = 0
INNER_META_PAGE = 2

# Page type values
BTREE_INTERNAL = 3
BTREE_LEAF = 5
BTREE_META = 9

# Some magic numbers for sanity checking
BTREE_MAGIC = 0x053162
DB_VERSION = 9

# Deserializes a leaf page into a dict.
# Btree internal pages have the same header, for those, return None.
# For the btree leaf pages, deserialize them and put all the data into a dict
def dump_leaf_page(data):
    page_info = {}
    page_header = data[0:26]
    _, pgno, prev_pgno, next_pgno, entries, hf_offset, level, pg_type = struct.unpack('QIIIHHBB', page_header)
    page_info['pgno'] = pgno
    page_info['prev_pgno'] = prev_pgno
    page_info['next_pgno'] = next_pgno
    page_info['hf_offset'] = hf_offset
    page_info['level'] = level
    page_info['pg_type'] = pg_type
    page_info['entry_offsets'] = struct.unpack('{}H'.format(entries), data[26:26 + entries * 2])
    page_info['entries'] = []

    if pg_type == BTREE_INTERNAL:
        # Skip internal pages. These are the internal nodes of the btree and don't contain anything relevant to us
        return None

    assert pg_type == BTREE_LEAF, 'A non-btree leaf page has been encountered while dumping leaves'

    for i in range(0, entries):
        offset = page_info['entry_offsets'][i]
        entry = {'offset': offset}
        page_data_header = data[offset:offset + 3]
        e_len, pg_type = struct.unpack('HB', page_data_header)
        entry['len'] = e_len
        entry['pg_type'] = pg_type
        entry['data'] = data[offset + 3:offset + 3 + e_len]
        page_info['entries'].append(entry)

    return page_info

# Deserializes a btree metadata page into a dict.
# Does a simple sanity check on the magic value, type, and version
def dump_meta_page(page):
    # metadata page
    # general metadata
    metadata = {}
    meta_page = page[0:72]
    _, pgno, magic, version, pagesize, encrypt_alg, pg_type, metaflags, _, free, last_pgno, nparts, key_count, record_count, flags, uid = struct.unpack('QIIIIBBBBIIIIII20s', meta_page)
    metadata['pgno'] = pgno
    metadata['magic'] = magic
    metadata['version'] = version
    metadata['pagesize'] = pagesize
    metadata['encrypt_alg'] = encrypt_alg
    metadata['pg_type'] = pg_type
    metadata['metaflags'] = metaflags
    metadata['free'] = free
    metadata['last_pgno'] = last_pgno
    metadata['nparts'] = nparts
    metadata['key_count'] = key_count
    metadata['record_count'] = record_count
    metadata['flags'] = flags
    metadata['uid'] = uid.hex().encode()

    assert magic == BTREE_MAGIC, 'bdb magic does not match bdb btree magic'
    assert pg_type == BTREE_META, 'Metadata page is not a btree metadata page'
    assert version == DB_VERSION, 'Database too new'

    # btree metadata
    btree_meta_page = page[72:512]
    _, minkey, re_len, re_pad, root, _, crypto_magic, _, iv, chksum = struct.unpack('IIIII368sI12s16s20s', btree_meta_page)
    metadata['minkey'] = minkey
    metadata['re_len'] = re_len
    metadata['re_pad'] = re_pad
    metadata['root'] = root
    metadata['crypto_magic'] = crypto_magic
    metadata['iv'] = iv.hex().encode()
    metadata['chksum'] = chksum.hex().encode()

    return metadata

# Given the dict from dump_leaf_page, get the key-value pairs and put them into a dict
def extract_kv_pairs(page_data):
    out = {}
    last_key = None
    for i, entry in enumerate(page_data['entries']):
        # By virtue of these all being pairs, even number entries are keys, and odd are values
        if i % 2 == 0:
            out[entry['data']] = b''
            last_key = entry['data']
        else:
            out[last_key] = entry['data']
    return out

# Extract the key-value pairs of the BDB file given in filename
def dump_bdb_kv(filename):
    # Read in the BDB file and start deserializing it
    pages = []
    with open(filename, 'rb') as f:
        data = f.read(PAGESIZE)
        while len(data) > 0:
            pages.append(data)
            data = f.read(PAGESIZE)

    # Sanity check the meta pages
    dump_meta_page(pages[OUTER_META_PAGE])
    dump_meta_page(pages[INNER_META_PAGE])

    # Fetch the kv pairs from the leaf pages
    kv = {}
    for i in range(3, len(pages)):
        info = dump_leaf_page(pages[i])
        if info is not None:
            info_kv = extract_kv_pairs(info)
            kv = {**kv, **info_kv}
    return kv
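
# Illustrative usage sketch (not upstream code; the wallet path is a
# hypothetical placeholder for a legacy BDB wallet.dat matching the
# configuration described in the module docstring):
#
#   kv = dump_bdb_kv('wallet.dat')
#   for key, value in kv.items():
#       print(key.hex(), value.hex())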
16
miner_imports/test_framework/bip340_test_vectors.csv
Normal file
@@ -0,0 +1,16 @@
index,secret key,public key,aux_rand,message,signature,verification result,comment
0,0000000000000000000000000000000000000000000000000000000000000003,F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9,0000000000000000000000000000000000000000000000000000000000000000,0000000000000000000000000000000000000000000000000000000000000000,E907831F80848D1069A5371B402410364BDF1C5F8307B0084C55F1CE2DCA821525F66A4A85EA8B71E482A74F382D2CE5EBEEE8FDB2172F477DF4900D310536C0,TRUE,
1,B7E151628AED2A6ABF7158809CF4F3C762E7160F38B4DA56A784D9045190CFEF,DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659,0000000000000000000000000000000000000000000000000000000000000001,243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89,6896BD60EEAE296DB48A229FF71DFE071BDE413E6D43F917DC8DCF8C78DE33418906D11AC976ABCCB20B091292BFF4EA897EFCB639EA871CFA95F6DE339E4B0A,TRUE,
2,C90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B14E5C9,DD308AFEC5777E13121FA72B9CC1B7CC0139715309B086C960E18FD969774EB8,C87AA53824B4D7AE2EB035A2B5BBBCCC080E76CDC6D1692C4B0B62D798E6D906,7E2D58D8B3BCDF1ABADEC7829054F90DDA9805AAB56C77333024B9D0A508B75C,5831AAEED7B44BB74E5EAB94BA9D4294C49BCF2A60728D8B4C200F50DD313C1BAB745879A5AD954A72C45A91C3A51D3C7ADEA98D82F8481E0E1E03674A6F3FB7,TRUE,
3,0B432B2677937381AEF05BB02A66ECD012773062CF3FA2549E44F58ED2401710,25D1DFF95105F5253C4022F628A996AD3A0D95FBF21D468A1B33F8C160D8F517,FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF,FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF,7EB0509757E246F19449885651611CB965ECC1A187DD51B64FDA1EDC9637D5EC97582B9CB13DB3933705B32BA982AF5AF25FD78881EBB32771FC5922EFC66EA3,TRUE,test fails if msg is reduced modulo p or n
4,,D69C3509BB99E412E68B0FE8544E72837DFA30746D8BE2AA65975F29D22DC7B9,,4DF3C3F68FCC83B27E9D42C90431A72499F17875C81A599B566C9889B9696703,00000000000000000000003B78CE563F89A0ED9414F5AA28AD0D96D6795F9C6376AFB1548AF603B3EB45C9F8207DEE1060CB71C04E80F593060B07D28308D7F4,TRUE,
5,,EEFDEA4CDB677750A420FEE807EACF21EB9898AE79B9768766E4FAA04A2D4A34,,243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89,6CFF5C3BA86C69EA4B7376F31A9BCB4F74C1976089B2D9963DA2E5543E17776969E89B4C5564D00349106B8497785DD7D1D713A8AE82B32FA79D5F7FC407D39B,FALSE,public key not on the curve
6,,DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659,,243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89,FFF97BD5755EEEA420453A14355235D382F6472F8568A18B2F057A14602975563CC27944640AC607CD107AE10923D9EF7A73C643E166BE5EBEAFA34B1AC553E2,FALSE,has_even_y(R) is false
7,,DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659,,243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89,1FA62E331EDBC21C394792D2AB1100A7B432B013DF3F6FF4F99FCB33E0E1515F28890B3EDB6E7189B630448B515CE4F8622A954CFE545735AAEA5134FCCDB2BD,FALSE,negated message
8,,DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659,,243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89,6CFF5C3BA86C69EA4B7376F31A9BCB4F74C1976089B2D9963DA2E5543E177769961764B3AA9B2FFCB6EF947B6887A226E8D7C93E00C5ED0C1834FF0D0C2E6DA6,FALSE,negated s value
9,,DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659,,243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89,0000000000000000000000000000000000000000000000000000000000000000123DDA8328AF9C23A94C1FEECFD123BA4FB73476F0D594DCB65C6425BD186051,FALSE,sG - eP is infinite. Test fails in single verification if has_even_y(inf) is defined as true and x(inf) as 0
10,,DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659,,243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89,00000000000000000000000000000000000000000000000000000000000000017615FBAF5AE28864013C099742DEADB4DBA87F11AC6754F93780D5A1837CF197,FALSE,sG - eP is infinite. Test fails in single verification if has_even_y(inf) is defined as true and x(inf) as 1
11,,DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659,,243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89,4A298DACAE57395A15D0795DDBFD1DCB564DA82B0F269BC70A74F8220429BA1D69E89B4C5564D00349106B8497785DD7D1D713A8AE82B32FA79D5F7FC407D39B,FALSE,sig[0:32] is not an X coordinate on the curve
12,,DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659,,243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89,FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F69E89B4C5564D00349106B8497785DD7D1D713A8AE82B32FA79D5F7FC407D39B,FALSE,sig[0:32] is equal to field size
13,,DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659,,243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89,6CFF5C3BA86C69EA4B7376F31A9BCB4F74C1976089B2D9963DA2E5543E177769FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141,FALSE,sig[32:64] is equal to curve order
14,,FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC30,,243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89,6CFF5C3BA86C69EA4B7376F31A9BCB4F74C1976089B2D9963DA2E5543E17776969E89B4C5564D00349106B8497785DD7D1D713A8AE82B32FA79D5F7FC407D39B,FALSE,public key is not a valid X coordinate because it exceeds the field size
256
miner_imports/test_framework/blocktools.py
Normal file
@@ -0,0 +1,256 @@
#!/usr/bin/env python3
# Copyright (c) 2015-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utilities for manipulating blocks and transactions."""

import struct
import time
import unittest

from .address import (
    key_to_p2sh_p2wpkh,
    key_to_p2wpkh,
    script_to_p2sh_p2wsh,
    script_to_p2wsh,
)
from .messages import (
    CBlock,
    COIN,
    COutPoint,
    CTransaction,
    CTxIn,
    CTxInWitness,
    CTxOut,
    SEQUENCE_FINAL,
    hash256,
    ser_uint256,
    tx_from_hex,
    uint256_from_str,
)
from .script import (
    CScript,
    CScriptNum,
    CScriptOp,
    OP_1,
    OP_RETURN,
    OP_TRUE,
)
from .script_util import (
    key_to_p2pk_script,
    key_to_p2wpkh_script,
    keys_to_multisig_script,
    script_to_p2wsh_script,
)
from .util import assert_equal

WITNESS_SCALE_FACTOR = 4
MAX_BLOCK_SIGOPS = 20000
MAX_BLOCK_SIGOPS_WEIGHT = MAX_BLOCK_SIGOPS * WITNESS_SCALE_FACTOR

# Genesis block time (regtest)
TIME_GENESIS_BLOCK = 1296688602

MAX_FUTURE_BLOCK_TIME = 2 * 60 * 60

# Coinbase transaction outputs can only be spent after this number of new blocks (network rule)
COINBASE_MATURITY = 100

# From BIP141
WITNESS_COMMITMENT_HEADER = b"\xaa\x21\xa9\xed"

NORMAL_GBT_REQUEST_PARAMS = {"rules": ["segwit"]}
VERSIONBITS_LAST_OLD_BLOCK_VERSION = 4


def create_block(hashprev=None, coinbase=None, ntime=None, *, version=None, tmpl=None, txlist=None):
    """Create a block (with regtest difficulty)."""
    block = CBlock()
    if tmpl is None:
        tmpl = {}
    block.nVersion = version or tmpl.get('version') or VERSIONBITS_LAST_OLD_BLOCK_VERSION
    block.nTime = ntime or tmpl.get('curtime') or int(time.time() + 600)
    block.hashPrevBlock = hashprev or int(tmpl['previousblockhash'], 0x10)
    if tmpl and not tmpl.get('bits') is None:
        block.nBits = struct.unpack('>I', bytes.fromhex(tmpl['bits']))[0]
    else:
        block.nBits = 0x207fffff  # difficulty retargeting is disabled in REGTEST chainparams
    if coinbase is None:
        coinbase = create_coinbase(height=tmpl['height'])
    block.vtx.append(coinbase)
    if txlist:
        for tx in txlist:
            if not hasattr(tx, 'calc_sha256'):
                tx = tx_from_hex(tx)
            block.vtx.append(tx)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.calc_sha256()
    return block
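
# Illustrative sketch (not upstream code; assumes a test node reachable as
# `node` and low-difficulty chain params): build a block from a
# getblocktemplate result, commit to witnesses, and grind the proof of work:
#
#   tmpl = node.getblocktemplate(NORMAL_GBT_REQUEST_PARAMS)
#   block = create_block(tmpl=tmpl)
#   add_witness_commitment(block)  # defined below
#   block.solve()                  # CBlock.solve() from messages.py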

def get_witness_script(witness_root, witness_nonce):
    witness_commitment = uint256_from_str(hash256(ser_uint256(witness_root) + ser_uint256(witness_nonce)))
    output_data = WITNESS_COMMITMENT_HEADER + ser_uint256(witness_commitment)
    return CScript([OP_RETURN, output_data])


def add_witness_commitment(block, nonce=0):
    """Add a witness commitment to the block's coinbase transaction.

    According to BIP141, blocks with witness rules active must commit to the
    hash of all in-block transactions including witness."""
    # First calculate the merkle root of the block's
    # transactions, with witnesses.
    witness_nonce = nonce
    witness_root = block.calc_witness_merkle_root()
    # witness_nonce should go to coinbase witness.
    block.vtx[0].wit.vtxinwit = [CTxInWitness()]
    block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(witness_nonce)]

    # witness commitment is the last OP_RETURN output in coinbase
    block.vtx[0].vout.append(CTxOut(0, get_witness_script(witness_root, witness_nonce)))
    block.vtx[0].rehash()
    block.hashMerkleRoot = block.calc_merkle_root()
    block.rehash()


def script_BIP34_coinbase_height(height):
    if height <= 16:
        res = CScriptOp.encode_op_n(height)
        # Append dummy to increase scriptSig size above 2 (see bad-cb-length consensus rule)
        return CScript([res, OP_1])
    return CScript([CScriptNum(height)])


def create_coinbase(height, pubkey=None, extra_output_script=None, fees=0, nValue=50):
    """Create a coinbase transaction.

    If pubkey is passed in, the coinbase output will be a P2PK output;
    otherwise an anyone-can-spend output.

    If extra_output_script is given, make a 0-value output to that
    script. This is useful to pad block weight/sigops as needed."""
    coinbase = CTransaction()
    coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff), script_BIP34_coinbase_height(height), SEQUENCE_FINAL))
    coinbaseoutput = CTxOut()
    coinbaseoutput.nValue = nValue * COIN
    if nValue == 50:
        halvings = int(height / 150)  # regtest
        coinbaseoutput.nValue >>= halvings
    coinbaseoutput.nValue += fees
    if pubkey is not None:
        coinbaseoutput.scriptPubKey = key_to_p2pk_script(pubkey)
    else:
        coinbaseoutput.scriptPubKey = CScript([OP_TRUE])
    coinbase.vout = [coinbaseoutput]
    if extra_output_script is not None:
        coinbaseoutput2 = CTxOut()
        coinbaseoutput2.nValue = 0
        coinbaseoutput2.scriptPubKey = extra_output_script
        coinbase.vout.append(coinbaseoutput2)
    coinbase.calc_sha256()
    return coinbase
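
# Illustrative sketch: with the regtest-style schedule above the subsidy
# halves every 150 blocks, so a height-20 coinbase pays the full 50 * COIN:
#
#   cb = create_coinbase(height=20)
#   assert cb.vout[0].nValue == 50 * COIN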

def create_tx_with_script(prevtx, n, script_sig=b"", *, amount, script_pub_key=CScript()):
    """Return one-input, one-output transaction object
    spending the prevtx's n-th output with the given amount.

    Can optionally pass scriptPubKey and scriptSig, default is anyone-can-spend output.
    """
    tx = CTransaction()
    assert n < len(prevtx.vout)
    tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), script_sig, SEQUENCE_FINAL))
    tx.vout.append(CTxOut(amount, script_pub_key))
    tx.calc_sha256()
    return tx


def create_transaction(node, txid, to_address, *, amount):
    """Return signed transaction spending the first output of the
    input txid. Note that the node must have a wallet that can
    sign for the output that is being spent.
    """
    raw_tx = create_raw_transaction(node, txid, to_address, amount=amount)
    tx = tx_from_hex(raw_tx)
    return tx


def create_raw_transaction(node, txid, to_address, *, amount):
    """Return raw signed transaction spending the first output of the
    input txid. Note that the node must have a wallet that can sign
    for the output that is being spent.
    """
    psbt = node.createpsbt(inputs=[{"txid": txid, "vout": 0}], outputs={to_address: amount})
    for _ in range(2):
        for w in node.listwallets():
            wrpc = node.get_wallet_rpc(w)
            signed_psbt = wrpc.walletprocesspsbt(psbt)
            psbt = signed_psbt['psbt']
    final_psbt = node.finalizepsbt(psbt)
    assert_equal(final_psbt["complete"], True)
    return final_psbt['hex']


def get_legacy_sigopcount_block(block, accurate=True):
    count = 0
    for tx in block.vtx:
        count += get_legacy_sigopcount_tx(tx, accurate)
    return count


def get_legacy_sigopcount_tx(tx, accurate=True):
    count = 0
    for i in tx.vout:
        count += i.scriptPubKey.GetSigOpCount(accurate)
    for j in tx.vin:
        # scriptSig might be of type bytes, so convert to CScript for the moment
        count += CScript(j.scriptSig).GetSigOpCount(accurate)
    return count


def witness_script(use_p2wsh, pubkey):
    """Create a scriptPubKey for a pay-to-witness TxOut.

    This is either a P2WPKH output for the given pubkey, or a P2WSH output of a
    1-of-1 multisig for the given pubkey. Returns the hex encoding of the
    scriptPubKey."""
    if not use_p2wsh:
        # P2WPKH instead
        pkscript = key_to_p2wpkh_script(pubkey)
    else:
        # 1-of-1 multisig
        witness_script = keys_to_multisig_script([pubkey])
        pkscript = script_to_p2wsh_script(witness_script)
    return pkscript.hex()


def create_witness_tx(node, use_p2wsh, utxo, pubkey, encode_p2sh, amount):
    """Return a transaction (in hex) that spends the given utxo to a segwit output.

    Optionally wrap the segwit output using P2SH."""
    if use_p2wsh:
        program = keys_to_multisig_script([pubkey])
        addr = script_to_p2sh_p2wsh(program) if encode_p2sh else script_to_p2wsh(program)
    else:
        addr = key_to_p2sh_p2wpkh(pubkey) if encode_p2sh else key_to_p2wpkh(pubkey)
    if not encode_p2sh:
        assert_equal(node.getaddressinfo(addr)['scriptPubKey'], witness_script(use_p2wsh, pubkey))
    return node.createrawtransaction([utxo], {addr: amount})


def send_to_witness(use_p2wsh, node, utxo, pubkey, encode_p2sh, amount, sign=True, insert_redeem_script=""):
    """Create a transaction spending a given utxo to a segwit output.

    The output corresponds to the given pubkey: use_p2wsh determines whether to
    use P2WPKH or P2WSH; encode_p2sh determines whether to wrap in P2SH.
    sign=True will have the given node sign the transaction.
    insert_redeem_script will be added to the scriptSig, if given."""
    tx_to_witness = create_witness_tx(node, use_p2wsh, utxo, pubkey, encode_p2sh, amount)
    if (sign):
        signed = node.signrawtransactionwithwallet(tx_to_witness)
        # Fixed from the original `len(["errors"]) == 0`, which compared the
        # length of a literal one-element list instead of the node's error list.
        assert "errors" not in signed or len(signed["errors"]) == 0
        return node.sendrawtransaction(signed["hex"])
    else:
        if (insert_redeem_script):
            tx = tx_from_hex(tx_to_witness)
            tx.vin[0].scriptSig += CScript([bytes.fromhex(insert_redeem_script)])
            tx_to_witness = tx.serialize().hex()

    return node.sendrawtransaction(tx_to_witness)


class TestFrameworkBlockTools(unittest.TestCase):
    def test_create_coinbase(self):
        height = 20
        coinbase_tx = create_coinbase(height=height)
        assert_equal(CScriptNum.decode(coinbase_tx.vin[0].scriptSig), height)
112
miner_imports/test_framework/coverage.py
Normal file
@@ -0,0 +1,112 @@
#!/usr/bin/env python3
# Copyright (c) 2015-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utilities for doing coverage analysis on the RPC interface.

Provides a way to track which RPC commands are exercised during
testing.
"""

import os

from .authproxy import AuthServiceProxy

REFERENCE_FILENAME = 'rpc_interface.txt'


class AuthServiceProxyWrapper():
    """
    An object that wraps AuthServiceProxy to record specific RPC calls.

    """
    def __init__(self, auth_service_proxy_instance: AuthServiceProxy, rpc_url: str, coverage_logfile: str=None):
        """
        Kwargs:
            auth_service_proxy_instance: the instance being wrapped.
            rpc_url: url of the RPC instance being wrapped
            coverage_logfile: if specified, write each service_name
                out to a file when called.

        """
        self.auth_service_proxy_instance = auth_service_proxy_instance
        self.rpc_url = rpc_url
        self.coverage_logfile = coverage_logfile

    def __getattr__(self, name):
        return_val = getattr(self.auth_service_proxy_instance, name)
        if not isinstance(return_val, type(self.auth_service_proxy_instance)):
            # If proxy getattr returned an unwrapped value, do the same here.
            return return_val
        return AuthServiceProxyWrapper(return_val, self.rpc_url, self.coverage_logfile)

    def __call__(self, *args, **kwargs):
        """
        Delegates to AuthServiceProxy, then writes the particular RPC method
        called to a file.

        """
        return_val = self.auth_service_proxy_instance.__call__(*args, **kwargs)
        self._log_call()
        return return_val

    def _log_call(self):
        rpc_method = self.auth_service_proxy_instance._service_name

        if self.coverage_logfile:
            with open(self.coverage_logfile, 'a+', encoding='utf8') as f:
                f.write("%s\n" % rpc_method)

    def __truediv__(self, relative_uri):
        return AuthServiceProxyWrapper(self.auth_service_proxy_instance / relative_uri,
                                       self.rpc_url,
                                       self.coverage_logfile)

    def get_request(self, *args, **kwargs):
        self._log_call()
        return self.auth_service_proxy_instance.get_request(*args, **kwargs)
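
# Illustrative usage sketch (not upstream code; the URL matches this image's
# default RPC settings and the logfile path is a placeholder):
#
#   url = "http://bitcoin:bitcoin@127.0.0.1:38332"
#   wrapped = AuthServiceProxyWrapper(AuthServiceProxy(url), url, "/tmp/rpc_coverage.txt")
#   wrapped.getblockcount()  # result is returned and "getblockcount" is appended to the logfile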

def get_filename(dirname, n_node):
    """
    Get a filename unique to the test process ID and node.

    This file will contain a list of RPC commands covered.
    """
    pid = str(os.getpid())
    return os.path.join(
        dirname, "coverage.pid%s.node%s.txt" % (pid, str(n_node)))


def write_all_rpc_commands(dirname: str, node: AuthServiceProxy) -> bool:
    """
    Write out a list of all RPC functions available in `bitcoin-cli` for
    coverage comparison. This will only happen once per coverage
    directory.

    Args:
        dirname: temporary test dir
        node: client

    Returns:
        if the RPC interface file was written.

    """
    filename = os.path.join(dirname, REFERENCE_FILENAME)

    if os.path.isfile(filename):
        return False

    help_output = node.help().split('\n')
    commands = set()

    for line in help_output:
        line = line.strip()

        # Ignore blanks and headers
        if line and not line.startswith('='):
            commands.add("%s\n" % line.split()[0])

    with open(filename, 'w', encoding='utf8') as f:
        f.writelines(list(commands))

    return True
64
miner_imports/test_framework/descriptors.py
Normal file
@@ -0,0 +1,64 @@
#!/usr/bin/env python3
# Copyright (c) 2019 Pieter Wuille
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utility functions related to output descriptors"""

import re

INPUT_CHARSET = "0123456789()[],'/*abcdefgh@:$%{}IJKLMNOPQRSTUVWXYZ&+-.;<=>?!^_|~ijklmnopqrstuvwxyzABCDEFGH`#\"\\ "
CHECKSUM_CHARSET = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"
GENERATOR = [0xf5dee51989, 0xa9fdca3312, 0x1bab10e32d, 0x3706b1677a, 0x644d626ffd]

def descsum_polymod(symbols):
    """Internal function that computes the descriptor checksum."""
    chk = 1
    for value in symbols:
        top = chk >> 35
        chk = (chk & 0x7ffffffff) << 5 ^ value
        for i in range(5):
            chk ^= GENERATOR[i] if ((top >> i) & 1) else 0
    return chk

def descsum_expand(s):
    """Internal function that does the character to symbol expansion"""
    groups = []
    symbols = []
    for c in s:
        if not c in INPUT_CHARSET:
            return None
        v = INPUT_CHARSET.find(c)
        symbols.append(v & 31)
        groups.append(v >> 5)
        if len(groups) == 3:
            symbols.append(groups[0] * 9 + groups[1] * 3 + groups[2])
            groups = []
    if len(groups) == 1:
        symbols.append(groups[0])
    elif len(groups) == 2:
        symbols.append(groups[0] * 3 + groups[1])
    return symbols

def descsum_create(s):
    """Add a checksum to a descriptor without one"""
    symbols = descsum_expand(s) + [0, 0, 0, 0, 0, 0, 0, 0]
    checksum = descsum_polymod(symbols) ^ 1
    return s + '#' + ''.join(CHECKSUM_CHARSET[(checksum >> (5 * (7 - i))) & 31] for i in range(8))

def descsum_check(s, require=True):
    """Verify that the checksum is correct in a descriptor"""
    if not '#' in s:
        return not require
    if s[-9] != '#':
        return False
    if not all(x in CHECKSUM_CHARSET for x in s[-8:]):
        return False
    symbols = descsum_expand(s[:-9]) + [CHECKSUM_CHARSET.find(x) for x in s[-8:]]
    return descsum_polymod(symbols) == 1
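
# Illustrative sketch: a descriptor round-tripped through descsum_create is
# always accepted by descsum_check, since the 8 appended characters are chosen
# to make the polymod equal 1:
#
#   desc = descsum_create("raw(deadbeef)")  # appends '#' plus 8 checksum chars
#   assert descsum_check(desc)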

def drop_origins(s):
    '''Drop the key origins from a descriptor'''
    desc = re.sub(r'\[.+?\]', '', s)
    if '#' in s:
        desc = desc[:desc.index('#')]
    return descsum_create(desc)
563
miner_imports/test_framework/key.py
Normal file
@@ -0,0 +1,563 @@
# Copyright (c) 2019-2020 Pieter Wuille
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test-only secp256k1 elliptic curve implementation

WARNING: This code is slow, uses bad randomness, does not properly protect
keys, and is trivially vulnerable to side channel attacks. Do not use for
anything but tests."""
import csv
import hashlib
import hmac
import os
import random
import unittest

from .util import modinv

def TaggedHash(tag, data):
    ss = hashlib.sha256(tag.encode('utf-8')).digest()
    ss += ss
    ss += data
    return hashlib.sha256(ss).digest()

def jacobi_symbol(n, k):
    """Compute the Jacobi symbol of n modulo k

    See https://en.wikipedia.org/wiki/Jacobi_symbol

    For our application k is always prime, so this is the same as the Legendre symbol."""
    assert k > 0 and k & 1, "jacobi symbol is only defined for positive odd k"
    n %= k
    t = 0
    while n != 0:
        while n & 1 == 0:
            n >>= 1
            r = k & 7
            t ^= (r == 3 or r == 5)
        n, k = k, n
        t ^= (n & k & 3 == 3)
        n = n % k
    if k == 1:
        return -1 if t else 1
    return 0

def modsqrt(a, p):
    """Compute the square root of a modulo p when p % 4 = 3.

    The Tonelli-Shanks algorithm can be used. See https://en.wikipedia.org/wiki/Tonelli-Shanks_algorithm

    Limiting this function to only work for p % 4 = 3 means we don't need to
    iterate through the loop. The highest n such that p - 1 = 2^n Q with Q odd
    is n = 1. Therefore Q = (p-1)/2 and sqrt = a^((Q+1)/2) = a^((p+1)/4)

    secp256k1 is defined over a field of size 2**256 - 2**32 - 977, which is 3 mod 4.
    """
    if p % 4 != 3:
        raise NotImplementedError("modsqrt only implemented for p % 4 = 3")
    sqrt = pow(a, (p + 1)//4, p)
    if pow(sqrt, 2, p) == a % p:
        return sqrt
    return None
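
# Worked example: for p = 7 (which is 3 mod 4), modsqrt(2, 7) returns 4,
# since pow(2, (7 + 1) // 4, 7) == 4 and 4 * 4 == 16, which is 2 mod 7.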

class EllipticCurve:
    def __init__(self, p, a, b):
        """Initialize elliptic curve y^2 = x^3 + a*x + b over GF(p)."""
        self.p = p
        self.a = a % p
        self.b = b % p

    def affine(self, p1):
        """Convert a Jacobian point tuple p1 to affine form, or None if at infinity.

        An affine point is represented as the Jacobian (x, y, 1)"""
        x1, y1, z1 = p1
        if z1 == 0:
            return None
        inv = modinv(z1, self.p)
        inv_2 = (inv**2) % self.p
        inv_3 = (inv_2 * inv) % self.p
        return ((inv_2 * x1) % self.p, (inv_3 * y1) % self.p, 1)

    def has_even_y(self, p1):
        """Whether the point p1 has an even Y coordinate when expressed in affine coordinates."""
        return not (p1[2] == 0 or self.affine(p1)[1] & 1)

    def negate(self, p1):
        """Negate a Jacobian point tuple p1."""
        x1, y1, z1 = p1
        return (x1, (self.p - y1) % self.p, z1)

    def on_curve(self, p1):
        """Determine whether a Jacobian tuple p is on the curve (and not infinity)"""
        x1, y1, z1 = p1
        z2 = pow(z1, 2, self.p)
        z4 = pow(z2, 2, self.p)
        return z1 != 0 and (pow(x1, 3, self.p) + self.a * x1 * z4 + self.b * z2 * z4 - pow(y1, 2, self.p)) % self.p == 0

    def is_x_coord(self, x):
        """Test whether x is a valid X coordinate on the curve."""
        x_3 = pow(x, 3, self.p)
        return jacobi_symbol(x_3 + self.a * x + self.b, self.p) != -1

    def lift_x(self, x):
        """Given an X coordinate on the curve, return a corresponding affine point for which the Y coordinate is even."""
        x_3 = pow(x, 3, self.p)
        v = x_3 + self.a * x + self.b
        y = modsqrt(v, self.p)
        if y is None:
            return None
        return (x, self.p - y if y & 1 else y, 1)

    def double(self, p1):
        """Double a Jacobian tuple p1

        See https://en.wikibooks.org/wiki/Cryptography/Prime_Curve/Jacobian_Coordinates - Point Doubling"""
        x1, y1, z1 = p1
        if z1 == 0:
            return (0, 1, 0)
        y1_2 = (y1**2) % self.p
        y1_4 = (y1_2**2) % self.p
        x1_2 = (x1**2) % self.p
        s = (4*x1*y1_2) % self.p
        m = 3*x1_2
        if self.a:
            m += self.a * pow(z1, 4, self.p)
        m = m % self.p
        x2 = (m**2 - 2*s) % self.p
        y2 = (m*(s - x2) - 8*y1_4) % self.p
        z2 = (2*y1*z1) % self.p
        return (x2, y2, z2)

    def add_mixed(self, p1, p2):
        """Add a Jacobian tuple p1 and an affine tuple p2

        See https://en.wikibooks.org/wiki/Cryptography/Prime_Curve/Jacobian_Coordinates - Point Addition (with affine point)"""
        x1, y1, z1 = p1
        x2, y2, z2 = p2
        assert(z2 == 1)
        # Adding to the point at infinity is a no-op
        if z1 == 0:
            return p2
        z1_2 = (z1**2) % self.p
        z1_3 = (z1_2 * z1) % self.p
        u2 = (x2 * z1_2) % self.p
        s2 = (y2 * z1_3) % self.p
        if x1 == u2:
            if (y1 != s2):
                # p1 and p2 are inverses. Return the point at infinity.
                return (0, 1, 0)
            # p1 == p2. The formulas below fail when the two points are equal.
            return self.double(p1)
        h = u2 - x1
        r = s2 - y1
        h_2 = (h**2) % self.p
        h_3 = (h_2 * h) % self.p
        u1_h_2 = (x1 * h_2) % self.p
        x3 = (r**2 - h_3 - 2*u1_h_2) % self.p
        y3 = (r*(u1_h_2 - x3) - y1*h_3) % self.p
        z3 = (h*z1) % self.p
        return (x3, y3, z3)

    def add(self, p1, p2):
        """Add two Jacobian tuples p1 and p2

        See https://en.wikibooks.org/wiki/Cryptography/Prime_Curve/Jacobian_Coordinates - Point Addition"""
        x1, y1, z1 = p1
        x2, y2, z2 = p2
        # Adding the point at infinity is a no-op
        if z1 == 0:
            return p2
        if z2 == 0:
            return p1
        # Adding an Affine to a Jacobian is more efficient since we save field multiplications and squarings when z = 1
        if z1 == 1:
            return self.add_mixed(p2, p1)
        if z2 == 1:
            return self.add_mixed(p1, p2)
        z1_2 = (z1**2) % self.p
        z1_3 = (z1_2 * z1) % self.p
        z2_2 = (z2**2) % self.p
        z2_3 = (z2_2 * z2) % self.p
        u1 = (x1 * z2_2) % self.p
        u2 = (x2 * z1_2) % self.p
        s1 = (y1 * z2_3) % self.p
        s2 = (y2 * z1_3) % self.p
        if u1 == u2:
            if (s1 != s2):
                # p1 and p2 are inverses. Return the point at infinity.
                return (0, 1, 0)
            # p1 == p2. The formulas below fail when the two points are equal.
            return self.double(p1)
        h = u2 - u1
        r = s2 - s1
        h_2 = (h**2) % self.p
        h_3 = (h_2 * h) % self.p
        u1_h_2 = (u1 * h_2) % self.p
        x3 = (r**2 - h_3 - 2*u1_h_2) % self.p
        y3 = (r*(u1_h_2 - x3) - s1*h_3) % self.p
        z3 = (h*z1*z2) % self.p
        return (x3, y3, z3)

    def mul(self, ps):
        """Compute a (multi) point multiplication

        ps is a list of (Jacobian tuple, scalar) pairs.
        """
        r = (0, 1, 0)
        for i in range(255, -1, -1):
            r = self.double(r)
            for (p, n) in ps:
                if ((n >> i) & 1):
                    r = self.add(r, p)
        return r

SECP256K1_FIELD_SIZE = 2**256 - 2**32 - 977
SECP256K1 = EllipticCurve(SECP256K1_FIELD_SIZE, 0, 7)
SECP256K1_G = (0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798, 0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8, 1)
SECP256K1_ORDER = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
SECP256K1_ORDER_HALF = SECP256K1_ORDER // 2

class ECPubKey():
    """A secp256k1 public key"""

    def __init__(self):
        """Construct an uninitialized public key"""
        self.valid = False

    def set(self, data):
        """Construct a public key from a serialization in compressed or uncompressed format"""
        if (len(data) == 65 and data[0] == 0x04):
            p = (int.from_bytes(data[1:33], 'big'), int.from_bytes(data[33:65], 'big'), 1)
            self.valid = SECP256K1.on_curve(p)
            if self.valid:
                self.p = p
                self.compressed = False
        elif (len(data) == 33 and (data[0] == 0x02 or data[0] == 0x03)):
            x = int.from_bytes(data[1:33], 'big')
            if SECP256K1.is_x_coord(x):
                p = SECP256K1.lift_x(x)
                # Make the Y coordinate odd if required (lift_x always produces
                # a point with an even Y coordinate).
                if data[0] & 1:
                    p = SECP256K1.negate(p)
                self.p = p
                self.valid = True
                self.compressed = True
            else:
                self.valid = False
        else:
            self.valid = False

    @property
    def is_compressed(self):
        return self.compressed

    @property
    def is_valid(self):
        return self.valid

    def get_bytes(self):
        assert(self.valid)
        p = SECP256K1.affine(self.p)
        if p is None:
            return None
        if self.compressed:
            return bytes([0x02 + (p[1] & 1)]) + p[0].to_bytes(32, 'big')
        else:
            return bytes([0x04]) + p[0].to_bytes(32, 'big') + p[1].to_bytes(32, 'big')

    def verify_ecdsa(self, sig, msg, low_s=True):
        """Verify a strictly DER-encoded ECDSA signature against this pubkey.

        See https://en.wikipedia.org/wiki/Elliptic_Curve_Digital_Signature_Algorithm for the
        ECDSA verifier algorithm"""
        assert(self.valid)

        # Extract r and s from the DER formatted signature. Return false for
        # any DER encoding errors.
        if (sig[1] + 2 != len(sig)):
            return False
        if (len(sig) < 4):
            return False
        if (sig[0] != 0x30):
            return False
        if (sig[2] != 0x02):
            return False
        rlen = sig[3]
        if (len(sig) < 6 + rlen):
            return False
        if rlen < 1 or rlen > 33:
            return False
        if sig[4] >= 0x80:
            return False
        if (rlen > 1 and (sig[4] == 0) and not (sig[5] & 0x80)):
            return False
        r = int.from_bytes(sig[4:4+rlen], 'big')
        if (sig[4+rlen] != 0x02):
            return False
        slen = sig[5+rlen]
        if slen < 1 or slen > 33:
            return False
        if (len(sig) != 6 + rlen + slen):
            return False
        if sig[6+rlen] >= 0x80:
            return False
        if (slen > 1 and (sig[6+rlen] == 0) and not (sig[7+rlen] & 0x80)):
            return False
        s = int.from_bytes(sig[6+rlen:6+rlen+slen], 'big')

        # Verify that r and s are within the group order
        if r < 1 or s < 1 or r >= SECP256K1_ORDER or s >= SECP256K1_ORDER:
            return False
        if low_s and s >= SECP256K1_ORDER_HALF:
            return False
        z = int.from_bytes(msg, 'big')

        # Run verifier algorithm on r, s
        w = modinv(s, SECP256K1_ORDER)
        u1 = z*w % SECP256K1_ORDER
        u2 = r*w % SECP256K1_ORDER
        R = SECP256K1.affine(SECP256K1.mul([(SECP256K1_G, u1), (self.p, u2)]))
        if R is None or (R[0] % SECP256K1_ORDER) != r:
            return False
        return True

def generate_privkey():
    """Generate a valid random 32-byte private key."""
    return random.randrange(1, SECP256K1_ORDER).to_bytes(32, 'big')

def rfc6979_nonce(key):
    """Compute signing nonce using RFC6979."""
    v = bytes([1] * 32)
    k = bytes([0] * 32)
    k = hmac.new(k, v + b"\x00" + key, 'sha256').digest()
    v = hmac.new(k, v, 'sha256').digest()
    k = hmac.new(k, v + b"\x01" + key, 'sha256').digest()
    v = hmac.new(k, v, 'sha256').digest()
    return hmac.new(k, v, 'sha256').digest()

class ECKey():
    """A secp256k1 private key"""

    def __init__(self):
        self.valid = False

    def set(self, secret, compressed):
        """Construct a private key object with given 32-byte secret and compressed flag."""
        assert(len(secret) == 32)
        secret = int.from_bytes(secret, 'big')
        self.valid = (secret > 0 and secret < SECP256K1_ORDER)
        if self.valid:
            self.secret = secret
            self.compressed = compressed

    def generate(self, compressed=True):
        """Generate a random private key (compressed or uncompressed)."""
        self.set(generate_privkey(), compressed)

    def get_bytes(self):
        """Retrieve the 32-byte representation of this key."""
        assert(self.valid)
        return self.secret.to_bytes(32, 'big')

    @property
    def is_valid(self):
        return self.valid

    @property
    def is_compressed(self):
        return self.compressed

    def get_pubkey(self):
        """Compute an ECPubKey object for this secret key."""
        assert(self.valid)
        ret = ECPubKey()
        p = SECP256K1.mul([(SECP256K1_G, self.secret)])
        ret.p = p
        ret.valid = True
        ret.compressed = self.compressed
        return ret

    def sign_ecdsa(self, msg, low_s=True, rfc6979=False):
        """Construct a DER-encoded ECDSA signature with this key.

        See https://en.wikipedia.org/wiki/Elliptic_Curve_Digital_Signature_Algorithm for the
        ECDSA signer algorithm."""
        assert(self.valid)
        z = int.from_bytes(msg, 'big')
        # Note: no RFC6979 by default, but a simple random nonce (some tests rely on distinct transactions for the same operation)
        if rfc6979:
            k = int.from_bytes(rfc6979_nonce(self.secret.to_bytes(32, 'big') + msg), 'big')
        else:
            k = random.randrange(1, SECP256K1_ORDER)
        R = SECP256K1.affine(SECP256K1.mul([(SECP256K1_G, k)]))
        r = R[0] % SECP256K1_ORDER
        s = (modinv(k, SECP256K1_ORDER) * (z + self.secret * r)) % SECP256K1_ORDER
        if low_s and s > SECP256K1_ORDER_HALF:
            s = SECP256K1_ORDER - s
        # Represent in DER format. The byte representations of r and s have
        # length rounded up (255 bits becomes 32 bytes and 256 bits becomes 33
        # bytes).
        rb = r.to_bytes((r.bit_length() + 8) // 8, 'big')
        sb = s.to_bytes((s.bit_length() + 8) // 8, 'big')
        return b'\x30' + bytes([4 + len(rb) + len(sb), 2, len(rb)]) + rb + bytes([2, len(sb)]) + sb
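
# Illustrative sketch: an ECDSA sign/verify round trip with these test-only
# classes (msg must be a 32-byte hash; the message bytes are placeholders):
#
#   priv = ECKey()
#   priv.generate()
#   msg = hashlib.sha256(b'example').digest()
#   sig = priv.sign_ecdsa(msg)
#   assert priv.get_pubkey().verify_ecdsa(sig, msg)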

def compute_xonly_pubkey(key):
    """Compute an x-only (32 byte) public key from a (32 byte) private key.

    This also returns whether the resulting public key was negated.
    """

    assert len(key) == 32
    x = int.from_bytes(key, 'big')
    if x == 0 or x >= SECP256K1_ORDER:
        return (None, None)
    P = SECP256K1.affine(SECP256K1.mul([(SECP256K1_G, x)]))
    return (P[0].to_bytes(32, 'big'), not SECP256K1.has_even_y(P))

def tweak_add_privkey(key, tweak):
    """Tweak a private key (after negating it if needed)."""

    assert len(key) == 32
    assert len(tweak) == 32

    x = int.from_bytes(key, 'big')
    if x == 0 or x >= SECP256K1_ORDER:
        return None
    if not SECP256K1.has_even_y(SECP256K1.mul([(SECP256K1_G, x)])):
        x = SECP256K1_ORDER - x
    t = int.from_bytes(tweak, 'big')
    if t >= SECP256K1_ORDER:
        return None
    x = (x + t) % SECP256K1_ORDER
    if x == 0:
        return None
    return x.to_bytes(32, 'big')

def tweak_add_pubkey(key, tweak):
    """Tweak a public key and return whether the result had to be negated."""

    assert len(key) == 32
    assert len(tweak) == 32

    x_coord = int.from_bytes(key, 'big')
    if x_coord >= SECP256K1_FIELD_SIZE:
        return None
    P = SECP256K1.lift_x(x_coord)
    if P is None:
        return None
    t = int.from_bytes(tweak, 'big')
    if t >= SECP256K1_ORDER:
        return None
    Q = SECP256K1.affine(SECP256K1.mul([(SECP256K1_G, t), (P, 1)]))
    if Q is None:
        return None
    return (Q[0].to_bytes(32, 'big'), not SECP256K1.has_even_y(Q))

def verify_schnorr(key, sig, msg):
    """Verify a Schnorr signature (see BIP 340).

    - key is a 32-byte xonly pubkey (computed using compute_xonly_pubkey).
    - sig is a 64-byte Schnorr signature
    - msg is a 32-byte message
    """
    assert len(key) == 32
    assert len(msg) == 32
    assert len(sig) == 64

    x_coord = int.from_bytes(key, 'big')
    if x_coord == 0 or x_coord >= SECP256K1_FIELD_SIZE:
        return False
    P = SECP256K1.lift_x(x_coord)
    if P is None:
        return False
    r = int.from_bytes(sig[0:32], 'big')
    if r >= SECP256K1_FIELD_SIZE:
        return False
    s = int.from_bytes(sig[32:64], 'big')
    if s >= SECP256K1_ORDER:
        return False
    e = int.from_bytes(TaggedHash("BIP0340/challenge", sig[0:32] + key + msg), 'big') % SECP256K1_ORDER
    R = SECP256K1.mul([(SECP256K1_G, s), (P, SECP256K1_ORDER - e)])
    if not SECP256K1.has_even_y(R):
        return False
    if ((r * R[2] * R[2]) % SECP256K1_FIELD_SIZE) != R[0]:
        return False
    return True

def sign_schnorr(key, msg, aux=None, flip_p=False, flip_r=False):
    """Create a Schnorr signature (see BIP 340)."""

    if aux is None:
        aux = bytes(32)

    assert len(key) == 32
    assert len(msg) == 32
    assert len(aux) == 32

    sec = int.from_bytes(key, 'big')
    if sec == 0 or sec >= SECP256K1_ORDER:
        return None
    P = SECP256K1.affine(SECP256K1.mul([(SECP256K1_G, sec)]))
    if SECP256K1.has_even_y(P) == flip_p:
        sec = SECP256K1_ORDER - sec
    t = (sec ^ int.from_bytes(TaggedHash("BIP0340/aux", aux), 'big')).to_bytes(32, 'big')
    kp = int.from_bytes(TaggedHash("BIP0340/nonce", t + P[0].to_bytes(32, 'big') + msg), 'big') % SECP256K1_ORDER
    assert kp != 0
    R = SECP256K1.affine(SECP256K1.mul([(SECP256K1_G, kp)]))
    k = kp if SECP256K1.has_even_y(R) != flip_r else SECP256K1_ORDER - kp
    e = int.from_bytes(TaggedHash("BIP0340/challenge", R[0].to_bytes(32, 'big') + P[0].to_bytes(32, 'big') + msg), 'big') % SECP256K1_ORDER
    return R[0].to_bytes(32, 'big') + ((k + e * sec) % SECP256K1_ORDER).to_bytes(32, 'big')
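
# Illustrative sketch: BIP340 key generation, signing, and verification using
# only the helpers defined above (the message bytes are placeholders):
#
#   priv = generate_privkey()
#   pub, _negated = compute_xonly_pubkey(priv)
#   msg = hashlib.sha256(b'example').digest()
#   sig = sign_schnorr(priv, msg)
#   assert verify_schnorr(pub, sig, msg)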

class TestFrameworkKey(unittest.TestCase):
    def test_schnorr(self):
        """Test the Python Schnorr implementation."""
        byte_arrays = [generate_privkey() for _ in range(3)] + [v.to_bytes(32, 'big') for v in [0, SECP256K1_ORDER - 1, SECP256K1_ORDER, 2**256 - 1]]
        keys = {}
        for privkey in byte_arrays:  # build array of key/pubkey pairs
            pubkey, _ = compute_xonly_pubkey(privkey)
            if pubkey is not None:
                keys[privkey] = pubkey
        for msg in byte_arrays:  # test every combination of message, signing key, verification key
            for sign_privkey, _ in keys.items():
                sig = sign_schnorr(sign_privkey, msg)
                for verify_privkey, verify_pubkey in keys.items():
                    if verify_privkey == sign_privkey:
                        self.assertTrue(verify_schnorr(verify_pubkey, sig, msg))
                        sig = list(sig)
                        sig[random.randrange(64)] ^= (1 << (random.randrange(8)))  # damaging signature should break things
                        sig = bytes(sig)
                    self.assertFalse(verify_schnorr(verify_pubkey, sig, msg))

    def test_schnorr_testvectors(self):
        """Implement the BIP340 test vectors (read from bip340_test_vectors.csv)."""
        num_tests = 0
        vectors_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'bip340_test_vectors.csv')
        with open(vectors_file, newline='', encoding='utf8') as csvfile:
            reader = csv.reader(csvfile)
            next(reader)
            for row in reader:
                (i_str, seckey_hex, pubkey_hex, aux_rand_hex, msg_hex, sig_hex, result_str, comment) = row
                i = int(i_str)
                pubkey = bytes.fromhex(pubkey_hex)
                msg = bytes.fromhex(msg_hex)
                sig = bytes.fromhex(sig_hex)
                result = result_str == 'TRUE'
                if seckey_hex != '':
                    seckey = bytes.fromhex(seckey_hex)
                    pubkey_actual = compute_xonly_pubkey(seckey)[0]
                    self.assertEqual(pubkey.hex(), pubkey_actual.hex(), "BIP340 test vector %i (%s): pubkey mismatch" % (i, comment))
                    aux_rand = bytes.fromhex(aux_rand_hex)
                    try:
                        sig_actual = sign_schnorr(seckey, msg, aux_rand)
                        self.assertEqual(sig.hex(), sig_actual.hex(), "BIP340 test vector %i (%s): sig mismatch" % (i, comment))
                    except RuntimeError as e:
                        self.fail("BIP340 test vector %i (%s): signing raised exception %s" % (i, comment, e))
                result_actual = verify_schnorr(pubkey, sig, msg)
                if result:
                    self.assertEqual(result, result_actual, "BIP340 test vector %i (%s): verification failed" % (i, comment))
                else:
                    self.assertEqual(result, result_actual, "BIP340 test vector %i (%s): verification succeeded unexpectedly" % (i, comment))
                num_tests += 1
        self.assertTrue(num_tests >= 15)  # expect at least 15 test vectors
1819
miner_imports/test_framework/messages.py
Executable file
File diff suppressed because it is too large
112
miner_imports/test_framework/muhash.py
Normal file
@@ -0,0 +1,112 @@
# Copyright (c) 2020 Pieter Wuille
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Native Python MuHash3072 implementation."""

import hashlib
import unittest

from .util import modinv

def rot32(v, bits):
    """Rotate the 32-bit value v left by bits bits."""
    bits %= 32  # Make sure the term below does not throw an exception
    return ((v << bits) & 0xffffffff) | (v >> (32 - bits))

def chacha20_doubleround(s):
    """Apply a ChaCha20 double round to 16-element state array s.

    See https://cr.yp.to/chacha/chacha-20080128.pdf and https://tools.ietf.org/html/rfc8439
    """
    QUARTER_ROUNDS = [(0, 4, 8, 12),
                      (1, 5, 9, 13),
                      (2, 6, 10, 14),
                      (3, 7, 11, 15),
                      (0, 5, 10, 15),
                      (1, 6, 11, 12),
                      (2, 7, 8, 13),
                      (3, 4, 9, 14)]

    for a, b, c, d in QUARTER_ROUNDS:
        s[a] = (s[a] + s[b]) & 0xffffffff
        s[d] = rot32(s[d] ^ s[a], 16)
        s[c] = (s[c] + s[d]) & 0xffffffff
        s[b] = rot32(s[b] ^ s[c], 12)
        s[a] = (s[a] + s[b]) & 0xffffffff
        s[d] = rot32(s[d] ^ s[a], 8)
        s[c] = (s[c] + s[d]) & 0xffffffff
        s[b] = rot32(s[b] ^ s[c], 7)

def chacha20_32_to_384(key32):
    """Specialized ChaCha20 implementation with 32-byte key, 0 IV, 384-byte output."""
    # See RFC 8439 section 2.3 for chacha20 parameters
    CONSTANTS = [0x61707865, 0x3320646e, 0x79622d32, 0x6b206574]

    key_bytes = [0]*8
    for i in range(8):
        key_bytes[i] = int.from_bytes(key32[(4 * i):(4 * (i+1))], 'little')

    INITIALIZATION_VECTOR = [0] * 4
    init = CONSTANTS + key_bytes + INITIALIZATION_VECTOR
    out = bytearray()
    for counter in range(6):
        init[12] = counter
        s = init.copy()
        for _ in range(10):
            chacha20_doubleround(s)
        for i in range(16):
            out.extend(((s[i] + init[i]) & 0xffffffff).to_bytes(4, 'little'))
    return bytes(out)

def data_to_num3072(data):
    """Hash a 32-byte array data to a 3072-bit number using 6 Chacha20 operations."""
    bytes384 = chacha20_32_to_384(data)
    return int.from_bytes(bytes384, 'little')

class MuHash3072:
    """Class representing the MuHash3072 computation of a set.

    See https://cseweb.ucsd.edu/~mihir/papers/inchash.pdf and https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2017-May/014337.html
    """

    MODULUS = 2**3072 - 1103717

    def __init__(self):
        """Initialize for an empty set."""
        self.numerator = 1
        self.denominator = 1

    def insert(self, data):
        """Insert a byte array data in the set."""
        data_hash = hashlib.sha256(data).digest()
        self.numerator = (self.numerator * data_to_num3072(data_hash)) % self.MODULUS

    def remove(self, data):
        """Remove a byte array from the set."""
        data_hash = hashlib.sha256(data).digest()
        self.denominator = (self.denominator * data_to_num3072(data_hash)) % self.MODULUS

    def digest(self):
        """Extract the final hash. Does not modify this object."""
        val = (self.numerator * modinv(self.denominator, self.MODULUS)) % self.MODULUS
        bytes384 = val.to_bytes(384, 'little')
        return hashlib.sha256(bytes384).digest()
|
||||||
|
|
||||||
|
class TestFrameworkMuhash(unittest.TestCase):
|
||||||
|
def test_muhash(self):
|
||||||
|
muhash = MuHash3072()
|
||||||
|
muhash.insert(b'\x00' * 32)
|
||||||
|
muhash.insert((b'\x01' + b'\x00' * 31))
|
||||||
|
muhash.remove((b'\x02' + b'\x00' * 31))
|
||||||
|
finalized = muhash.digest()
|
||||||
|
# This mirrors the result in the C++ MuHash3072 unit test
|
||||||
|
self.assertEqual(finalized[::-1].hex(), "10d312b100cbd32ada024a6646e40d3482fcff103668d2625f10002a607d5863")
|
||||||
|
|
||||||
|
def test_chacha20(self):
|
||||||
|
def chacha_check(key, result):
|
||||||
|
self.assertEqual(chacha20_32_to_384(key)[:64].hex(), result)
|
||||||
|
|
||||||
|
# Test vectors from https://tools.ietf.org/html/draft-agl-tls-chacha20poly1305-04#section-7
|
||||||
|
# Since the nonce is hardcoded to 0 in our function we only use those vectors.
|
||||||
|
chacha_check([0]*32, "76b8e0ada0f13d90405d6ae55386bd28bdd219b8a08ded1aa836efcc8b770dc7da41597c5157488d7724e03fb8d84a376a43b8f41518a11cc387b669b2ee6586")
|
||||||
|
chacha_check([0]*31 + [1], "4540f05a9f1fb296d7736e7b208e3c96eb4fe1834688d2604f450952ed432d41bbe2a0b6ea7566d2a5d1e7e20d42af2c53d792b1c43fea817e9ad275ae546963")
|
||||||
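A short standalone sketch (not part of the diff) of how MuHash3072 behaves: because insert() multiplies into the numerator and remove() into the denominator, the set digest is independent of insertion order, and a remove() exactly cancels a prior insert().

    a = MuHash3072()
    a.insert(b'\x01' * 32)
    a.insert(b'\x02' * 32)

    b = MuHash3072()
    b.insert(b'\x02' * 32)  # same elements, opposite order
    b.insert(b'\x01' * 32)
    assert a.digest() == b.digest()

    a.insert(b'\x03' * 32)
    a.remove(b'\x03' * 32)  # removal cancels the earlier insert
    assert a.digest() == b.digest()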
155 miner_imports/test_framework/netutil.py (Normal file)
@@ -0,0 +1,155 @@
#!/usr/bin/env python3
# Copyright (c) 2014-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Linux network utilities.

Roughly based on http://voorloopnul.com/blog/a-python-netstat-in-less-than-100-lines-of-code/ by Ricardo Pascal
"""

import sys
import socket
import struct
import array
import os

# STATE_ESTABLISHED = '01'
# STATE_SYN_SENT = '02'
# STATE_SYN_RECV = '03'
# STATE_FIN_WAIT1 = '04'
# STATE_FIN_WAIT2 = '05'
# STATE_TIME_WAIT = '06'
# STATE_CLOSE = '07'
# STATE_CLOSE_WAIT = '08'
# STATE_LAST_ACK = '09'
STATE_LISTEN = '0A'
# STATE_CLOSING = '0B'

def get_socket_inodes(pid):
    '''
    Get list of socket inodes for process pid.
    '''
    base = '/proc/%i/fd' % pid
    inodes = []
    for item in os.listdir(base):
        target = os.readlink(os.path.join(base, item))
        if target.startswith('socket:'):
            inodes.append(int(target[8:-1]))
    return inodes

def _remove_empty(array):
    return [x for x in array if x !='']

def _convert_ip_port(array):
    host,port = array.split(':')
    # convert host from mangled-per-four-bytes form as used by kernel
    host = bytes.fromhex(host)
    host_out = ''
    for x in range(0, len(host) // 4):
        (val,) = struct.unpack('=I', host[x*4:(x+1)*4])
        host_out += '%08x' % val

    return host_out,int(port,16)

def netstat(typ='tcp'):
    '''
    Function to return a list with status of tcp connections at linux systems
    To get pid of all network process running on system, you must run this script
    as superuser
    '''
    with open('/proc/net/'+typ,'r',encoding='utf8') as f:
        content = f.readlines()
        content.pop(0)
    result = []
    for line in content:
        line_array = _remove_empty(line.split(' '))  # Split lines and remove empty spaces.
        tcp_id = line_array[0]
        l_addr = _convert_ip_port(line_array[1])
        r_addr = _convert_ip_port(line_array[2])
        state = line_array[3]
        inode = int(line_array[9])  # Need the inode to match with process pid.
        nline = [tcp_id, l_addr, r_addr, state, inode]
        result.append(nline)
    return result

def get_bind_addrs(pid):
    '''
    Get bind addresses as (host,port) tuples for process pid.
    '''
    inodes = get_socket_inodes(pid)
    bind_addrs = []
    for conn in netstat('tcp') + netstat('tcp6'):
        if conn[3] == STATE_LISTEN and conn[4] in inodes:
            bind_addrs.append(conn[1])
    return bind_addrs

# from: https://code.activestate.com/recipes/439093/
def all_interfaces():
    '''
    Return all interfaces that are up
    '''
    import fcntl  # Linux only, so only import when required

    is_64bits = sys.maxsize > 2**32
    struct_size = 40 if is_64bits else 32
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    max_possible = 8  # initial value
    while True:
        bytes = max_possible * struct_size
        names = array.array('B', b'\0' * bytes)
        outbytes = struct.unpack('iL', fcntl.ioctl(
            s.fileno(),
            0x8912,  # SIOCGIFCONF
            struct.pack('iL', bytes, names.buffer_info()[0])
        ))[0]
        if outbytes == bytes:
            max_possible *= 2
        else:
            break
    namestr = names.tobytes()
    return [(namestr[i:i+16].split(b'\0', 1)[0],
             socket.inet_ntoa(namestr[i+20:i+24]))
            for i in range(0, outbytes, struct_size)]

def addr_to_hex(addr):
    '''
    Convert string IPv4 or IPv6 address to binary address as returned by
    get_bind_addrs.
    Very naive implementation that certainly doesn't work for all IPv6 variants.
    '''
    if '.' in addr:  # IPv4
        addr = [int(x) for x in addr.split('.')]
    elif ':' in addr:  # IPv6
        sub = [[], []]  # prefix, suffix
        x = 0
        addr = addr.split(':')
        for i,comp in enumerate(addr):
            if comp == '':
                if i == 0 or i == (len(addr)-1):  # skip empty component at beginning or end
                    continue
                x += 1  # :: skips to suffix
                assert x < 2
            else:  # two bytes per component
                val = int(comp, 16)
                sub[x].append(val >> 8)
                sub[x].append(val & 0xff)
        nullbytes = 16 - len(sub[0]) - len(sub[1])
        assert (x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0)
        addr = sub[0] + ([0] * nullbytes) + sub[1]
    else:
        raise ValueError('Could not parse address %s' % addr)
    return bytearray(addr).hex()

def test_ipv6_local():
    '''
    Check for (local) IPv6 support.
    '''
    # By using SOCK_DGRAM this will not actually make a connection, but it will
    # fail if there is no route to IPv6 localhost.
    have_ipv6 = True
    try:
        s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
        s.connect(('::1', 1))
    except socket.error:
        have_ipv6 = False
    return have_ipv6
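As a usage sketch (Linux only, since these helpers read /proc; not part of the diff), a test can check which addresses its own process has bound, and addr_to_hex produces the same binary-host encoding that get_bind_addrs returns:

    import os
    from test_framework.netutil import addr_to_hex, get_bind_addrs

    # (hex-encoded host, port) tuples for every LISTEN socket of this process.
    for host, port in get_bind_addrs(os.getpid()):
        print(host, port)

    # Comparable encoding for a literal address:
    assert addr_to_hex('127.0.0.1') == '7f000001'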
795 miner_imports/test_framework/p2p.py (Executable file)
@@ -0,0 +1,795 @@
#!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test objects for interacting with a bitcoind node over the p2p protocol.

The P2PInterface objects interact with the bitcoind nodes under test using the
node's p2p interface. They can be used to send messages to the node, and
callbacks can be registered that execute when messages are received from the
node. Messages are sent to/received from the node on an asyncio event loop.
State held inside the objects must be guarded by the p2p_lock to avoid data
races between the main testing thread and the event loop.

P2PConnection: A low-level connection object to a node's P2P interface
P2PInterface: A high-level interface object for communicating to a node over P2P
P2PDataStore: A p2p interface class that keeps a store of transactions and blocks
              and can respond correctly to getdata and getheaders messages
P2PTxInvStore: A p2p interface class that inherits from P2PDataStore, and keeps
               a count of how many times each txid has been announced."""

import asyncio
from collections import defaultdict
from io import BytesIO
import logging
import struct
import sys
import threading

from test_framework.messages import (
    CBlockHeader,
    MAX_HEADERS_RESULTS,
    msg_addr,
    msg_addrv2,
    msg_block,
    MSG_BLOCK,
    msg_blocktxn,
    msg_cfcheckpt,
    msg_cfheaders,
    msg_cfilter,
    msg_cmpctblock,
    msg_feefilter,
    msg_filteradd,
    msg_filterclear,
    msg_filterload,
    msg_getaddr,
    msg_getblocks,
    msg_getblocktxn,
    msg_getdata,
    msg_getheaders,
    msg_headers,
    msg_inv,
    msg_mempool,
    msg_merkleblock,
    msg_notfound,
    msg_ping,
    msg_pong,
    msg_sendaddrv2,
    msg_sendcmpct,
    msg_sendheaders,
    msg_tx,
    MSG_TX,
    MSG_TYPE_MASK,
    msg_verack,
    msg_version,
    MSG_WTX,
    msg_wtxidrelay,
    NODE_NETWORK,
    NODE_WITNESS,
    sha256,
)
from test_framework.util import (
    MAX_NODES,
    p2p_port,
    wait_until_helper,
)

logger = logging.getLogger("TestFramework.p2p")

# The minimum P2P version that this test framework supports
MIN_P2P_VERSION_SUPPORTED = 60001
# The P2P version that this test framework implements and sends in its `version` message
# Version 70016 supports wtxid relay
P2P_VERSION = 70016
# The services that this test framework offers in its `version` message
P2P_SERVICES = NODE_NETWORK | NODE_WITNESS
# The P2P user agent string that this test framework sends in its `version` message
P2P_SUBVERSION = "/python-p2p-tester:0.0.3/"
# Value for relay that this test framework sends in its `version` message
P2P_VERSION_RELAY = 1
# Delay after receiving a tx inv before requesting transactions from non-preferred peers, in seconds
NONPREF_PEER_TX_DELAY = 2

MESSAGEMAP = {
    b"addr": msg_addr,
    b"addrv2": msg_addrv2,
    b"block": msg_block,
    b"blocktxn": msg_blocktxn,
    b"cfcheckpt": msg_cfcheckpt,
    b"cfheaders": msg_cfheaders,
    b"cfilter": msg_cfilter,
    b"cmpctblock": msg_cmpctblock,
    b"feefilter": msg_feefilter,
    b"filteradd": msg_filteradd,
    b"filterclear": msg_filterclear,
    b"filterload": msg_filterload,
    b"getaddr": msg_getaddr,
    b"getblocks": msg_getblocks,
    b"getblocktxn": msg_getblocktxn,
    b"getdata": msg_getdata,
    b"getheaders": msg_getheaders,
    b"headers": msg_headers,
    b"inv": msg_inv,
    b"mempool": msg_mempool,
    b"merkleblock": msg_merkleblock,
    b"notfound": msg_notfound,
    b"ping": msg_ping,
    b"pong": msg_pong,
    b"sendaddrv2": msg_sendaddrv2,
    b"sendcmpct": msg_sendcmpct,
    b"sendheaders": msg_sendheaders,
    b"tx": msg_tx,
    b"verack": msg_verack,
    b"version": msg_version,
    b"wtxidrelay": msg_wtxidrelay,
}

MAGIC_BYTES = {
    "mainnet": b"\xf9\xbe\xb4\xd9",   # mainnet
    "testnet3": b"\x0b\x11\x09\x07",  # testnet3
    "regtest": b"\xfa\xbf\xb5\xda",   # regtest
    "signet": b"\x0a\x03\xcf\x40",    # signet
}


class P2PConnection(asyncio.Protocol):
    """A low-level connection object to a node's P2P interface.

    This class is responsible for:

    - opening and closing the TCP connection to the node
    - reading bytes from and writing bytes to the socket
    - deserializing and serializing the P2P message header
    - logging messages as they are sent and received

    This class contains no logic for handing the P2P message payloads. It must be
    sub-classed and the on_message() callback overridden."""

    def __init__(self):
        # The underlying transport of the connection.
        # Should only call methods on this from the NetworkThread, c.f. call_soon_threadsafe
        self._transport = None

    @property
    def is_connected(self):
        return self._transport is not None

    def peer_connect_helper(self, dstaddr, dstport, net, timeout_factor):
        assert not self.is_connected
        self.timeout_factor = timeout_factor
        self.dstaddr = dstaddr
        self.dstport = dstport
        # The initial message to send after the connection was made:
        self.on_connection_send_msg = None
        self.recvbuf = b""
        self.magic_bytes = MAGIC_BYTES[net]

    def peer_connect(self, dstaddr, dstport, *, net, timeout_factor):
        self.peer_connect_helper(dstaddr, dstport, net, timeout_factor)

        loop = NetworkThread.network_event_loop
        logger.debug('Connecting to Bitcoin Node: %s:%d' % (self.dstaddr, self.dstport))
        coroutine = loop.create_connection(lambda: self, host=self.dstaddr, port=self.dstport)
        return lambda: loop.call_soon_threadsafe(loop.create_task, coroutine)

    def peer_accept_connection(self, connect_id, connect_cb=lambda: None, *, net, timeout_factor):
        self.peer_connect_helper('0', 0, net, timeout_factor)

        logger.debug('Listening for Bitcoin Node with id: {}'.format(connect_id))
        return lambda: NetworkThread.listen(self, connect_cb, idx=connect_id)

    def peer_disconnect(self):
        # Connection could have already been closed by other end.
        NetworkThread.network_event_loop.call_soon_threadsafe(lambda: self._transport and self._transport.abort())

    # Connection and disconnection methods

    def connection_made(self, transport):
        """asyncio callback when a connection is opened."""
        assert not self._transport
        logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport))
        self._transport = transport
        if self.on_connection_send_msg:
            self.send_message(self.on_connection_send_msg)
            self.on_connection_send_msg = None  # Never used again
        self.on_open()

    def connection_lost(self, exc):
        """asyncio callback when a connection is closed."""
        if exc:
            logger.warning("Connection lost to {}:{} due to {}".format(self.dstaddr, self.dstport, exc))
        else:
            logger.debug("Closed connection to: %s:%d" % (self.dstaddr, self.dstport))
        self._transport = None
        self.recvbuf = b""
        self.on_close()

    # Socket read methods

    def data_received(self, t):
        """asyncio callback when data is read from the socket."""
        if len(t) > 0:
            self.recvbuf += t
            self._on_data()

    def _on_data(self):
        """Try to read P2P messages from the recv buffer.

        This method reads data from the buffer in a loop. It deserializes,
        parses and verifies the P2P header, then passes the P2P payload to
        the on_message callback for processing."""
        try:
            while True:
                if len(self.recvbuf) < 4:
                    return
                if self.recvbuf[:4] != self.magic_bytes:
                    raise ValueError("magic bytes mismatch: {} != {}".format(repr(self.magic_bytes), repr(self.recvbuf)))
                if len(self.recvbuf) < 4 + 12 + 4 + 4:
                    return
                msgtype = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
                msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
                checksum = self.recvbuf[4+12+4:4+12+4+4]
                if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
                    return
                msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
                th = sha256(msg)
                h = sha256(th)
                if checksum != h[:4]:
                    raise ValueError("got bad checksum " + repr(self.recvbuf))
                self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
                if msgtype not in MESSAGEMAP:
                    raise ValueError("Received unknown msgtype from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, msgtype, repr(msg)))
                f = BytesIO(msg)
                t = MESSAGEMAP[msgtype]()
                t.deserialize(f)
                self._log_message("receive", t)
                self.on_message(t)
        except Exception as e:
            logger.exception('Error reading message:', repr(e))
            raise

    def on_message(self, message):
        """Callback for processing a P2P payload. Must be overridden by derived class."""
        raise NotImplementedError

    # Socket write methods

    def send_message(self, message):
        """Send a P2P message over the socket.

        This method takes a P2P payload, builds the P2P header and adds
        the message to the send buffer to be sent over the socket."""
        tmsg = self.build_message(message)
        self._log_message("send", message)
        return self.send_raw_message(tmsg)

    def send_raw_message(self, raw_message_bytes):
        if not self.is_connected:
            raise IOError('Not connected')

        def maybe_write():
            if not self._transport:
                return
            if self._transport.is_closing():
                return
            self._transport.write(raw_message_bytes)
        NetworkThread.network_event_loop.call_soon_threadsafe(maybe_write)

    # Class utility methods

    def build_message(self, message):
        """Build a serialized P2P message"""
        msgtype = message.msgtype
        data = message.serialize()
        tmsg = self.magic_bytes
        tmsg += msgtype
        tmsg += b"\x00" * (12 - len(msgtype))
        tmsg += struct.pack("<I", len(data))
        th = sha256(data)
        h = sha256(th)
        tmsg += h[:4]
        tmsg += data
        return tmsg

    def _log_message(self, direction, msg):
        """Logs a message being sent or received over the connection."""
        if direction == "send":
            log_message = "Send message to "
        elif direction == "receive":
            log_message = "Received message from "
        log_message += "%s:%d: %s" % (self.dstaddr, self.dstport, repr(msg)[:500])
        if len(log_message) > 500:
            log_message += "... (msg truncated)"
        logger.debug(log_message)


class P2PInterface(P2PConnection):
    """A high-level P2P interface class for communicating with a Bitcoin node.

    This class provides high-level callbacks for processing P2P message
    payloads, as well as convenience methods for interacting with the
    node over P2P.

    Individual testcases should subclass this and override the on_* methods
    if they want to alter message handling behaviour."""
    def __init__(self, support_addrv2=False, wtxidrelay=True):
        super().__init__()

        # Track number of messages of each type received.
        # Should be read-only in a test.
        self.message_count = defaultdict(int)

        # Track the most recent message of each type.
        # To wait for a message to be received, pop that message from
        # this and use self.wait_until.
        self.last_message = {}

        # A count of the number of ping messages we've sent to the node
        self.ping_counter = 1

        # The network services received from the peer
        self.nServices = 0

        self.support_addrv2 = support_addrv2

        # If the peer supports wtxid-relay
        self.wtxidrelay = wtxidrelay

    def peer_connect_send_version(self, services):
        # Send a version msg
        vt = msg_version()
        vt.nVersion = P2P_VERSION
        vt.strSubVer = P2P_SUBVERSION
        vt.relay = P2P_VERSION_RELAY
        vt.nServices = services
        vt.addrTo.ip = self.dstaddr
        vt.addrTo.port = self.dstport
        vt.addrFrom.ip = "0.0.0.0"
        vt.addrFrom.port = 0
        self.on_connection_send_msg = vt  # Will be sent in connection_made callback

    def peer_connect(self, *args, services=P2P_SERVICES, send_version=True, **kwargs):
        create_conn = super().peer_connect(*args, **kwargs)

        if send_version:
            self.peer_connect_send_version(services)

        return create_conn

    def peer_accept_connection(self, *args, services=P2P_SERVICES, **kwargs):
        create_conn = super().peer_accept_connection(*args, **kwargs)
        self.peer_connect_send_version(services)

        return create_conn

    # Message receiving methods

    def on_message(self, message):
        """Receive message and dispatch message to appropriate callback.

        We keep a count of how many of each message type has been received
        and the most recent message of each type."""
        with p2p_lock:
            try:
                msgtype = message.msgtype.decode('ascii')
                self.message_count[msgtype] += 1
                self.last_message[msgtype] = message
                getattr(self, 'on_' + msgtype)(message)
            except:
                print("ERROR delivering %s (%s)" % (repr(message), sys.exc_info()[0]))
                raise

    # Callback methods. Can be overridden by subclasses in individual test
    # cases to provide custom message handling behaviour.

    def on_open(self):
        pass

    def on_close(self):
        pass

    def on_addr(self, message): pass
    def on_addrv2(self, message): pass
    def on_block(self, message): pass
    def on_blocktxn(self, message): pass
    def on_cfcheckpt(self, message): pass
    def on_cfheaders(self, message): pass
    def on_cfilter(self, message): pass
    def on_cmpctblock(self, message): pass
    def on_feefilter(self, message): pass
    def on_filteradd(self, message): pass
    def on_filterclear(self, message): pass
    def on_filterload(self, message): pass
    def on_getaddr(self, message): pass
    def on_getblocks(self, message): pass
    def on_getblocktxn(self, message): pass
    def on_getdata(self, message): pass
    def on_getheaders(self, message): pass
    def on_headers(self, message): pass
    def on_mempool(self, message): pass
    def on_merkleblock(self, message): pass
    def on_notfound(self, message): pass
    def on_pong(self, message): pass
    def on_sendaddrv2(self, message): pass
    def on_sendcmpct(self, message): pass
    def on_sendheaders(self, message): pass
    def on_tx(self, message): pass
    def on_wtxidrelay(self, message): pass

    def on_inv(self, message):
        want = msg_getdata()
        for i in message.inv:
            if i.type != 0:
                want.inv.append(i)
        if len(want.inv):
            self.send_message(want)

    def on_ping(self, message):
        self.send_message(msg_pong(message.nonce))

    def on_verack(self, message):
        pass

    def on_version(self, message):
        assert message.nVersion >= MIN_P2P_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions greater than {}".format(message.nVersion, MIN_P2P_VERSION_SUPPORTED)
        if message.nVersion >= 70016 and self.wtxidrelay:
            self.send_message(msg_wtxidrelay())
        if self.support_addrv2:
            self.send_message(msg_sendaddrv2())
        self.send_message(msg_verack())
        self.nServices = message.nServices
        self.send_message(msg_getaddr())

    # Connection helper methods

    def wait_until(self, test_function_in, *, timeout=60, check_connected=True):
        def test_function():
            if check_connected:
                assert self.is_connected
            return test_function_in()

        wait_until_helper(test_function, timeout=timeout, lock=p2p_lock, timeout_factor=self.timeout_factor)

    def wait_for_connect(self, timeout=60):
        test_function = lambda: self.is_connected
        wait_until_helper(test_function, timeout=timeout, lock=p2p_lock)

    def wait_for_disconnect(self, timeout=60):
        test_function = lambda: not self.is_connected
        self.wait_until(test_function, timeout=timeout, check_connected=False)

    # Message receiving helper methods

    def wait_for_tx(self, txid, timeout=60):
        def test_function():
            if not self.last_message.get('tx'):
                return False
            return self.last_message['tx'].tx.rehash() == txid

        self.wait_until(test_function, timeout=timeout)

    def wait_for_block(self, blockhash, timeout=60):
        def test_function():
            return self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash

        self.wait_until(test_function, timeout=timeout)

    def wait_for_header(self, blockhash, timeout=60):
        def test_function():
            last_headers = self.last_message.get('headers')
            if not last_headers:
                return False
            return last_headers.headers[0].rehash() == int(blockhash, 16)

        self.wait_until(test_function, timeout=timeout)

    def wait_for_merkleblock(self, blockhash, timeout=60):
        def test_function():
            last_filtered_block = self.last_message.get('merkleblock')
            if not last_filtered_block:
                return False
            return last_filtered_block.merkleblock.header.rehash() == int(blockhash, 16)

        self.wait_until(test_function, timeout=timeout)

    def wait_for_getdata(self, hash_list, timeout=60):
        """Waits for a getdata message.

        The object hashes in the inventory vector must match the provided hash_list."""
        def test_function():
            last_data = self.last_message.get("getdata")
            if not last_data:
                return False
            return [x.hash for x in last_data.inv] == hash_list

        self.wait_until(test_function, timeout=timeout)

    def wait_for_getheaders(self, timeout=60):
        """Waits for a getheaders message.

        Receiving any getheaders message will satisfy the predicate. the last_message["getheaders"]
        value must be explicitly cleared before calling this method, or this will return
        immediately with success. TODO: change this method to take a hash value and only
        return true if the correct block header has been requested."""
        def test_function():
            return self.last_message.get("getheaders")

        self.wait_until(test_function, timeout=timeout)

    def wait_for_inv(self, expected_inv, timeout=60):
        """Waits for an INV message and checks that the first inv object in the message was as expected."""
        if len(expected_inv) > 1:
            raise NotImplementedError("wait_for_inv() will only verify the first inv object")

        def test_function():
            return self.last_message.get("inv") and \
                self.last_message["inv"].inv[0].type == expected_inv[0].type and \
                self.last_message["inv"].inv[0].hash == expected_inv[0].hash

        self.wait_until(test_function, timeout=timeout)

    def wait_for_verack(self, timeout=60):
        def test_function():
            return "verack" in self.last_message

        self.wait_until(test_function, timeout=timeout)

    # Message sending helper functions

    def send_and_ping(self, message, timeout=60):
        self.send_message(message)
        self.sync_with_ping(timeout=timeout)

    def sync_send_with_ping(self, timeout=60):
        """Ensure SendMessages is called on this connection"""
        # Calling sync_with_ping twice requires that the node calls
        # `ProcessMessage` twice, and thus ensures `SendMessages` must have
        # been called at least once
        self.sync_with_ping()
        self.sync_with_ping()

    def sync_with_ping(self, timeout=60):
        """Ensure ProcessMessages is called on this connection"""
        self.send_message(msg_ping(nonce=self.ping_counter))

        def test_function():
            return self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter

        self.wait_until(test_function, timeout=timeout)
        self.ping_counter += 1


# One lock for synchronizing all data access between the network event loop (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# P2PConnection acquires this lock whenever delivering a message to a P2PInterface.
# This lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the P2PInterface or P2PConnection.
p2p_lock = threading.Lock()


class NetworkThread(threading.Thread):
    network_event_loop = None

    def __init__(self):
        super().__init__(name="NetworkThread")
        # There is only one event loop and no more than one thread must be created
        assert not self.network_event_loop

        NetworkThread.listeners = {}
        NetworkThread.protos = {}
        if sys.platform == 'win32':
            asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
        NetworkThread.network_event_loop = asyncio.new_event_loop()

    def run(self):
        """Start the network thread."""
        self.network_event_loop.run_forever()

    def close(self, timeout=10):
        """Close the connections and network event loop."""
        self.network_event_loop.call_soon_threadsafe(self.network_event_loop.stop)
        wait_until_helper(lambda: not self.network_event_loop.is_running(), timeout=timeout)
        self.network_event_loop.close()
        self.join(timeout)
        # Safe to remove event loop.
        NetworkThread.network_event_loop = None

    @classmethod
    def listen(cls, p2p, callback, port=None, addr=None, idx=1):
        """ Ensure a listening server is running on the given port, and run the
        protocol specified by `p2p` on the next connection to it. Once ready
        for connections, call `callback`."""

        if port is None:
            assert 0 < idx <= MAX_NODES
            port = p2p_port(MAX_NODES - idx)
        if addr is None:
            addr = '127.0.0.1'

        coroutine = cls.create_listen_server(addr, port, callback, p2p)
        cls.network_event_loop.call_soon_threadsafe(cls.network_event_loop.create_task, coroutine)

    @classmethod
    async def create_listen_server(cls, addr, port, callback, proto):
        def peer_protocol():
            """Returns a function that does the protocol handling for a new
            connection. To allow different connections to have different
            behaviors, the protocol function is first put in the cls.protos
            dict. When the connection is made, the function removes the
            protocol function from that dict, and returns it so the event loop
            can start executing it."""
            response = cls.protos.get((addr, port))
            cls.protos[(addr, port)] = None
            return response

        if (addr, port) not in cls.listeners:
            # When creating a listener on a given (addr, port) we only need to
            # do it once. If we want different behaviors for different
            # connections, we can accomplish this by providing different
            # `proto` functions

            listener = await cls.network_event_loop.create_server(peer_protocol, addr, port)
            logger.debug("Listening server on %s:%d should be started" % (addr, port))
            cls.listeners[(addr, port)] = listener

        cls.protos[(addr, port)] = proto
        callback(addr, port)


class P2PDataStore(P2PInterface):
    """A P2P data store class.

    Keeps a block and transaction store and responds correctly to getdata and getheaders requests."""

    def __init__(self):
        super().__init__()
        # store of blocks. key is block hash, value is a CBlock object
        self.block_store = {}
        self.last_block_hash = ''
        # store of txs. key is txid, value is a CTransaction object
        self.tx_store = {}
        self.getdata_requests = []

    def on_getdata(self, message):
        """Check for the tx/block in our stores and if found, reply with an inv message."""
        for inv in message.inv:
            self.getdata_requests.append(inv.hash)
            if (inv.type & MSG_TYPE_MASK) == MSG_TX and inv.hash in self.tx_store.keys():
                self.send_message(msg_tx(self.tx_store[inv.hash]))
            elif (inv.type & MSG_TYPE_MASK) == MSG_BLOCK and inv.hash in self.block_store.keys():
                self.send_message(msg_block(self.block_store[inv.hash]))
            else:
                logger.debug('getdata message type {} received.'.format(hex(inv.type)))

    def on_getheaders(self, message):
        """Search back through our block store for the locator, and reply with a headers message if found."""

        locator, hash_stop = message.locator, message.hashstop

        # Assume that the most recent block added is the tip
        if not self.block_store:
            return

        headers_list = [self.block_store[self.last_block_hash]]
        while headers_list[-1].sha256 not in locator.vHave:
            # Walk back through the block store, adding headers to headers_list
            # as we go.
            prev_block_hash = headers_list[-1].hashPrevBlock
            if prev_block_hash in self.block_store:
                prev_block_header = CBlockHeader(self.block_store[prev_block_hash])
                headers_list.append(prev_block_header)
                if prev_block_header.sha256 == hash_stop:
                    # if this is the hashstop header, stop here
                    break
            else:
                logger.debug('block hash {} not found in block store'.format(hex(prev_block_hash)))
                break

        # Truncate the list if there are too many headers
        headers_list = headers_list[:-MAX_HEADERS_RESULTS - 1:-1]
        response = msg_headers(headers_list)

        if response is not None:
            self.send_message(response)

    def send_blocks_and_test(self, blocks, node, *, success=True, force_send=False, reject_reason=None, expect_disconnect=False, timeout=60):
        """Send blocks to test node and test whether the tip advances.

         - add all blocks to our block_store
         - send a headers message for the final block
         - the on_getheaders handler will ensure that any getheaders are responded to
         - if force_send is False: wait for getdata for each of the blocks. The on_getdata handler will
           ensure that any getdata messages are responded to. Otherwise send the full block unsolicited.
         - if success is True: assert that the node's tip advances to the most recent block
         - if success is False: assert that the node's tip doesn't advance
         - if reject_reason is set: assert that the correct reject message is logged"""

        with p2p_lock:
            for block in blocks:
                self.block_store[block.sha256] = block
                self.last_block_hash = block.sha256

        reject_reason = [reject_reason] if reject_reason else []
        with node.assert_debug_log(expected_msgs=reject_reason):
            if force_send:
                for b in blocks:
                    self.send_message(msg_block(block=b))
            else:
                self.send_message(msg_headers([CBlockHeader(block) for block in blocks]))
                self.wait_until(
                    lambda: blocks[-1].sha256 in self.getdata_requests,
                    timeout=timeout,
                    check_connected=success,
                )

            if expect_disconnect:
                self.wait_for_disconnect(timeout=timeout)
            else:
                self.sync_with_ping(timeout=timeout)

            if success:
                self.wait_until(lambda: node.getbestblockhash() == blocks[-1].hash, timeout=timeout)
            else:
                assert node.getbestblockhash() != blocks[-1].hash

    def send_txs_and_test(self, txs, node, *, success=True, expect_disconnect=False, reject_reason=None):
        """Send txs to test node and test whether they're accepted to the mempool.

         - add all txs to our tx_store
         - send tx messages for all txs
         - if success is True/False: assert that the txs are/are not accepted to the mempool
         - if expect_disconnect is True: Skip the sync with ping
         - if reject_reason is set: assert that the correct reject message is logged."""

        with p2p_lock:
            for tx in txs:
                self.tx_store[tx.sha256] = tx

        reject_reason = [reject_reason] if reject_reason else []
        with node.assert_debug_log(expected_msgs=reject_reason):
            for tx in txs:
                self.send_message(msg_tx(tx))

            if expect_disconnect:
                self.wait_for_disconnect()
            else:
                self.sync_with_ping()

            raw_mempool = node.getrawmempool()
            if success:
                # Check that all txs are now in the mempool
                for tx in txs:
                    assert tx.hash in raw_mempool, "{} not found in mempool".format(tx.hash)
            else:
                # Check that none of the txs are now in the mempool
                for tx in txs:
                    assert tx.hash not in raw_mempool, "{} tx found in mempool".format(tx.hash)


class P2PTxInvStore(P2PInterface):
    """A P2PInterface which stores a count of how many times each txid has been announced."""
    def __init__(self):
        super().__init__()
        self.tx_invs_received = defaultdict(int)

    def on_inv(self, message):
        super().on_inv(message)  # Send getdata in response.
        # Store how many times invs have been received for each tx.
        for i in message.inv:
            if (i.type == MSG_TX) or (i.type == MSG_WTX):
                # save txid
                self.tx_invs_received[i.hash] += 1

    def get_invs(self):
        with p2p_lock:
            return list(self.tx_invs_received.keys())

    def wait_for_broadcast(self, txns, timeout=60):
        """Waits for the txns (list of txids) to complete initial broadcast.
        The mempool should mark unbroadcast=False for these transactions.
        """
        # Wait until invs have been received (and getdatas sent) for each txid.
        self.wait_until(lambda: set(self.tx_invs_received.keys()) == set([int(tx, 16) for tx in txns]), timeout=timeout)
        # Flush messages and wait for the getdatas to be processed
        self.sync_with_ping()
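As a usage sketch (not part of the diff): inside a functional test these classes are typically attached to a node via the framework's add_p2p_connection helper, which lives in test_framework/test_node.py and is outside this diff. A subclass only needs to override the on_* callbacks it cares about:

    from test_framework.p2p import P2PInterface

    class CountingPeer(P2PInterface):
        """Example subclass overriding one of the on_* callbacks."""
        def __init__(self):
            super().__init__()
            self.blocks_seen = 0

        def on_block(self, message):
            self.blocks_seen += 1

    # Inside a BitcoinTestFramework test (hypothetical):
    #     peer = self.nodes[0].add_p2p_connection(CountingPeer())
    #     peer.sync_with_ping()  # the ping round trip guarantees prior messages were processed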
130 miner_imports/test_framework/ripemd160.py (Normal file)
@@ -0,0 +1,130 @@
# Copyright (c) 2021 Pieter Wuille
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test-only pure Python RIPEMD160 implementation."""

import unittest

# Message schedule indexes for the left path.
ML = [
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
    7, 4, 13, 1, 10, 6, 15, 3, 12, 0, 9, 5, 2, 14, 11, 8,
    3, 10, 14, 4, 9, 15, 8, 1, 2, 7, 0, 6, 13, 11, 5, 12,
    1, 9, 11, 10, 0, 8, 12, 4, 13, 3, 7, 15, 14, 5, 6, 2,
    4, 0, 5, 9, 7, 12, 2, 10, 14, 1, 3, 8, 11, 6, 15, 13
]

# Message schedule indexes for the right path.
MR = [
    5, 14, 7, 0, 9, 2, 11, 4, 13, 6, 15, 8, 1, 10, 3, 12,
    6, 11, 3, 7, 0, 13, 5, 10, 14, 15, 8, 12, 4, 9, 1, 2,
    15, 5, 1, 3, 7, 14, 6, 9, 11, 8, 12, 2, 10, 0, 4, 13,
    8, 6, 4, 1, 3, 11, 15, 0, 5, 12, 2, 13, 9, 7, 10, 14,
    12, 15, 10, 4, 1, 5, 8, 7, 6, 2, 13, 14, 0, 3, 9, 11
]

# Rotation counts for the left path.
RL = [
    11, 14, 15, 12, 5, 8, 7, 9, 11, 13, 14, 15, 6, 7, 9, 8,
    7, 6, 8, 13, 11, 9, 7, 15, 7, 12, 15, 9, 11, 7, 13, 12,
    11, 13, 6, 7, 14, 9, 13, 15, 14, 8, 13, 6, 5, 12, 7, 5,
    11, 12, 14, 15, 14, 15, 9, 8, 9, 14, 5, 6, 8, 6, 5, 12,
    9, 15, 5, 11, 6, 8, 13, 12, 5, 12, 13, 14, 11, 8, 5, 6
]

# Rotation counts for the right path.
RR = [
    8, 9, 9, 11, 13, 15, 15, 5, 7, 7, 8, 11, 14, 14, 12, 6,
    9, 13, 15, 7, 12, 8, 9, 11, 7, 7, 12, 7, 6, 15, 13, 11,
    9, 7, 15, 11, 8, 6, 6, 14, 12, 13, 5, 14, 13, 13, 7, 5,
    15, 5, 8, 11, 14, 14, 6, 14, 6, 9, 12, 9, 12, 5, 15, 8,
    8, 5, 12, 9, 12, 5, 14, 6, 8, 13, 6, 5, 15, 13, 11, 11
]

# K constants for the left path.
KL = [0, 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xa953fd4e]

# K constants for the right path.
KR = [0x50a28be6, 0x5c4dd124, 0x6d703ef3, 0x7a6d76e9, 0]


def fi(x, y, z, i):
    """The f1, f2, f3, f4, and f5 functions from the specification."""
    if i == 0:
        return x ^ y ^ z
    elif i == 1:
        return (x & y) | (~x & z)
    elif i == 2:
        return (x | ~y) ^ z
    elif i == 3:
        return (x & z) | (y & ~z)
    elif i == 4:
        return x ^ (y | ~z)
    else:
        assert False


def rol(x, i):
    """Rotate the bottom 32 bits of x left by i bits."""
    return ((x << i) | ((x & 0xffffffff) >> (32 - i))) & 0xffffffff


def compress(h0, h1, h2, h3, h4, block):
    """Compress state (h0, h1, h2, h3, h4) with block."""
    # Left path variables.
    al, bl, cl, dl, el = h0, h1, h2, h3, h4
    # Right path variables.
    ar, br, cr, dr, er = h0, h1, h2, h3, h4
    # Message variables.
    x = [int.from_bytes(block[4*i:4*(i+1)], 'little') for i in range(16)]

    # Iterate over the 80 rounds of the compression.
    for j in range(80):
        rnd = j >> 4
        # Perform left side of the transformation.
        al = rol(al + fi(bl, cl, dl, rnd) + x[ML[j]] + KL[rnd], RL[j]) + el
        al, bl, cl, dl, el = el, al, bl, rol(cl, 10), dl
        # Perform right side of the transformation.
        ar = rol(ar + fi(br, cr, dr, 4 - rnd) + x[MR[j]] + KR[rnd], RR[j]) + er
        ar, br, cr, dr, er = er, ar, br, rol(cr, 10), dr

    # Compose old state, left transform, and right transform into new state.
    return h1 + cl + dr, h2 + dl + er, h3 + el + ar, h4 + al + br, h0 + bl + cr


def ripemd160(data):
    """Compute the RIPEMD-160 hash of data."""
    # Initialize state.
    state = (0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476, 0xc3d2e1f0)
    # Process full 64-byte blocks in the input.
    for b in range(len(data) >> 6):
        state = compress(*state, data[64*b:64*(b+1)])
    # Construct final blocks (with padding and size).
    pad = b"\x80" + b"\x00" * ((119 - len(data)) & 63)
    fin = data[len(data) & ~63:] + pad + (8 * len(data)).to_bytes(8, 'little')
    # Process final blocks.
    for b in range(len(fin) >> 6):
        state = compress(*state, fin[64*b:64*(b+1)])
    # Produce output.
    return b"".join((h & 0xffffffff).to_bytes(4, 'little') for h in state)


class TestFrameworkKey(unittest.TestCase):
    def test_ripemd160(self):
        """RIPEMD-160 test vectors."""
        # See https://homes.esat.kuleuven.be/~bosselae/ripemd160.html
        for msg, hexout in [
            (b"", "9c1185a5c5e9fc54612808977ee8f548b2258d31"),
            (b"a", "0bdc9d2d256b3ee9daae347be6f4dc835a467ffe"),
            (b"abc", "8eb208f7e05d987a9b044a8e98c6b087f15a0bfc"),
            (b"message digest", "5d0689ef49d2fae572b881b123a85ffa21595f36"),
            (b"abcdefghijklmnopqrstuvwxyz",
             "f71c27109c692c1b56bbdceb5b9d2865b3708dbc"),
            (b"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
             "12a053384a9c0c88e405a06c27dcf49ada62eb2b"),
            (b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789",
             "b0e20b6e3116640286ed3a87a5713079b21f5189"),
            (b"1234567890" * 8, "9b752e45573d4b39f4dbd3323cab82bf63326bfb"),
            (b"a" * 1000000, "52783243c1697bdbe16d37f97f68f08325dc1528")
        ]:
            self.assertEqual(ripemd160(msg).hex(), hexout)
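A quick sketch of how this module is consumed elsewhere in the framework (not part of the diff): Bitcoin's HASH160 is RIPEMD-160 over SHA-256, which script.py below wires up as hash160(). The assert uses the well-known HASH160 of the empty string.

    import hashlib
    from test_framework.ripemd160 import ripemd160

    def hash160(s):
        # HASH160(x) = RIPEMD160(SHA256(x)), as used for P2PKH/P2WPKH programs.
        return ripemd160(hashlib.sha256(s).digest())

    assert hash160(b"").hex() == "b472a266d0bd89c13706a4132ccfb16f7c3b9fcb"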
898 miner_imports/test_framework/script.py (Normal file)
@@ -0,0 +1,898 @@
#!/usr/bin/env python3
# Copyright (c) 2015-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Functionality to build scripts, as well as signature hash functions.

This file is modified from python-bitcoinlib.
"""

from collections import namedtuple
import struct
import unittest
from typing import List, Dict

from .key import TaggedHash, tweak_add_pubkey

from .messages import (
    CTransaction,
    CTxOut,
    hash256,
    ser_string,
    ser_uint256,
    sha256,
    uint256_from_str,
)

from .ripemd160 import ripemd160

MAX_SCRIPT_ELEMENT_SIZE = 520
LOCKTIME_THRESHOLD = 500000000
ANNEX_TAG = 0x50

LEAF_VERSION_TAPSCRIPT = 0xc0

def hash160(s):
    return ripemd160(sha256(s))

def bn2vch(v):
    """Convert number to bitcoin-specific little endian format."""
    # We need v.bit_length() bits, plus a sign bit for every nonzero number.
    n_bits = v.bit_length() + (v != 0)
    # The number of bytes for that is:
    n_bytes = (n_bits + 7) // 8
    # Convert number to absolute value + sign in top bit.
    encoded_v = 0 if v == 0 else abs(v) | ((v < 0) << (n_bytes * 8 - 1))
    # Serialize to bytes
    return encoded_v.to_bytes(n_bytes, 'little')

class CScriptOp(int):
    """A single script opcode"""
    __slots__ = ()

    @staticmethod
    def encode_op_pushdata(d):
        """Encode a PUSHDATA op, returning bytes"""
        if len(d) < 0x4c:
            return b'' + bytes([len(d)]) + d  # OP_PUSHDATA
        elif len(d) <= 0xff:
            return b'\x4c' + bytes([len(d)]) + d  # OP_PUSHDATA1
        elif len(d) <= 0xffff:
            return b'\x4d' + struct.pack(b'<H', len(d)) + d  # OP_PUSHDATA2
        elif len(d) <= 0xffffffff:
            return b'\x4e' + struct.pack(b'<I', len(d)) + d  # OP_PUSHDATA4
        else:
            raise ValueError("Data too long to encode in a PUSHDATA op")

    @staticmethod
    def encode_op_n(n):
        """Encode a small integer op, returning an opcode"""
        if not (0 <= n <= 16):
            raise ValueError('Integer must be in range 0 <= n <= 16, got %d' % n)

        if n == 0:
            return OP_0
        else:
            return CScriptOp(OP_1 + n - 1)

    def decode_op_n(self):
        """Decode a small integer opcode, returning an integer"""
        if self == OP_0:
            return 0

        if not (self == OP_0 or OP_1 <= self <= OP_16):
            raise ValueError('op %r is not an OP_N' % self)

        return int(self - OP_1 + 1)

    def is_small_int(self):
        """Return true if the op pushes a small integer to the stack"""
        if 0x51 <= self <= 0x60 or self == 0:
            return True
        else:
            return False

    def __str__(self):
        return repr(self)

    def __repr__(self):
        if self in OPCODE_NAMES:
            return OPCODE_NAMES[self]
        else:
            return 'CScriptOp(0x%x)' % self

    def __new__(cls, n):
        try:
            return _opcode_instances[n]
        except IndexError:
            assert len(_opcode_instances) == n
            _opcode_instances.append(super().__new__(cls, n))
            return _opcode_instances[n]

OPCODE_NAMES: Dict[CScriptOp, str] = {}
_opcode_instances: List[CScriptOp] = []

# Populate opcode instance table
for n in range(0xff + 1):
    CScriptOp(n)


# push value
OP_0 = CScriptOp(0x00)
OP_FALSE = OP_0
OP_PUSHDATA1 = CScriptOp(0x4c)
OP_PUSHDATA2 = CScriptOp(0x4d)
OP_PUSHDATA4 = CScriptOp(0x4e)
OP_1NEGATE = CScriptOp(0x4f)
OP_RESERVED = CScriptOp(0x50)
OP_1 = CScriptOp(0x51)
OP_TRUE = OP_1
OP_2 = CScriptOp(0x52)
OP_3 = CScriptOp(0x53)
OP_4 = CScriptOp(0x54)
OP_5 = CScriptOp(0x55)
OP_6 = CScriptOp(0x56)
OP_7 = CScriptOp(0x57)
OP_8 = CScriptOp(0x58)
OP_9 = CScriptOp(0x59)
OP_10 = CScriptOp(0x5a)
OP_11 = CScriptOp(0x5b)
OP_12 = CScriptOp(0x5c)
OP_13 = CScriptOp(0x5d)
OP_14 = CScriptOp(0x5e)
OP_15 = CScriptOp(0x5f)
OP_16 = CScriptOp(0x60)

# control
OP_NOP = CScriptOp(0x61)
OP_VER = CScriptOp(0x62)
OP_IF = CScriptOp(0x63)
OP_NOTIF = CScriptOp(0x64)
OP_VERIF = CScriptOp(0x65)
OP_VERNOTIF = CScriptOp(0x66)
OP_ELSE = CScriptOp(0x67)
OP_ENDIF = CScriptOp(0x68)
OP_VERIFY = CScriptOp(0x69)
OP_RETURN = CScriptOp(0x6a)

# stack ops
OP_TOALTSTACK = CScriptOp(0x6b)
OP_FROMALTSTACK = CScriptOp(0x6c)
OP_2DROP = CScriptOp(0x6d)
OP_2DUP = CScriptOp(0x6e)
OP_3DUP = CScriptOp(0x6f)
OP_2OVER = CScriptOp(0x70)
OP_2ROT = CScriptOp(0x71)
OP_2SWAP = CScriptOp(0x72)
OP_IFDUP = CScriptOp(0x73)
OP_DEPTH = CScriptOp(0x74)
OP_DROP = CScriptOp(0x75)
OP_DUP = CScriptOp(0x76)
OP_NIP = CScriptOp(0x77)
OP_OVER = CScriptOp(0x78)
OP_PICK = CScriptOp(0x79)
OP_ROLL = CScriptOp(0x7a)
OP_ROT = CScriptOp(0x7b)
OP_SWAP = CScriptOp(0x7c)
OP_TUCK = CScriptOp(0x7d)

# splice ops
OP_CAT = CScriptOp(0x7e)
OP_SUBSTR = CScriptOp(0x7f)
OP_LEFT = CScriptOp(0x80)
OP_RIGHT = CScriptOp(0x81)
OP_SIZE = CScriptOp(0x82)

# bit logic
OP_INVERT = CScriptOp(0x83)
OP_AND = CScriptOp(0x84)
OP_OR = CScriptOp(0x85)
OP_XOR = CScriptOp(0x86)
OP_EQUAL = CScriptOp(0x87)
OP_EQUALVERIFY = CScriptOp(0x88)
OP_RESERVED1 = CScriptOp(0x89)
OP_RESERVED2 = CScriptOp(0x8a)

# numeric
OP_1ADD = CScriptOp(0x8b)
OP_1SUB = CScriptOp(0x8c)
OP_2MUL = CScriptOp(0x8d)
OP_2DIV = CScriptOp(0x8e)
OP_NEGATE = CScriptOp(0x8f)
OP_ABS = CScriptOp(0x90)
OP_NOT = CScriptOp(0x91)
OP_0NOTEQUAL = CScriptOp(0x92)

OP_ADD = CScriptOp(0x93)
OP_SUB = CScriptOp(0x94)
OP_MUL = CScriptOp(0x95)
OP_DIV = CScriptOp(0x96)
OP_MOD = CScriptOp(0x97)
OP_LSHIFT = CScriptOp(0x98)
OP_RSHIFT = CScriptOp(0x99)

OP_BOOLAND = CScriptOp(0x9a)
OP_BOOLOR = CScriptOp(0x9b)
OP_NUMEQUAL = CScriptOp(0x9c)
OP_NUMEQUALVERIFY = CScriptOp(0x9d)
OP_NUMNOTEQUAL = CScriptOp(0x9e)
OP_LESSTHAN = CScriptOp(0x9f)
OP_GREATERTHAN = CScriptOp(0xa0)
OP_LESSTHANOREQUAL = CScriptOp(0xa1)
OP_GREATERTHANOREQUAL = CScriptOp(0xa2)
OP_MIN = CScriptOp(0xa3)
OP_MAX = CScriptOp(0xa4)

OP_WITHIN = CScriptOp(0xa5)

# crypto
OP_RIPEMD160 = CScriptOp(0xa6)
OP_SHA1 = CScriptOp(0xa7)
OP_SHA256 = CScriptOp(0xa8)
OP_HASH160 = CScriptOp(0xa9)
OP_HASH256 = CScriptOp(0xaa)
OP_CODESEPARATOR = CScriptOp(0xab)
OP_CHECKSIG = CScriptOp(0xac)
OP_CHECKSIGVERIFY = CScriptOp(0xad)
OP_CHECKMULTISIG = CScriptOp(0xae)
OP_CHECKMULTISIGVERIFY = CScriptOp(0xaf)

# expansion
OP_NOP1 = CScriptOp(0xb0)
OP_CHECKLOCKTIMEVERIFY = CScriptOp(0xb1)
OP_CHECKSEQUENCEVERIFY = CScriptOp(0xb2)
OP_NOP4 = CScriptOp(0xb3)
OP_NOP5 = CScriptOp(0xb4)
OP_NOP6 = CScriptOp(0xb5)
OP_NOP7 = CScriptOp(0xb6)
OP_NOP8 = CScriptOp(0xb7)
OP_NOP9 = CScriptOp(0xb8)
OP_NOP10 = CScriptOp(0xb9)

# BIP 342 opcodes (Tapscript)
OP_CHECKSIGADD = CScriptOp(0xba)

OP_INVALIDOPCODE = CScriptOp(0xff)

OPCODE_NAMES.update({
    OP_0: 'OP_0',
    OP_PUSHDATA1: 'OP_PUSHDATA1',
    OP_PUSHDATA2: 'OP_PUSHDATA2',
    OP_PUSHDATA4: 'OP_PUSHDATA4',
    OP_1NEGATE: 'OP_1NEGATE',
    OP_RESERVED: 'OP_RESERVED',
    OP_1: 'OP_1',
    OP_2: 'OP_2',
    OP_3: 'OP_3',
    OP_4: 'OP_4',
    OP_5: 'OP_5',
    OP_6: 'OP_6',
    OP_7: 'OP_7',
    OP_8: 'OP_8',
    OP_9: 'OP_9',
    OP_10: 'OP_10',
    OP_11: 'OP_11',
    OP_12: 'OP_12',
    OP_13: 'OP_13',
    OP_14: 'OP_14',
    OP_15: 'OP_15',
    OP_16: 'OP_16',
    OP_NOP: 'OP_NOP',
    OP_VER: 'OP_VER',
    OP_IF: 'OP_IF',
    OP_NOTIF: 'OP_NOTIF',
    OP_VERIF: 'OP_VERIF',
    OP_VERNOTIF: 'OP_VERNOTIF',
    OP_ELSE: 'OP_ELSE',
    OP_ENDIF: 'OP_ENDIF',
    OP_VERIFY: 'OP_VERIFY',
    OP_RETURN: 'OP_RETURN',
    OP_TOALTSTACK: 'OP_TOALTSTACK',
    OP_FROMALTSTACK: 'OP_FROMALTSTACK',
    OP_2DROP: 'OP_2DROP',
    OP_2DUP: 'OP_2DUP',
    OP_3DUP: 'OP_3DUP',
    OP_2OVER: 'OP_2OVER',
    OP_2ROT: 'OP_2ROT',
    OP_2SWAP: 'OP_2SWAP',
    OP_IFDUP: 'OP_IFDUP',
    OP_DEPTH: 'OP_DEPTH',
    OP_DROP: 'OP_DROP',
    OP_DUP: 'OP_DUP',
    OP_NIP: 'OP_NIP',
    OP_OVER: 'OP_OVER',
    OP_PICK: 'OP_PICK',
    OP_ROLL: 'OP_ROLL',
    OP_ROT: 'OP_ROT',
    OP_SWAP: 'OP_SWAP',
    OP_TUCK: 'OP_TUCK',
    OP_CAT: 'OP_CAT',
    OP_SUBSTR: 'OP_SUBSTR',
    OP_LEFT: 'OP_LEFT',
|
||||||
|
OP_RIGHT: 'OP_RIGHT',
|
||||||
|
OP_SIZE: 'OP_SIZE',
|
||||||
|
OP_INVERT: 'OP_INVERT',
|
||||||
|
OP_AND: 'OP_AND',
|
||||||
|
OP_OR: 'OP_OR',
|
||||||
|
OP_XOR: 'OP_XOR',
|
||||||
|
OP_EQUAL: 'OP_EQUAL',
|
||||||
|
OP_EQUALVERIFY: 'OP_EQUALVERIFY',
|
||||||
|
OP_RESERVED1: 'OP_RESERVED1',
|
||||||
|
OP_RESERVED2: 'OP_RESERVED2',
|
||||||
|
OP_1ADD: 'OP_1ADD',
|
||||||
|
OP_1SUB: 'OP_1SUB',
|
||||||
|
OP_2MUL: 'OP_2MUL',
|
||||||
|
OP_2DIV: 'OP_2DIV',
|
||||||
|
OP_NEGATE: 'OP_NEGATE',
|
||||||
|
OP_ABS: 'OP_ABS',
|
||||||
|
OP_NOT: 'OP_NOT',
|
||||||
|
OP_0NOTEQUAL: 'OP_0NOTEQUAL',
|
||||||
|
OP_ADD: 'OP_ADD',
|
||||||
|
OP_SUB: 'OP_SUB',
|
||||||
|
OP_MUL: 'OP_MUL',
|
||||||
|
OP_DIV: 'OP_DIV',
|
||||||
|
OP_MOD: 'OP_MOD',
|
||||||
|
OP_LSHIFT: 'OP_LSHIFT',
|
||||||
|
OP_RSHIFT: 'OP_RSHIFT',
|
||||||
|
OP_BOOLAND: 'OP_BOOLAND',
|
||||||
|
OP_BOOLOR: 'OP_BOOLOR',
|
||||||
|
OP_NUMEQUAL: 'OP_NUMEQUAL',
|
||||||
|
OP_NUMEQUALVERIFY: 'OP_NUMEQUALVERIFY',
|
||||||
|
OP_NUMNOTEQUAL: 'OP_NUMNOTEQUAL',
|
||||||
|
OP_LESSTHAN: 'OP_LESSTHAN',
|
||||||
|
OP_GREATERTHAN: 'OP_GREATERTHAN',
|
||||||
|
OP_LESSTHANOREQUAL: 'OP_LESSTHANOREQUAL',
|
||||||
|
OP_GREATERTHANOREQUAL: 'OP_GREATERTHANOREQUAL',
|
||||||
|
OP_MIN: 'OP_MIN',
|
||||||
|
OP_MAX: 'OP_MAX',
|
||||||
|
OP_WITHIN: 'OP_WITHIN',
|
||||||
|
OP_RIPEMD160: 'OP_RIPEMD160',
|
||||||
|
OP_SHA1: 'OP_SHA1',
|
||||||
|
OP_SHA256: 'OP_SHA256',
|
||||||
|
OP_HASH160: 'OP_HASH160',
|
||||||
|
OP_HASH256: 'OP_HASH256',
|
||||||
|
OP_CODESEPARATOR: 'OP_CODESEPARATOR',
|
||||||
|
OP_CHECKSIG: 'OP_CHECKSIG',
|
||||||
|
OP_CHECKSIGVERIFY: 'OP_CHECKSIGVERIFY',
|
||||||
|
OP_CHECKMULTISIG: 'OP_CHECKMULTISIG',
|
||||||
|
OP_CHECKMULTISIGVERIFY: 'OP_CHECKMULTISIGVERIFY',
|
||||||
|
OP_NOP1: 'OP_NOP1',
|
||||||
|
OP_CHECKLOCKTIMEVERIFY: 'OP_CHECKLOCKTIMEVERIFY',
|
||||||
|
OP_CHECKSEQUENCEVERIFY: 'OP_CHECKSEQUENCEVERIFY',
|
||||||
|
OP_NOP4: 'OP_NOP4',
|
||||||
|
OP_NOP5: 'OP_NOP5',
|
||||||
|
OP_NOP6: 'OP_NOP6',
|
||||||
|
OP_NOP7: 'OP_NOP7',
|
||||||
|
OP_NOP8: 'OP_NOP8',
|
||||||
|
OP_NOP9: 'OP_NOP9',
|
||||||
|
OP_NOP10: 'OP_NOP10',
|
||||||
|
OP_CHECKSIGADD: 'OP_CHECKSIGADD',
|
||||||
|
OP_INVALIDOPCODE: 'OP_INVALIDOPCODE',
|
||||||
|
})
|
||||||
|
|
||||||
|
class CScriptInvalidError(Exception):
|
||||||
|
"""Base class for CScript exceptions"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
class CScriptTruncatedPushDataError(CScriptInvalidError):
|
||||||
|
"""Invalid pushdata due to truncation"""
|
||||||
|
def __init__(self, msg, data):
|
||||||
|
self.data = data
|
||||||
|
super().__init__(msg)
|
||||||
|
|
||||||
|
|
||||||
|
# This is used, eg, for blockchain heights in coinbase scripts (bip34)
|
||||||
|
class CScriptNum:
|
||||||
|
__slots__ = ("value",)
|
||||||
|
|
||||||
|
def __init__(self, d=0):
|
||||||
|
self.value = d
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def encode(obj):
|
||||||
|
r = bytearray(0)
|
||||||
|
if obj.value == 0:
|
||||||
|
return bytes(r)
|
||||||
|
neg = obj.value < 0
|
||||||
|
absvalue = -obj.value if neg else obj.value
|
||||||
|
while (absvalue):
|
||||||
|
r.append(absvalue & 0xff)
|
||||||
|
absvalue >>= 8
|
||||||
|
if r[-1] & 0x80:
|
||||||
|
r.append(0x80 if neg else 0)
|
||||||
|
elif neg:
|
||||||
|
r[-1] |= 0x80
|
||||||
|
return bytes([len(r)]) + r
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def decode(vch):
|
||||||
|
result = 0
|
||||||
|
# We assume valid push_size and minimal encoding
|
||||||
|
value = vch[1:]
|
||||||
|
if len(value) == 0:
|
||||||
|
return result
|
||||||
|
for i, byte in enumerate(value):
|
||||||
|
result |= int(byte) << 8 * i
|
||||||
|
if value[-1] >= 0x80:
|
||||||
|
# Mask for all but the highest result bit
|
||||||
|
num_mask = (2**(len(value) * 8) - 1) >> 1
|
||||||
|
result &= num_mask
|
||||||
|
result *= -1
|
||||||
|
return result
|
||||||
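# Illustrative note (editor's addition, not part of the upstream file): the
# encoding above is Bitcoin's minimal little-endian sign-and-magnitude number
# format, length-prefixed with a single push-size byte. A few worked values,
# assuming the encode()/decode() definitions above:
#
#   CScriptNum.encode(CScriptNum(1))    == b'\x01\x01'      # push 1 byte: 0x01
#   CScriptNum.encode(CScriptNum(-1))   == b'\x01\x81'      # sign bit set on top byte
#   CScriptNum.encode(CScriptNum(128))  == b'\x02\x80\x00'  # extra 0x00 so 0x80 isn't read as negative
#   CScriptNum.decode(b'\x02\x80\x00')  == 128              # round trip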

class CScript(bytes):
    """Serialized script

    A bytes subclass, so you can use this directly whenever bytes are accepted.
    Note that this means that indexing does *not* work - you'll get an index by
    byte rather than opcode. This format was chosen for efficiency so that the
    general case would not require creating a lot of little CScriptOP objects.

    iter(script) however does iterate by opcode.
    """
    __slots__ = ()

    @classmethod
    def __coerce_instance(cls, other):
        # Coerce other into bytes
        if isinstance(other, CScriptOp):
            other = bytes([other])
        elif isinstance(other, CScriptNum):
            if (other.value == 0):
                other = bytes([CScriptOp(OP_0)])
            else:
                other = CScriptNum.encode(other)
        elif isinstance(other, int):
            if 0 <= other <= 16:
                other = bytes([CScriptOp.encode_op_n(other)])
            elif other == -1:
                other = bytes([OP_1NEGATE])
            else:
                other = CScriptOp.encode_op_pushdata(bn2vch(other))
        elif isinstance(other, (bytes, bytearray)):
            other = CScriptOp.encode_op_pushdata(other)
        return other

    def __add__(self, other):
        # add makes no sense for a CScript()
        raise NotImplementedError

    def join(self, iterable):
        # join makes no sense for a CScript()
        raise NotImplementedError

    def __new__(cls, value=b''):
        if isinstance(value, bytes) or isinstance(value, bytearray):
            return super().__new__(cls, value)
        else:
            def coerce_iterable(iterable):
                for instance in iterable:
                    yield cls.__coerce_instance(instance)
            # Annoyingly on both python2 and python3 bytes.join() always
            # returns a bytes instance even when subclassed.
            return super().__new__(cls, b''.join(coerce_iterable(value)))

    def raw_iter(self):
        """Raw iteration

        Yields tuples of (opcode, data, sop_idx) so that the different possible
        PUSHDATA encodings can be accurately distinguished, as well as
        determining the exact opcode byte indexes. (sop_idx)
        """
        i = 0
        while i < len(self):
            sop_idx = i
            opcode = self[i]
            i += 1

            if opcode > OP_PUSHDATA4:
                yield (opcode, None, sop_idx)
            else:
                datasize = None
                pushdata_type = None
                if opcode < OP_PUSHDATA1:
                    pushdata_type = 'PUSHDATA(%d)' % opcode
                    datasize = opcode

                elif opcode == OP_PUSHDATA1:
                    pushdata_type = 'PUSHDATA1'
                    if i >= len(self):
                        raise CScriptInvalidError('PUSHDATA1: missing data length')
                    datasize = self[i]
                    i += 1

                elif opcode == OP_PUSHDATA2:
                    pushdata_type = 'PUSHDATA2'
                    if i + 1 >= len(self):
                        raise CScriptInvalidError('PUSHDATA2: missing data length')
                    datasize = self[i] + (self[i + 1] << 8)
                    i += 2

                elif opcode == OP_PUSHDATA4:
                    pushdata_type = 'PUSHDATA4'
                    if i + 3 >= len(self):
                        raise CScriptInvalidError('PUSHDATA4: missing data length')
                    datasize = self[i] + (self[i + 1] << 8) + (self[i + 2] << 16) + (self[i + 3] << 24)
                    i += 4

                else:
                    assert False  # shouldn't happen

                data = bytes(self[i:i + datasize])

                # Check for truncation
                if len(data) < datasize:
                    raise CScriptTruncatedPushDataError('%s: truncated data' % pushdata_type, data)

                i += datasize

                yield (opcode, data, sop_idx)

    def __iter__(self):
        """'Cooked' iteration

        Returns either a CScriptOP instance, an integer, or bytes, as
        appropriate.

        See raw_iter() if you need to distinguish the different possible
        PUSHDATA encodings.
        """
        for (opcode, data, sop_idx) in self.raw_iter():
            if data is not None:
                yield data
            else:
                opcode = CScriptOp(opcode)

                if opcode.is_small_int():
                    yield opcode.decode_op_n()
                else:
                    yield CScriptOp(opcode)

    def __repr__(self):
        def _repr(o):
            if isinstance(o, bytes):
                return "x('%s')" % o.hex()
            else:
                return repr(o)

        ops = []
        i = iter(self)
        while True:
            op = None
            try:
                op = _repr(next(i))
            except CScriptTruncatedPushDataError as err:
                op = '%s...<ERROR: %s>' % (_repr(err.data), err)
                break
            except CScriptInvalidError as err:
                op = '<ERROR: %s>' % err
                break
            except StopIteration:
                break
            finally:
                if op is not None:
                    ops.append(op)

        return "CScript([%s])" % ', '.join(ops)

    def GetSigOpCount(self, fAccurate):
        """Get the SigOp count.

        fAccurate - Accurately count CHECKMULTISIG, see BIP16 for details.

        Note that this is consensus-critical.
        """
        n = 0
        lastOpcode = OP_INVALIDOPCODE
        for (opcode, data, sop_idx) in self.raw_iter():
            if opcode in (OP_CHECKSIG, OP_CHECKSIGVERIFY):
                n += 1
            elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY):
                if fAccurate and (OP_1 <= lastOpcode <= OP_16):
                    n += opcode.decode_op_n()
                else:
                    n += 20
            lastOpcode = opcode
        return n
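# Illustrative usage (editor's addition, not part of the upstream file): the
# CScript constructor coerces opcodes, ints, and byte strings into one
# serialized script, and iteration walks it back out element by element.
# For a P2PKH-style script (`pubkey_hash` assumed to be a 20-byte hash160):
#
#   script = CScript([OP_DUP, OP_HASH160, pubkey_hash, OP_EQUALVERIFY, OP_CHECKSIG])
#   list(script)  # -> [OP_DUP, OP_HASH160, b'...20 bytes...', OP_EQUALVERIFY, OP_CHECKSIG]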

SIGHASH_DEFAULT = 0  # Taproot-only default, semantics same as SIGHASH_ALL
SIGHASH_ALL = 1
SIGHASH_NONE = 2
SIGHASH_SINGLE = 3
SIGHASH_ANYONECANPAY = 0x80

def FindAndDelete(script, sig):
    """Consensus critical, see FindAndDelete() in Satoshi codebase"""
    r = b''
    last_sop_idx = sop_idx = 0
    skip = True
    for (opcode, data, sop_idx) in script.raw_iter():
        if not skip:
            r += script[last_sop_idx:sop_idx]
        last_sop_idx = sop_idx
        if script[sop_idx:sop_idx + len(sig)] == sig:
            skip = True
        else:
            skip = False
    if not skip:
        r += script[last_sop_idx:]
    return CScript(r)
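# Illustrative note (editor's addition): FindAndDelete() removes every
# occurrence of a serialized element from a script, matched at opcode
# boundaries. It is used below to strip OP_CODESEPARATOR before legacy
# signature hashing, e.g.:
#
#   FindAndDelete(CScript([OP_1, OP_CODESEPARATOR, OP_2]), CScript([OP_CODESEPARATOR]))
#   # -> CScript([OP_1, OP_2])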

def LegacySignatureMsg(script, txTo, inIdx, hashtype):
    """Preimage of the signature hash, if it exists.

    Returns either (None, err) to indicate error (which translates to sighash 1),
    or (msg, None).
    """

    if inIdx >= len(txTo.vin):
        return (None, "inIdx %d out of range (%d)" % (inIdx, len(txTo.vin)))
    txtmp = CTransaction(txTo)

    for txin in txtmp.vin:
        txin.scriptSig = b''
    txtmp.vin[inIdx].scriptSig = FindAndDelete(script, CScript([OP_CODESEPARATOR]))

    if (hashtype & 0x1f) == SIGHASH_NONE:
        txtmp.vout = []

        for i in range(len(txtmp.vin)):
            if i != inIdx:
                txtmp.vin[i].nSequence = 0

    elif (hashtype & 0x1f) == SIGHASH_SINGLE:
        outIdx = inIdx
        if outIdx >= len(txtmp.vout):
            return (None, "outIdx %d out of range (%d)" % (outIdx, len(txtmp.vout)))

        tmp = txtmp.vout[outIdx]
        txtmp.vout = []
        for _ in range(outIdx):
            txtmp.vout.append(CTxOut(-1))
        txtmp.vout.append(tmp)

        for i in range(len(txtmp.vin)):
            if i != inIdx:
                txtmp.vin[i].nSequence = 0

    if hashtype & SIGHASH_ANYONECANPAY:
        tmp = txtmp.vin[inIdx]
        txtmp.vin = []
        txtmp.vin.append(tmp)

    s = txtmp.serialize_without_witness()
    s += struct.pack(b"<I", hashtype)

    return (s, None)

def LegacySignatureHash(*args, **kwargs):
    """Consensus-correct SignatureHash

    Returns (hash, err) to precisely match the consensus-critical behavior of
    the SIGHASH_SINGLE bug. (inIdx is *not* checked for validity)
    """

    HASH_ONE = b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
    msg, err = LegacySignatureMsg(*args, **kwargs)
    if msg is None:
        return (HASH_ONE, err)
    else:
        return (hash256(msg), err)
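# Illustrative usage (editor's addition): given a CTransaction `tx` spending
# an output whose scriptPubKey is `spk`, the legacy digest for input 0 with
# SIGHASH_ALL would be computed as:
#
#   sighash, err = LegacySignatureHash(spk, tx, 0, SIGHASH_ALL)
#   assert err is None
#
# When the SIGHASH_SINGLE bug is hit (input index >= output count), the
# function instead returns the constant "hash of one" plus an error string.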

# TODO: Allow cached hashPrevouts/hashSequence/hashOutputs to be provided.
# Performance optimization probably not necessary for python tests, however.
# Note that this corresponds to sigversion == 1 in EvalScript, which is used
# for version 0 witnesses.
def SegwitV0SignatureMsg(script, txTo, inIdx, hashtype, amount):

    hashPrevouts = 0
    hashSequence = 0
    hashOutputs = 0

    if not (hashtype & SIGHASH_ANYONECANPAY):
        serialize_prevouts = bytes()
        for i in txTo.vin:
            serialize_prevouts += i.prevout.serialize()
        hashPrevouts = uint256_from_str(hash256(serialize_prevouts))

    if (not (hashtype & SIGHASH_ANYONECANPAY) and (hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
        serialize_sequence = bytes()
        for i in txTo.vin:
            serialize_sequence += struct.pack("<I", i.nSequence)
        hashSequence = uint256_from_str(hash256(serialize_sequence))

    if ((hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
        serialize_outputs = bytes()
        for o in txTo.vout:
            serialize_outputs += o.serialize()
        hashOutputs = uint256_from_str(hash256(serialize_outputs))
    elif ((hashtype & 0x1f) == SIGHASH_SINGLE and inIdx < len(txTo.vout)):
        serialize_outputs = txTo.vout[inIdx].serialize()
        hashOutputs = uint256_from_str(hash256(serialize_outputs))

    ss = bytes()
    ss += struct.pack("<i", txTo.nVersion)
    ss += ser_uint256(hashPrevouts)
    ss += ser_uint256(hashSequence)
    ss += txTo.vin[inIdx].prevout.serialize()
    ss += ser_string(script)
    ss += struct.pack("<q", amount)
    ss += struct.pack("<I", txTo.vin[inIdx].nSequence)
    ss += ser_uint256(hashOutputs)
    ss += struct.pack("<i", txTo.nLockTime)
    ss += struct.pack("<I", hashtype)
    return ss

def SegwitV0SignatureHash(*args, **kwargs):
    return hash256(SegwitV0SignatureMsg(*args, **kwargs))

class TestFrameworkScript(unittest.TestCase):
    def test_bn2vch(self):
        self.assertEqual(bn2vch(0), bytes([]))
        self.assertEqual(bn2vch(1), bytes([0x01]))
        self.assertEqual(bn2vch(-1), bytes([0x81]))
        self.assertEqual(bn2vch(0x7F), bytes([0x7F]))
        self.assertEqual(bn2vch(-0x7F), bytes([0xFF]))
        self.assertEqual(bn2vch(0x80), bytes([0x80, 0x00]))
        self.assertEqual(bn2vch(-0x80), bytes([0x80, 0x80]))
        self.assertEqual(bn2vch(0xFF), bytes([0xFF, 0x00]))
        self.assertEqual(bn2vch(-0xFF), bytes([0xFF, 0x80]))
        self.assertEqual(bn2vch(0x100), bytes([0x00, 0x01]))
        self.assertEqual(bn2vch(-0x100), bytes([0x00, 0x81]))
        self.assertEqual(bn2vch(0x7FFF), bytes([0xFF, 0x7F]))
        self.assertEqual(bn2vch(-0x8000), bytes([0x00, 0x80, 0x80]))
        self.assertEqual(bn2vch(-0x7FFFFF), bytes([0xFF, 0xFF, 0xFF]))
        self.assertEqual(bn2vch(0x80000000), bytes([0x00, 0x00, 0x00, 0x80, 0x00]))
        self.assertEqual(bn2vch(-0x80000000), bytes([0x00, 0x00, 0x00, 0x80, 0x80]))
        self.assertEqual(bn2vch(0xFFFFFFFF), bytes([0xFF, 0xFF, 0xFF, 0xFF, 0x00]))
        self.assertEqual(bn2vch(123456789), bytes([0x15, 0xCD, 0x5B, 0x07]))
        self.assertEqual(bn2vch(-54321), bytes([0x31, 0xD4, 0x80]))

    def test_cscriptnum_encoding(self):
        # round-trip negative and multi-byte CScriptNums
        values = [0, 1, -1, -2, 127, 128, -255, 256, (1 << 15) - 1, -(1 << 16), (1 << 24) - 1, (1 << 31), 1 - (1 << 32), 1 << 40, 1500, -1500]
        for value in values:
            self.assertEqual(CScriptNum.decode(CScriptNum.encode(CScriptNum(value))), value)

def BIP341_sha_prevouts(txTo):
    return sha256(b"".join(i.prevout.serialize() for i in txTo.vin))

def BIP341_sha_amounts(spent_utxos):
    return sha256(b"".join(struct.pack("<q", u.nValue) for u in spent_utxos))

def BIP341_sha_scriptpubkeys(spent_utxos):
    return sha256(b"".join(ser_string(u.scriptPubKey) for u in spent_utxos))

def BIP341_sha_sequences(txTo):
    return sha256(b"".join(struct.pack("<I", i.nSequence) for i in txTo.vin))

def BIP341_sha_outputs(txTo):
    return sha256(b"".join(o.serialize() for o in txTo.vout))

def TaprootSignatureMsg(txTo, spent_utxos, hash_type, input_index = 0, scriptpath = False, script = CScript(), codeseparator_pos = -1, annex = None, leaf_ver = LEAF_VERSION_TAPSCRIPT):
    assert (len(txTo.vin) == len(spent_utxos))
    assert (input_index < len(txTo.vin))
    out_type = SIGHASH_ALL if hash_type == 0 else hash_type & 3
    in_type = hash_type & SIGHASH_ANYONECANPAY
    spk = spent_utxos[input_index].scriptPubKey
    ss = bytes([0, hash_type])  # epoch, hash_type
    ss += struct.pack("<i", txTo.nVersion)
    ss += struct.pack("<I", txTo.nLockTime)
    if in_type != SIGHASH_ANYONECANPAY:
        ss += BIP341_sha_prevouts(txTo)
        ss += BIP341_sha_amounts(spent_utxos)
        ss += BIP341_sha_scriptpubkeys(spent_utxos)
        ss += BIP341_sha_sequences(txTo)
    if out_type == SIGHASH_ALL:
        ss += BIP341_sha_outputs(txTo)
    spend_type = 0
    if annex is not None:
        spend_type |= 1
    if (scriptpath):
        spend_type |= 2
    ss += bytes([spend_type])
    if in_type == SIGHASH_ANYONECANPAY:
        ss += txTo.vin[input_index].prevout.serialize()
        ss += struct.pack("<q", spent_utxos[input_index].nValue)
        ss += ser_string(spk)
        ss += struct.pack("<I", txTo.vin[input_index].nSequence)
    else:
        ss += struct.pack("<I", input_index)
    if (spend_type & 1):
        ss += sha256(ser_string(annex))
    if out_type == SIGHASH_SINGLE:
        if input_index < len(txTo.vout):
            ss += sha256(txTo.vout[input_index].serialize())
        else:
            ss += bytes(0 for _ in range(32))
    if (scriptpath):
        ss += TaggedHash("TapLeaf", bytes([leaf_ver]) + ser_string(script))
        ss += bytes([0])
        ss += struct.pack("<i", codeseparator_pos)
    assert len(ss) == 175 - (in_type == SIGHASH_ANYONECANPAY) * 49 - (out_type != SIGHASH_ALL and out_type != SIGHASH_SINGLE) * 32 + (annex is not None) * 32 + scriptpath * 37
    return ss

def TaprootSignatureHash(*args, **kwargs):
    return TaggedHash("TapSighash", TaprootSignatureMsg(*args, **kwargs))
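# Illustrative usage (editor's addition): for a key-path spend of input 0 of
# `tx`, where `utxos` is the list of CTxOut objects being spent (one per
# input, in order), the BIP341 digest with the default sighash would be:
#
#   digest = TaprootSignatureHash(tx, utxos, SIGHASH_DEFAULT, input_index=0)
#
# The length assertion above pins the preimage sizes specified in BIP341.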

def taproot_tree_helper(scripts):
    if len(scripts) == 0:
        return ([], bytes())
    if len(scripts) == 1:
        # One entry: treat as a leaf
        script = scripts[0]
        assert(not callable(script))
        if isinstance(script, list):
            return taproot_tree_helper(script)
        assert(isinstance(script, tuple))
        version = LEAF_VERSION_TAPSCRIPT
        name = script[0]
        code = script[1]
        if len(script) == 3:
            version = script[2]
        assert version & 1 == 0
        assert isinstance(code, bytes)
        h = TaggedHash("TapLeaf", bytes([version]) + ser_string(code))
        if name is None:
            return ([], h)
        return ([(name, version, code, bytes(), h)], h)
    elif len(scripts) == 2 and callable(scripts[1]):
        # Two entries, and the right one is a function
        left, left_h = taproot_tree_helper(scripts[0:1])
        right_h = scripts[1](left_h)
        left = [(name, version, script, control + right_h, leaf) for name, version, script, control, leaf in left]
        right = []
    else:
        # Two or more entries: descend into each side
        split_pos = len(scripts) // 2
        left, left_h = taproot_tree_helper(scripts[0:split_pos])
        right, right_h = taproot_tree_helper(scripts[split_pos:])
        left = [(name, version, script, control + right_h, leaf) for name, version, script, control, leaf in left]
        right = [(name, version, script, control + left_h, leaf) for name, version, script, control, leaf in right]
    if right_h < left_h:
        right_h, left_h = left_h, right_h
    h = TaggedHash("TapBranch", left_h + right_h)
    return (left + right, h)

# A TaprootInfo object has the following fields:
# - scriptPubKey: the scriptPubKey (witness v1 CScript)
# - internal_pubkey: the internal pubkey (32 bytes)
# - negflag: whether the pubkey in the scriptPubKey was negated from internal_pubkey+tweak*G (bool).
# - tweak: the tweak (32 bytes)
# - leaves: a dict of name -> TaprootLeafInfo objects for all known leaves
# - merkle_root: the script tree's Merkle root, or bytes() if no leaves are present
TaprootInfo = namedtuple("TaprootInfo", "scriptPubKey,internal_pubkey,negflag,tweak,leaves,merkle_root,output_pubkey")

# A TaprootLeafInfo object has the following fields:
# - script: the leaf script (CScript or bytes)
# - version: the leaf version (0xc0 for BIP342 tapscript)
# - merklebranch: the merkle branch to use for this leaf (32*N bytes)
TaprootLeafInfo = namedtuple("TaprootLeafInfo", "script,version,merklebranch,leaf_hash")

def taproot_construct(pubkey, scripts=None):
    """Construct a tree of Taproot spending conditions

    pubkey: a 32-byte xonly pubkey for the internal pubkey (bytes)
    scripts: a list of items; each item is either:
      - a (name, CScript or bytes, leaf version) tuple
      - a (name, CScript or bytes) tuple (defaulting to leaf version 0xc0)
      - another list of items (with the same structure)
      - a list of two items; the first of which is an item itself, and the
        second is a function. The function takes as input the Merkle root of the
        first item, and produces a (fictitious) partner to hash with.

    Returns: a TaprootInfo object
    """
    if scripts is None:
        scripts = []

    ret, h = taproot_tree_helper(scripts)
    tweak = TaggedHash("TapTweak", pubkey + h)
    tweaked, negated = tweak_add_pubkey(pubkey, tweak)
    leaves = dict((name, TaprootLeafInfo(script, version, merklebranch, leaf)) for name, version, script, merklebranch, leaf in ret)
    return TaprootInfo(CScript([OP_1, tweaked]), pubkey, negated + 0, tweak, leaves, h, tweaked)
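# Illustrative usage (editor's addition): given a 32-byte x-only internal key
# `internal_key` and two named tapscript leaves, the P2TR output script and
# per-leaf Merkle branches can be derived as:
#
#   info = taproot_construct(internal_key, [("csa", csa_script), ("ms", ms_script)])
#   info.scriptPubKey                # OP_1 <tweaked output key>
#   info.leaves["csa"].merklebranch  # control-block branch for that leaf
#
# (`csa_script` and `ms_script` are hypothetical CScript leaf scripts.)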

def is_op_success(o):
    return o == 0x50 or o == 0x62 or o == 0x89 or o == 0x8a or o == 0x8d or o == 0x8e or (o >= 0x7e and o <= 0x81) or (o >= 0x83 and o <= 0x86) or (o >= 0x95 and o <= 0x99) or (o >= 0xbb and o <= 0xfe)

121
miner_imports/test_framework/script_util.py
Executable file
@@ -0,0 +1,121 @@
#!/usr/bin/env python3
# Copyright (c) 2019-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Useful Script constants and utils."""
from test_framework.script import (
    CScript,
    CScriptOp,
    OP_0,
    OP_CHECKMULTISIG,
    OP_CHECKSIG,
    OP_DUP,
    OP_EQUAL,
    OP_EQUALVERIFY,
    OP_HASH160,
    hash160,
    sha256,
)

# To prevent a "tx-size-small" policy rule error, a transaction has to have a
# non-witness size of at least 82 bytes (MIN_STANDARD_TX_NONWITNESS_SIZE in
# src/policy/policy.h). Considering a Tx with the smallest possible single
# input (blank, empty scriptSig), and with an output omitting the scriptPubKey,
# we get to a minimum size of 60 bytes:
#
# Tx Skeleton: 4 [Version] + 1 [InCount] + 1 [OutCount] + 4 [LockTime] = 10 bytes
# Blank Input: 32 [PrevTxHash] + 4 [Index] + 1 [scriptSigLen] + 4 [SeqNo] = 41 bytes
# Output: 8 [Amount] + 1 [scriptPubKeyLen] = 9 bytes
#
# Hence, the scriptPubKey of the single output has to have a size of at
# least 22 bytes, which corresponds to the size of a P2WPKH scriptPubKey.
# The following script constant consists of a single push of 21 bytes of 'a':
# <PUSH_21> <21-bytes of 'a'>
# resulting in a 22-byte size. It should be used whenever (small) fake
# scriptPubKeys are needed, to guarantee that the minimum transaction size is
# met.
DUMMY_P2WPKH_SCRIPT = CScript([b'a' * 21])
DUMMY_2_P2WPKH_SCRIPT = CScript([b'b' * 21])


def key_to_p2pk_script(key):
    key = check_key(key)
    return CScript([key, OP_CHECKSIG])


def keys_to_multisig_script(keys, *, k=None):
    n = len(keys)
    if k is None:  # n-of-n multisig by default
        k = n
    assert k <= n
    op_k = CScriptOp.encode_op_n(k)
    op_n = CScriptOp.encode_op_n(n)
    checked_keys = [check_key(key) for key in keys]
    return CScript([op_k] + checked_keys + [op_n, OP_CHECKMULTISIG])
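# Illustrative usage (editor's addition): a 2-of-3 multisig scriptPubKey from
# three hypothetical 33-byte compressed pubkeys:
#
#   spk = keys_to_multisig_script([pk1, pk2, pk3], k=2)
#   # -> OP_2 <pk1> <pk2> <pk3> OP_3 OP_CHECKMULTISIG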

def keyhash_to_p2pkh_script(hash):
    assert len(hash) == 20
    return CScript([OP_DUP, OP_HASH160, hash, OP_EQUALVERIFY, OP_CHECKSIG])


def scripthash_to_p2sh_script(hash):
    assert len(hash) == 20
    return CScript([OP_HASH160, hash, OP_EQUAL])


def key_to_p2pkh_script(key):
    key = check_key(key)
    return keyhash_to_p2pkh_script(hash160(key))


def script_to_p2sh_script(script):
    script = check_script(script)
    return scripthash_to_p2sh_script(hash160(script))


def key_to_p2sh_p2wpkh_script(key):
    key = check_key(key)
    p2shscript = CScript([OP_0, hash160(key)])
    return script_to_p2sh_script(p2shscript)


def program_to_witness_script(version, program):
    if isinstance(program, str):
        program = bytes.fromhex(program)
    assert 0 <= version <= 16
    assert 2 <= len(program) <= 40
    assert version > 0 or len(program) in [20, 32]
    return CScript([version, program])


def script_to_p2wsh_script(script):
    script = check_script(script)
    return program_to_witness_script(0, sha256(script))


def key_to_p2wpkh_script(key):
    key = check_key(key)
    return program_to_witness_script(0, hash160(key))


def script_to_p2sh_p2wsh_script(script):
    script = check_script(script)
    p2shscript = CScript([OP_0, sha256(script)])
    return script_to_p2sh_script(p2shscript)


def check_key(key):
    if isinstance(key, str):
        key = bytes.fromhex(key)  # Assuming this is hex string
    if isinstance(key, bytes) and (len(key) == 33 or len(key) == 65):
        return key
    assert False


def check_script(script):
    if isinstance(script, str):
        script = bytes.fromhex(script)  # Assuming this is hex string
    if isinstance(script, bytes) or isinstance(script, CScript):
        return script
    assert False

141
miner_imports/test_framework/segwit_addr.py
Normal file
@@ -0,0 +1,141 @@
#!/usr/bin/env python3
# Copyright (c) 2017 Pieter Wuille
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Reference implementation for Bech32/Bech32m and segwit addresses."""
import unittest
from enum import Enum

CHARSET = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"
BECH32_CONST = 1
BECH32M_CONST = 0x2bc830a3

class Encoding(Enum):
    """Enumeration type to list the various supported encodings."""
    BECH32 = 1
    BECH32M = 2


def bech32_polymod(values):
    """Internal function that computes the Bech32 checksum."""
    generator = [0x3b6a57b2, 0x26508e6d, 0x1ea119fa, 0x3d4233dd, 0x2a1462b3]
    chk = 1
    for value in values:
        top = chk >> 25
        chk = (chk & 0x1ffffff) << 5 ^ value
        for i in range(5):
            chk ^= generator[i] if ((top >> i) & 1) else 0
    return chk


def bech32_hrp_expand(hrp):
    """Expand the HRP into values for checksum computation."""
    return [ord(x) >> 5 for x in hrp] + [0] + [ord(x) & 31 for x in hrp]


def bech32_verify_checksum(hrp, data):
    """Verify a checksum given HRP and converted data characters."""
    check = bech32_polymod(bech32_hrp_expand(hrp) + data)
    if check == BECH32_CONST:
        return Encoding.BECH32
    elif check == BECH32M_CONST:
        return Encoding.BECH32M
    else:
        return None

def bech32_create_checksum(encoding, hrp, data):
    """Compute the checksum values given HRP and data."""
    values = bech32_hrp_expand(hrp) + data
    const = BECH32M_CONST if encoding == Encoding.BECH32M else BECH32_CONST
    polymod = bech32_polymod(values + [0, 0, 0, 0, 0, 0]) ^ const
    return [(polymod >> 5 * (5 - i)) & 31 for i in range(6)]


def bech32_encode(encoding, hrp, data):
    """Compute a Bech32 or Bech32m string given HRP and data values."""
    combined = data + bech32_create_checksum(encoding, hrp, data)
    return hrp + '1' + ''.join([CHARSET[d] for d in combined])


def bech32_decode(bech):
    """Validate a Bech32/Bech32m string, and determine HRP and data."""
    if ((any(ord(x) < 33 or ord(x) > 126 for x in bech)) or
            (bech.lower() != bech and bech.upper() != bech)):
        return (None, None, None)
    bech = bech.lower()
    pos = bech.rfind('1')
    if pos < 1 or pos + 7 > len(bech) or len(bech) > 90:
        return (None, None, None)
    if not all(x in CHARSET for x in bech[pos+1:]):
        return (None, None, None)
    hrp = bech[:pos]
    data = [CHARSET.find(x) for x in bech[pos+1:]]
    encoding = bech32_verify_checksum(hrp, data)
    if encoding is None:
        return (None, None, None)
    return (encoding, hrp, data[:-6])


def convertbits(data, frombits, tobits, pad=True):
    """General power-of-2 base conversion."""
    acc = 0
    bits = 0
    ret = []
    maxv = (1 << tobits) - 1
    max_acc = (1 << (frombits + tobits - 1)) - 1
    for value in data:
        if value < 0 or (value >> frombits):
            return None
        acc = ((acc << frombits) | value) & max_acc
        bits += frombits
        while bits >= tobits:
            bits -= tobits
            ret.append((acc >> bits) & maxv)
    if pad:
        if bits:
            ret.append((acc << (tobits - bits)) & maxv)
    elif bits >= frombits or ((acc << (tobits - bits)) & maxv):
        return None
    return ret
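# Illustrative worked example (editor's addition): converting one 8-bit byte
# into 5-bit groups pads the tail with zero bits:
#
#   convertbits([0xff], 8, 5)               # -> [31, 28] (11111, 111 + 00 padding)
#   convertbits([31, 28], 5, 8, pad=False)  # -> [0xff], the round trip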

def decode_segwit_address(hrp, addr):
    """Decode a segwit address."""
    encoding, hrpgot, data = bech32_decode(addr)
    if hrpgot != hrp:
        return (None, None)
    decoded = convertbits(data[1:], 5, 8, False)
    if decoded is None or len(decoded) < 2 or len(decoded) > 40:
        return (None, None)
    if data[0] > 16:
        return (None, None)
    if data[0] == 0 and len(decoded) != 20 and len(decoded) != 32:
        return (None, None)
    if (data[0] == 0 and encoding != Encoding.BECH32) or (data[0] != 0 and encoding != Encoding.BECH32M):
        return (None, None)
    return (data[0], decoded)


def encode_segwit_address(hrp, witver, witprog):
    """Encode a segwit address."""
    encoding = Encoding.BECH32 if witver == 0 else Encoding.BECH32M
    ret = bech32_encode(encoding, hrp, [witver] + convertbits(witprog, 8, 5))
    if decode_segwit_address(hrp, ret) == (None, None):
        return None
    return ret
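# Illustrative usage (editor's addition): a witness v0 program (20-byte key
# hash) encodes as Bech32, while v1 (taproot, 32-byte key) uses Bech32m:
#
#   addr = encode_segwit_address("bcrt", 0, key_hash20)   # a bcrt1q... address
#   witver, witprog = decode_segwit_address("bcrt", addr)
#   assert witver == 0 and bytes(witprog) == key_hash20
#
# (`key_hash20` is a hypothetical 20-byte hash160 of a pubkey.)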

class TestFrameworkScript(unittest.TestCase):
    def test_segwit_encode_decode(self):
        def test_python_bech32(addr):
            hrp = addr[:4]
            self.assertEqual(hrp, "bcrt")
            (witver, witprog) = decode_segwit_address(hrp, addr)
            self.assertEqual(encode_segwit_address(hrp, witver, witprog), addr)

        # P2WPKH
        test_python_bech32('bcrt1qthmht0k2qnh3wy7336z05lu2km7emzfpm3wg46')
        # P2WSH
        test_python_bech32('bcrt1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq3xueyj')
        test_python_bech32('bcrt1qft5p2uhsdcdc3l2ua4ap5qqfg4pjaqlp250x7us7a8qqhrxrxfsqseac85')
        # P2TR
        test_python_bech32('bcrt1p0xlxvlhemja6c4dqv22uapctqupfhlxm9h8z3k2e72q4k9hcz7vqc8gma6')

63
miner_imports/test_framework/siphash.py
Normal file
@@ -0,0 +1,63 @@
#!/usr/bin/env python3
# Copyright (c) 2016-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Specialized SipHash-2-4 implementations.

This implements SipHash-2-4 for 256-bit integers.
"""

def rotl64(n, b):
    return n >> (64 - b) | (n & ((1 << (64 - b)) - 1)) << b

def siphash_round(v0, v1, v2, v3):
    v0 = (v0 + v1) & ((1 << 64) - 1)
    v1 = rotl64(v1, 13)
    v1 ^= v0
    v0 = rotl64(v0, 32)
    v2 = (v2 + v3) & ((1 << 64) - 1)
    v3 = rotl64(v3, 16)
    v3 ^= v2
    v0 = (v0 + v3) & ((1 << 64) - 1)
    v3 = rotl64(v3, 21)
    v3 ^= v0
    v2 = (v2 + v1) & ((1 << 64) - 1)
    v1 = rotl64(v1, 17)
    v1 ^= v2
    v2 = rotl64(v2, 32)
    return (v0, v1, v2, v3)

def siphash256(k0, k1, h):
    n0 = h & ((1 << 64) - 1)
    n1 = (h >> 64) & ((1 << 64) - 1)
    n2 = (h >> 128) & ((1 << 64) - 1)
    n3 = (h >> 192) & ((1 << 64) - 1)
    v0 = 0x736f6d6570736575 ^ k0
    v1 = 0x646f72616e646f6d ^ k1
    v2 = 0x6c7967656e657261 ^ k0
    v3 = 0x7465646279746573 ^ k1 ^ n0
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    v0 ^= n0
    v3 ^= n1
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    v0 ^= n1
    v3 ^= n2
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    v0 ^= n2
    v3 ^= n3
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    v0 ^= n3
    v3 ^= 0x2000000000000000
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    v0 ^= 0x2000000000000000
    v2 ^= 0xFF
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    return v0 ^ v1 ^ v2 ^ v3
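# Illustrative usage (editor's addition): Bitcoin Core uses this keyed hash
# for short transaction IDs in compact blocks (BIP152). Given two 64-bit keys
# and a 256-bit integer (e.g. a uint256 txid), it returns a 64-bit digest:
#
#   digest = siphash256(0x0706050403020100, 0x0F0E0D0C0B0A0908, some_uint256)
#   assert 0 <= digest < (1 << 64)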
160
miner_imports/test_framework/socks5.py
Normal file
@@ -0,0 +1,160 @@
#!/usr/bin/env python3
# Copyright (c) 2015-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Dummy Socks5 server for testing."""

import socket
import threading
import queue
import logging

logger = logging.getLogger("TestFramework.socks5")

# Protocol constants
class Command:
    CONNECT = 0x01

class AddressType:
    IPV4 = 0x01
    DOMAINNAME = 0x03
    IPV6 = 0x04

# Utility functions
def recvall(s, n):
    """Receive n bytes from a socket, or fail."""
    rv = bytearray()
    while n > 0:
        d = s.recv(n)
        if not d:
            raise IOError('Unexpected end of stream')
        rv.extend(d)
        n -= len(d)
    return rv

# Implementation classes
class Socks5Configuration():
    """Proxy configuration."""
    def __init__(self):
        self.addr = None  # Bind address (must be set)
        self.af = socket.AF_INET  # Bind address family
        self.unauth = False  # Support unauthenticated
        self.auth = False  # Support authentication

class Socks5Command():
    """Information about an incoming socks5 command."""
    def __init__(self, cmd, atyp, addr, port, username, password):
        self.cmd = cmd  # Command (one of Command.*)
        self.atyp = atyp  # Address type (one of AddressType.*)
        self.addr = addr  # Address
        self.port = port  # Port to connect to
        self.username = username
        self.password = password
    def __repr__(self):
        return 'Socks5Command(%s,%s,%s,%s,%s,%s)' % (self.cmd, self.atyp, self.addr, self.port, self.username, self.password)

class Socks5Connection():
    def __init__(self, serv, conn):
        self.serv = serv
        self.conn = conn

    def handle(self):
        """Handle socks5 request according to RFC1928."""
        try:
            # Verify socks version
            ver = recvall(self.conn, 1)[0]
            if ver != 0x05:
                raise IOError('Invalid socks version %i' % ver)
            # Choose authentication method
            nmethods = recvall(self.conn, 1)[0]
            methods = bytearray(recvall(self.conn, nmethods))
            method = None
            if 0x02 in methods and self.serv.conf.auth:
                method = 0x02  # username/password
            elif 0x00 in methods and self.serv.conf.unauth:
                method = 0x00  # unauthenticated
            if method is None:
                raise IOError('No supported authentication method was offered')
            # Send response
            self.conn.sendall(bytearray([0x05, method]))
            # Read authentication (optional)
            username = None
            password = None
            if method == 0x02:
                ver = recvall(self.conn, 1)[0]
                if ver != 0x01:
                    raise IOError('Invalid auth packet version %i' % ver)
                ulen = recvall(self.conn, 1)[0]
                username = str(recvall(self.conn, ulen))
                plen = recvall(self.conn, 1)[0]
                password = str(recvall(self.conn, plen))
                # Send authentication response
                self.conn.sendall(bytearray([0x01, 0x00]))

            # Read connect request
            ver, cmd, _, atyp = recvall(self.conn, 4)
            if ver != 0x05:
                raise IOError('Invalid socks version %i in connect request' % ver)
            if cmd != Command.CONNECT:
                raise IOError('Unhandled command %i in connect request' % cmd)

            if atyp == AddressType.IPV4:
                addr = recvall(self.conn, 4)
            elif atyp == AddressType.DOMAINNAME:
                n = recvall(self.conn, 1)[0]
                addr = recvall(self.conn, n)
            elif atyp == AddressType.IPV6:
                addr = recvall(self.conn, 16)
            else:
                raise IOError('Unknown address type %i' % atyp)
            port_hi, port_lo = recvall(self.conn, 2)
            port = (port_hi << 8) | port_lo

            # Send dummy response
            self.conn.sendall(bytearray([0x05, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]))

            cmdin = Socks5Command(cmd, atyp, addr, port, username, password)
            self.serv.queue.put(cmdin)
            logger.info('Proxy: %s', cmdin)
            # Fall through to disconnect
        except Exception as e:
            logger.exception("socks5 request handling failed.")
            self.serv.queue.put(e)
        finally:
            self.conn.close()

class Socks5Server():
    def __init__(self, conf):
        self.conf = conf
        self.s = socket.socket(conf.af)
        self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.s.bind(conf.addr)
        self.s.listen(5)
        self.running = False
        self.thread = None
        self.queue = queue.Queue()  # report connections and exceptions to client

    def run(self):
        while self.running:
            (sockconn, _) = self.s.accept()
            if self.running:
                conn = Socks5Connection(self, sockconn)
                thread = threading.Thread(None, conn.handle)
                thread.daemon = True
                thread.start()

    def start(self):
        assert not self.running
        self.running = True
        self.thread = threading.Thread(None, self.run)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.running = False
        # connect to self to end run loop
        s = socket.socket(self.conf.af)
        s.connect(self.conf.addr)
        s.close()
        self.thread.join()
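# Illustrative usage (editor's addition): tests stand the proxy up on a local
# address, point bitcoind's -proxy at it, and then inspect the queue:
#
#   conf = Socks5Configuration()
#   conf.addr = ('127.0.0.1', 19050)  # hypothetical port
#   conf.unauth = True
#   serv = Socks5Server(conf)
#   serv.start()
#   ...                               # connect attempts show up on serv.queue
#   serv.stop()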
920
miner_imports/test_framework/test_framework.py
Executable file
@@ -0,0 +1,920 @@
#!/usr/bin/env python3
# Copyright (c) 2014-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""

import configparser
from enum import Enum
import argparse
import logging
import os
import pdb
import random
import re
import shutil
import subprocess
import sys
import tempfile
import time

from typing import List
from .address import create_deterministic_address_bcrt1_p2tr_op_true
from .authproxy import JSONRPCException
from . import coverage
from .p2p import NetworkThread
from .test_node import TestNode
from .util import (
    MAX_NODES,
    PortSeed,
    assert_equal,
    check_json_precision,
    get_datadir_path,
    initialize_datadir,
    p2p_port,
    wait_until_helper,
)


class TestStatus(Enum):
    PASSED = 1
    FAILED = 2
    SKIPPED = 3

TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77

TMPDIR_PREFIX = "bitcoin_func_test_"


class SkipTest(Exception):
    """This exception is raised to skip a test"""

    def __init__(self, message):
        self.message = message


class BitcoinTestMetaClass(type):
    """Metaclass for BitcoinTestFramework.

    Ensures that any attempt to register a subclass of `BitcoinTestFramework`
    adheres to a standard whereby the subclass overrides `set_test_params` and
    `run_test` but DOES NOT override either `__init__` or `main`. If any of
    those standards are violated, a ``TypeError`` is raised."""

    def __new__(cls, clsname, bases, dct):
        if not clsname == 'BitcoinTestFramework':
            if not ('run_test' in dct and 'set_test_params' in dct):
                raise TypeError("BitcoinTestFramework subclasses must override "
                                "'run_test' and 'set_test_params'")
            if '__init__' in dct or 'main' in dct:
                raise TypeError("BitcoinTestFramework subclasses may not override "
                                "'__init__' or 'main'")

        return super().__new__(cls, clsname, bases, dct)


class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
    """Base class for a bitcoin test script.

    Individual bitcoin test scripts should subclass this class and override the set_test_params() and run_test() methods.

    Individual tests can also override the following methods to customize the test setup:

    - add_options()
    - setup_chain()
    - setup_network()
    - setup_nodes()

    The __init__() and main() methods should not be overridden.

    This class also contains various public and private helper methods."""
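    # Illustrative sketch (editor's addition): a minimal test built on this
    # framework overrides exactly the two required methods, e.g.:
    #
    #   class ExampleTest(BitcoinTestFramework):
    #       def set_test_params(self):
    #           self.num_nodes = 1
    #       def run_test(self):
    #           assert_equal(self.nodes[0].getblockcount(), 0)
    #
    #   # ExampleTest().main() would run it from a script's __main__ block.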
|
||||||
|
def __init__(self):
|
||||||
|
"""Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
|
||||||
|
self.chain: str = 'regtest'
|
||||||
|
self.setup_clean_chain: bool = False
|
||||||
|
self.nodes: List[TestNode] = []
|
||||||
|
self.network_thread = None
|
||||||
|
self.rpc_timeout = 60 # Wait for up to 60 seconds for the RPC server to respond
|
||||||
|
self.supports_cli = True
|
||||||
|
self.bind_to_localhost_only = True
|
||||||
|
self.parse_args()
|
||||||
|
self.disable_syscall_sandbox = self.options.nosandbox or self.options.valgrind
|
||||||
|
self.default_wallet_name = "default_wallet" if self.options.descriptors else ""
|
||||||
|
self.wallet_data_filename = "wallet.dat"
|
||||||
|
# Optional list of wallet names that can be set in set_test_params to
|
||||||
|
# create and import keys to. If unset, default is len(nodes) *
|
||||||
|
# [default_wallet_name]. If wallet names are None, wallet creation is
|
||||||
|
# skipped. If list is truncated, wallet creation is skipped and keys
|
||||||
|
# are not imported.
|
||||||
|
self.wallet_names = None
|
||||||
|
# By default the wallet is not required. Set to true by skip_if_no_wallet().
|
||||||
|
# When False, we ignore wallet_names regardless of what it is.
|
||||||
|
self.requires_wallet = False
|
||||||
|
# Disable ThreadOpenConnections by default, so that adding entries to
|
||||||
|
# addrman will not result in automatic connections to them.
|
||||||
|
self.disable_autoconnect = True
|
||||||
|
self.set_test_params()
|
||||||
|
assert self.wallet_names is None or len(self.wallet_names) <= self.num_nodes
|
||||||
|
if self.options.timeout_factor == 0 :
|
||||||
|
self.options.timeout_factor = 99999
|
||||||
|
self.rpc_timeout = int(self.rpc_timeout * self.options.timeout_factor) # optionally, increase timeout by a factor
|
||||||
|
|
||||||
|
def main(self):
|
||||||
|
"""Main function. This should not be overridden by the subclass test scripts."""
|
||||||
|
|
||||||
|
assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
|
||||||
|
|
||||||
|
try:
|
||||||
|
self.setup()
|
||||||
|
self.run_test()
|
||||||
|
except JSONRPCException:
|
||||||
|
self.log.exception("JSONRPC error")
|
||||||
|
self.success = TestStatus.FAILED
|
||||||
|
except SkipTest as e:
|
||||||
|
self.log.warning("Test Skipped: %s" % e.message)
|
||||||
|
self.success = TestStatus.SKIPPED
|
||||||
|
except AssertionError:
|
||||||
|
self.log.exception("Assertion failed")
|
||||||
|
self.success = TestStatus.FAILED
|
||||||
|
except KeyError:
|
||||||
|
self.log.exception("Key error")
|
||||||
|
self.success = TestStatus.FAILED
|
||||||
|
except subprocess.CalledProcessError as e:
|
||||||
|
self.log.exception("Called Process failed with '{}'".format(e.output))
|
||||||
|
self.success = TestStatus.FAILED
|
||||||
|
except Exception:
|
||||||
|
self.log.exception("Unexpected exception caught during testing")
|
||||||
|
self.success = TestStatus.FAILED
|
||||||
|
except KeyboardInterrupt:
|
||||||
|
self.log.warning("Exiting after keyboard interrupt")
|
||||||
|
self.success = TestStatus.FAILED
|
||||||
|
finally:
|
||||||
|
exit_code = self.shutdown()
|
||||||
|
sys.exit(exit_code)

    def parse_args(self):
        previous_releases_path = os.getenv("PREVIOUS_RELEASES_DIR") or os.getcwd() + "/releases"
        parser = argparse.ArgumentParser(usage="%(prog)s [options]")
        parser.add_argument("--nocleanup", dest="nocleanup", default=False, action="store_true",
                            help="Leave bitcoinds and test.* datadir on exit or error")
        parser.add_argument("--nosandbox", dest="nosandbox", default=False, action="store_true",
                            help="Don't use the syscall sandbox")
        parser.add_argument("--noshutdown", dest="noshutdown", default=False, action="store_true",
                            help="Don't stop bitcoinds after the test execution")
        parser.add_argument("--cachedir", dest="cachedir", default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
                            help="Directory for caching pregenerated datadirs (default: %(default)s)")
        parser.add_argument("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
        parser.add_argument("-l", "--loglevel", dest="loglevel", default="INFO",
                            help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
        parser.add_argument("--tracerpc", dest="trace_rpc", default=False, action="store_true",
                            help="Print out all RPC calls as they are made")
        parser.add_argument("--portseed", dest="port_seed", default=os.getpid(), type=int,
                            help="The seed to use for assigning port numbers (default: current process id)")
        parser.add_argument("--previous-releases", dest="prev_releases", action="store_true",
                            default=os.path.isdir(previous_releases_path) and bool(os.listdir(previous_releases_path)),
                            help="Force test of previous releases (default: %(default)s)")
        parser.add_argument("--coveragedir", dest="coveragedir",
                            help="Write tested RPC commands into this directory")
        parser.add_argument("--configfile", dest="configfile",
                            default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../config.ini"),
                            help="Location of the test framework config file (default: %(default)s)")
        parser.add_argument("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
                            help="Attach a python debugger if test fails")
        parser.add_argument("--usecli", dest="usecli", default=False, action="store_true",
                            help="use bitcoin-cli instead of RPC for all commands")
        parser.add_argument("--perf", dest="perf", default=False, action="store_true",
                            help="profile running nodes with perf for the duration of the test")
        parser.add_argument("--valgrind", dest="valgrind", default=False, action="store_true",
                            help="run nodes under the valgrind memory error detector: expect at least a ~10x slowdown. valgrind 3.14 or later required. Forces --nosandbox.")
        parser.add_argument("--randomseed", type=int,
                            help="set a random seed for deterministically reproducing a previous test run")
        parser.add_argument('--timeout-factor', dest="timeout_factor", type=float, default=1.0, help='adjust test timeouts by a factor. Setting it to 0 disables all timeouts')

        group = parser.add_mutually_exclusive_group()
        group.add_argument("--descriptors", action='store_const', const=True,
                           help="Run test using a descriptor wallet", dest='descriptors')
        group.add_argument("--legacy-wallet", action='store_const', const=False,
                           help="Run test using legacy wallets", dest='descriptors')

        self.add_options(parser)
        # Running TestShell in a Jupyter notebook causes an additional -f argument
        # To keep TestShell from failing with an "unrecognized argument" error, we add a dummy "-f" argument
        # source: https://stackoverflow.com/questions/48796169/how-to-fix-ipykernel-launcher-py-error-unrecognized-arguments-in-jupyter/56349168#56349168
        parser.add_argument("-f", "--fff", help="a dummy argument to fool ipython", default="1")
        self.options = parser.parse_args()
        self.options.previous_releases_path = previous_releases_path

        config = configparser.ConfigParser()
        config.read_file(open(self.options.configfile))
        self.config = config

        if self.options.descriptors is None:
            # Prefer BDB unless it isn't available
            if self.is_bdb_compiled():
                self.options.descriptors = False
            elif self.is_sqlite_compiled():
                self.options.descriptors = True
            else:
                # If neither are compiled, tests requiring a wallet will be skipped and the value of self.options.descriptors won't matter
                # It still needs to exist and be None in order for tests to work however.
                self.options.descriptors = None

    def setup(self):
        """Call this method to start up the test framework object with options set."""

        PortSeed.n = self.options.port_seed

        check_json_precision()

        self.options.cachedir = os.path.abspath(self.options.cachedir)

        config = self.config

        fname_bitcoind = os.path.join(
            config["environment"]["BUILDDIR"],
            "src",
            "bitcoind" + config["environment"]["EXEEXT"],
        )
        fname_bitcoincli = os.path.join(
            config["environment"]["BUILDDIR"],
            "src",
            "bitcoin-cli" + config["environment"]["EXEEXT"],
        )
        self.options.bitcoind = os.getenv("BITCOIND", default=fname_bitcoind)
        self.options.bitcoincli = os.getenv("BITCOINCLI", default=fname_bitcoincli)

        os.environ['PATH'] = os.pathsep.join([
            os.path.join(config['environment']['BUILDDIR'], 'src'),
            os.path.join(config['environment']['BUILDDIR'], 'src', 'qt'), os.environ['PATH']
        ])

        # Set up temp directory and start logging
        if self.options.tmpdir:
            self.options.tmpdir = os.path.abspath(self.options.tmpdir)
            os.makedirs(self.options.tmpdir, exist_ok=False)
        else:
            self.options.tmpdir = tempfile.mkdtemp(prefix=TMPDIR_PREFIX)
        self._start_logging()

        # Seed the PRNG. Note that test runs are reproducible if and only if
        # a single thread accesses the PRNG. For more information, see
        # https://docs.python.org/3/library/random.html#notes-on-reproducibility.
        # The network thread shouldn't access random. If we need to change the
        # network thread to access randomness, it should instantiate its own
        # random.Random object.
        seed = self.options.randomseed

        if seed is None:
            seed = random.randrange(sys.maxsize)
        else:
            self.log.debug("User supplied random seed {}".format(seed))

        random.seed(seed)
        self.log.debug("PRNG seed is: {}".format(seed))

        self.log.debug('Setting up network thread')
        self.network_thread = NetworkThread()
        self.network_thread.start()

        if self.options.usecli:
            if not self.supports_cli:
                raise SkipTest("--usecli specified but test does not support using CLI")
            self.skip_if_no_cli()
        self.skip_test_if_missing_module()
        self.setup_chain()
        self.setup_network()

        self.success = TestStatus.PASSED

    def shutdown(self):
        """Call this method to shut down the test framework object."""

        if self.success == TestStatus.FAILED and self.options.pdbonfailure:
            print("Testcase failed. Attaching python debugger. Enter ? for help")
            pdb.set_trace()

        self.log.debug('Closing down network thread')
        self.network_thread.close()
        if not self.options.noshutdown:
            self.log.info("Stopping nodes")
            if self.nodes:
                self.stop_nodes()
        else:
            for node in self.nodes:
                node.cleanup_on_exit = False
            self.log.info("Note: bitcoinds were not stopped and may still be running")

        should_clean_up = (
            not self.options.nocleanup and
            not self.options.noshutdown and
            self.success != TestStatus.FAILED and
            not self.options.perf
        )
        if should_clean_up:
            self.log.info("Cleaning up {} on exit".format(self.options.tmpdir))
            cleanup_tree_on_exit = True
        elif self.options.perf:
            self.log.warning("Not cleaning up dir {} due to perf data".format(self.options.tmpdir))
            cleanup_tree_on_exit = False
        else:
            self.log.warning("Not cleaning up dir {}".format(self.options.tmpdir))
            cleanup_tree_on_exit = False

        if self.success == TestStatus.PASSED:
            self.log.info("Tests successful")
            exit_code = TEST_EXIT_PASSED
        elif self.success == TestStatus.SKIPPED:
            self.log.info("Test skipped")
            exit_code = TEST_EXIT_SKIPPED
        else:
            self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
            self.log.error("")
            self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
            self.log.error("")
            self.log.error("If this failure happened unexpectedly or intermittently, please file a bug and provide a link or upload of the combined log.")
            self.log.error(self.config['environment']['PACKAGE_BUGREPORT'])
            self.log.error("")
            exit_code = TEST_EXIT_FAILED
        # Logging.shutdown will not remove stream- and filehandlers, so we must
        # do it explicitly. Handlers are removed so the next test run can apply
        # different log handler settings.
        # See: https://docs.python.org/3/library/logging.html#logging.shutdown
        for h in list(self.log.handlers):
            h.flush()
            h.close()
            self.log.removeHandler(h)
        rpc_logger = logging.getLogger("BitcoinRPC")
        for h in list(rpc_logger.handlers):
            h.flush()
            rpc_logger.removeHandler(h)
        if cleanup_tree_on_exit:
            shutil.rmtree(self.options.tmpdir)

        self.nodes.clear()
        return exit_code

    # Methods to override in subclass test scripts.
    def set_test_params(self):
        """Tests must override this method to change default values for number of nodes, topology, etc"""
        raise NotImplementedError

    def add_options(self, parser):
        """Override this method to add command-line options to the test"""
        pass

    def skip_test_if_missing_module(self):
        """Override this method to skip a test if a module is not compiled"""
        pass

    def setup_chain(self):
        """Override this method to customize blockchain setup"""
        self.log.info("Initializing test directory " + self.options.tmpdir)
        if self.setup_clean_chain:
            self._initialize_chain_clean()
        else:
            self._initialize_chain()

    def setup_network(self):
        """Override this method to customize test network topology"""
        self.setup_nodes()

        # Connect the nodes as a "chain". This allows us
        # to split the network between nodes 1 and 2 to get
        # two halves that can work on competing chains.
        #
        # Topology looks like this:
        # node0 <-- node1 <-- node2 <-- node3
        #
        # If all nodes are in IBD (clean chain from genesis), node0 is assumed to be the source of blocks (miner). To
        # ensure block propagation, all nodes will establish outgoing connections toward node0.
        # See fPreferredDownload in net_processing.
        #
        # If further outbound connections are needed, they can be added at the beginning of the test with e.g.
        # self.connect_nodes(1, 2)
        for i in range(self.num_nodes - 1):
            self.connect_nodes(i + 1, i)
        self.sync_all()

    def setup_nodes(self):
        """Override this method to customize test node setup"""
        extra_args = [[]] * self.num_nodes
        if hasattr(self, "extra_args"):
            extra_args = self.extra_args
        self.add_nodes(self.num_nodes, extra_args)
        self.start_nodes()
        if self.requires_wallet:
            self.import_deterministic_coinbase_privkeys()
        if not self.setup_clean_chain:
            for n in self.nodes:
                assert_equal(n.getblockchaininfo()["blocks"], 199)
            # To ensure that all nodes are out of IBD, the most recent block
            # must have a timestamp not too old (see IsInitialBlockDownload()).
            self.log.debug('Generate a block with current time')
            block_hash = self.generate(self.nodes[0], 1, sync_fun=self.no_op)[0]
            block = self.nodes[0].getblock(blockhash=block_hash, verbosity=0)
            for n in self.nodes:
                n.submitblock(block)
                chain_info = n.getblockchaininfo()
                assert_equal(chain_info["blocks"], 200)
                assert_equal(chain_info["initialblockdownload"], False)

    def import_deterministic_coinbase_privkeys(self):
        for i in range(self.num_nodes):
            self.init_wallet(node=i)

    def init_wallet(self, *, node):
        wallet_name = self.default_wallet_name if self.wallet_names is None else self.wallet_names[node] if node < len(self.wallet_names) else False
        if wallet_name is not False:
            n = self.nodes[node]
            if wallet_name is not None:
                n.createwallet(wallet_name=wallet_name, descriptors=self.options.descriptors, load_on_startup=True)
            n.importprivkey(privkey=n.get_deterministic_priv_key().key, label='coinbase', rescan=True)

    def run_test(self):
        """Tests must override this method to define test logic"""
        raise NotImplementedError
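
    # Illustrative sketch (not part of the upstream framework): a minimal test
    # script overrides the two required hooks above and calls main(). The block
    # count of 210 assumes the default pre-mined chain of 200 blocks set up in
    # setup_nodes() plus the 10 generated here.
    #
    #     class ExampleTest(BitcoinTestFramework):
    #         def set_test_params(self):
    #             self.num_nodes = 2
    #
    #         def run_test(self):
    #             self.generate(self.nodes[0], 10)
    #             assert_equal(self.nodes[0].getblockcount(), 210)
    #
    #     if __name__ == '__main__':
    #         ExampleTest().main()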

    # Public helper methods. These can be accessed by the subclass test scripts.

    def add_nodes(self, num_nodes: int, extra_args=None, *, rpchost=None, binary=None, binary_cli=None, versions=None):
        """Instantiate TestNode objects.

        Should only be called once after the nodes have been specified in
        set_test_params()."""
        def get_bin_from_version(version, bin_name, bin_default):
            if not version:
                return bin_default
            if version > 219999:
                # Starting at client version 220000 the first two digits represent
                # the major version, e.g. v22.0 instead of v0.22.0.
                version *= 100
            return os.path.join(
                self.options.previous_releases_path,
                re.sub(
                    r'\.0$' if version <= 219999 else r'(\.0){1,2}$',
                    '',  # Remove trailing dot for point releases, after 22.0 also remove double trailing dot.
                    'v{}.{}.{}.{}'.format(
                        (version % 100000000) // 1000000,
                        (version % 1000000) // 10000,
                        (version % 10000) // 100,
                        (version % 100) // 1,
                    ),
                ),
                'bin',
                bin_name,
            )
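
        # Worked example of the decoding above: a client version int of 170000
        # splits into 0.17.0.0, giving 'v0.17.0.0', which the regex trims to
        # 'v0.17.0'; 220000 is first scaled to 22000000, giving 'v22.0.0.0',
        # trimmed to 'v22.0'. Binaries are then looked up under e.g.
        # <previous_releases_path>/v0.17.0/bin/bitcoind, as implied by the
        # os.path.join above.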

        if self.bind_to_localhost_only:
            extra_confs = [["bind=127.0.0.1"]] * num_nodes
        else:
            extra_confs = [[]] * num_nodes
        if extra_args is None:
            extra_args = [[]] * num_nodes
        if versions is None:
            versions = [None] * num_nodes
        if self.is_syscall_sandbox_compiled() and not self.disable_syscall_sandbox:
            for i in range(len(extra_args)):
                # The -sandbox argument is not present in the v22.0 release.
                if versions[i] is None or versions[i] >= 229900:
                    extra_args[i] = extra_args[i] + ["-sandbox=log-and-abort"]
        if binary is None:
            binary = [get_bin_from_version(v, 'bitcoind', self.options.bitcoind) for v in versions]
        if binary_cli is None:
            binary_cli = [get_bin_from_version(v, 'bitcoin-cli', self.options.bitcoincli) for v in versions]
        assert_equal(len(extra_confs), num_nodes)
        assert_equal(len(extra_args), num_nodes)
        assert_equal(len(versions), num_nodes)
        assert_equal(len(binary), num_nodes)
        assert_equal(len(binary_cli), num_nodes)
        for i in range(num_nodes):
            test_node_i = TestNode(
                i,
                get_datadir_path(self.options.tmpdir, i),
                chain=self.chain,
                rpchost=rpchost,
                timewait=self.rpc_timeout,
                timeout_factor=self.options.timeout_factor,
                bitcoind=binary[i],
                bitcoin_cli=binary_cli[i],
                version=versions[i],
                coverage_dir=self.options.coveragedir,
                cwd=self.options.tmpdir,
                extra_conf=extra_confs[i],
                extra_args=extra_args[i],
                use_cli=self.options.usecli,
                start_perf=self.options.perf,
                use_valgrind=self.options.valgrind,
                descriptors=self.options.descriptors,
            )
            self.nodes.append(test_node_i)
            if not test_node_i.version_is_at_least(170000):
                # adjust conf for pre 17
                conf_file = test_node_i.bitcoinconf
                with open(conf_file, 'r', encoding='utf8') as conf:
                    conf_data = conf.read()
                with open(conf_file, 'w', encoding='utf8') as conf:
                    conf.write(conf_data.replace('[regtest]', ''))

    def start_node(self, i, *args, **kwargs):
        """Start a bitcoind"""

        node = self.nodes[i]

        node.start(*args, **kwargs)
        node.wait_for_rpc_connection()

        if self.options.coveragedir is not None:
            coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)

    def start_nodes(self, extra_args=None, *args, **kwargs):
        """Start multiple bitcoinds"""

        if extra_args is None:
            extra_args = [None] * self.num_nodes
        assert_equal(len(extra_args), self.num_nodes)
        try:
            for i, node in enumerate(self.nodes):
                node.start(extra_args[i], *args, **kwargs)
            for node in self.nodes:
                node.wait_for_rpc_connection()
        except:
            # If one node failed to start, stop the others
            self.stop_nodes()
            raise

        if self.options.coveragedir is not None:
            for node in self.nodes:
                coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)

    def stop_node(self, i, expected_stderr='', wait=0):
        """Stop a bitcoind test node"""
        self.nodes[i].stop_node(expected_stderr, wait=wait)

    def stop_nodes(self, wait=0):
        """Stop multiple bitcoind test nodes"""
        for node in self.nodes:
            # Issue RPC to stop nodes
            node.stop_node(wait=wait, wait_until_stopped=False)

        for node in self.nodes:
            # Wait for nodes to stop
            node.wait_until_stopped()

    def restart_node(self, i, extra_args=None):
        """Stop and start a test node"""
        self.stop_node(i)
        self.start_node(i, extra_args)

    def wait_for_node_exit(self, i, timeout):
        self.nodes[i].process.wait(timeout)

    def connect_nodes(self, a, b):
        from_connection = self.nodes[a]
        to_connection = self.nodes[b]
        ip_port = "127.0.0.1:" + str(p2p_port(b))
        from_connection.addnode(ip_port, "onetry")
        # poll until version handshake complete to avoid race conditions
        # with transaction relaying
        # See comments in net_processing:
        # * Must have a version message before anything else
        # * Must have a verack message before anything else
        wait_until_helper(lambda: all(peer['version'] != 0 for peer in from_connection.getpeerinfo()))
        wait_until_helper(lambda: all(peer['version'] != 0 for peer in to_connection.getpeerinfo()))
        wait_until_helper(lambda: all(peer['bytesrecv_per_msg'].pop('verack', 0) == 24 for peer in from_connection.getpeerinfo()))
        wait_until_helper(lambda: all(peer['bytesrecv_per_msg'].pop('verack', 0) == 24 for peer in to_connection.getpeerinfo()))
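
    # Usage sketch (illustrative): self.connect_nodes(0, 1) makes node0 open an
    # outbound connection to node1's p2p port and blocks until both sides have
    # completed the version/verack handshake, so a test can safely rely on
    # transaction relay immediately afterwards.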

    def disconnect_nodes(self, a, b):
        def disconnect_nodes_helper(from_connection, node_num):
            def get_peer_ids():
                result = []
                for peer in from_connection.getpeerinfo():
                    if "testnode{}".format(node_num) in peer['subver']:
                        result.append(peer['id'])
                return result

            peer_ids = get_peer_ids()
            if not peer_ids:
                self.log.warning("disconnect_nodes: {} and {} were not connected".format(
                    from_connection.index,
                    node_num,
                ))
                return
            for peer_id in peer_ids:
                try:
                    from_connection.disconnectnode(nodeid=peer_id)
                except JSONRPCException as e:
                    # If this node is disconnected between calculating the peer id
                    # and issuing the disconnect, don't worry about it.
                    # This avoids a race condition if we're mass-disconnecting peers.
                    if e.error['code'] != -29:  # RPC_CLIENT_NODE_NOT_CONNECTED
                        raise

            # wait to disconnect
            wait_until_helper(lambda: not get_peer_ids(), timeout=5)

        disconnect_nodes_helper(self.nodes[a], b)

    def split_network(self):
        """
        Split the network of four nodes into nodes 0/1 and 2/3.
        """
        self.disconnect_nodes(1, 2)
        self.sync_all(self.nodes[:2])
        self.sync_all(self.nodes[2:])

    def join_network(self):
        """
        Join the (previously split) network halves together.
        """
        self.connect_nodes(1, 2)
        self.sync_all()

    def no_op(self):
        pass

    def generate(self, generator, *args, sync_fun=None, **kwargs):
        blocks = generator.generate(*args, invalid_call=False, **kwargs)
        sync_fun() if sync_fun else self.sync_all()
        return blocks

    def generateblock(self, generator, *args, sync_fun=None, **kwargs):
        blocks = generator.generateblock(*args, invalid_call=False, **kwargs)
        sync_fun() if sync_fun else self.sync_all()
        return blocks

    def generatetoaddress(self, generator, *args, sync_fun=None, **kwargs):
        blocks = generator.generatetoaddress(*args, invalid_call=False, **kwargs)
        sync_fun() if sync_fun else self.sync_all()
        return blocks

    def generatetodescriptor(self, generator, *args, sync_fun=None, **kwargs):
        blocks = generator.generatetodescriptor(*args, invalid_call=False, **kwargs)
        sync_fun() if sync_fun else self.sync_all()
        return blocks
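
    # Illustrative usage of the wrappers above: by default every generate*()
    # call syncs all nodes; pass sync_fun=self.no_op to skip syncing, or a
    # custom closure to sync only a subset, e.g.
    #
    #     self.generate(self.nodes[0], 5,
    #                   sync_fun=lambda: self.sync_blocks(self.nodes[:2]))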

    def sync_blocks(self, nodes=None, wait=1, timeout=60):
        """
        Wait until everybody has the same tip.
        sync_blocks needs to be called with an rpc_connections set that has at
        least one node already synced to the latest, stable tip, otherwise
        there's a chance it might return before all nodes are stably synced.
        """
        rpc_connections = nodes or self.nodes
        timeout = int(timeout * self.options.timeout_factor)
        stop_time = time.time() + timeout
        while time.time() <= stop_time:
            best_hash = [x.getbestblockhash() for x in rpc_connections]
            if best_hash.count(best_hash[0]) == len(rpc_connections):
                return
            # Check that each peer has at least one connection
            assert (all([len(x.getpeerinfo()) for x in rpc_connections]))
            time.sleep(wait)
        raise AssertionError("Block sync timed out after {}s:{}".format(
            timeout,
            "".join("\n  {!r}".format(b) for b in best_hash),
        ))
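
    # Worked example (illustrative): with the default wait=1 and timeout=60 and
    # the test run with --timeout-factor 2, the loop above polls
    # getbestblockhash roughly once per second for up to 120 seconds before
    # raising.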

    def sync_mempools(self, nodes=None, wait=1, timeout=60, flush_scheduler=True):
        """
        Wait until everybody has the same transactions in their memory
        pools
        """
        rpc_connections = nodes or self.nodes
        timeout = int(timeout * self.options.timeout_factor)
        stop_time = time.time() + timeout
        while time.time() <= stop_time:
            pool = [set(r.getrawmempool()) for r in rpc_connections]
            if pool.count(pool[0]) == len(rpc_connections):
                if flush_scheduler:
                    for r in rpc_connections:
                        r.syncwithvalidationinterfacequeue()
                return
            # Check that each peer has at least one connection
            assert (all([len(x.getpeerinfo()) for x in rpc_connections]))
            time.sleep(wait)
        raise AssertionError("Mempool sync timed out after {}s:{}".format(
            timeout,
            "".join("\n  {!r}".format(m) for m in pool),
        ))

    def sync_all(self, nodes=None):
        self.sync_blocks(nodes)
        self.sync_mempools(nodes)

    def wait_until(self, test_function, timeout=60):
        return wait_until_helper(test_function, timeout=timeout, timeout_factor=self.options.timeout_factor)

    # Private helper methods. These should not be accessed by the subclass test scripts.

    def _start_logging(self):
        # Add logger and logging handlers
        self.log = logging.getLogger('TestFramework')
        self.log.setLevel(logging.DEBUG)
        # Create file handler to log all messages
        fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log', encoding='utf-8')
        fh.setLevel(logging.DEBUG)
        # Create console handler to log messages to stdout. By default this logs messages at INFO level and higher, but it can be configured with --loglevel.
        ch = logging.StreamHandler(sys.stdout)
        # User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
        ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
        ch.setLevel(ll)
        # Format logs the same as bitcoind's debug.log with microprecision (so log files can be concatenated and sorted)
        formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000Z %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%dT%H:%M:%S')
        formatter.converter = time.gmtime
        fh.setFormatter(formatter)
        ch.setFormatter(formatter)
        # add the handlers to the logger
        self.log.addHandler(fh)
        self.log.addHandler(ch)

        if self.options.trace_rpc:
            rpc_logger = logging.getLogger("BitcoinRPC")
            rpc_logger.setLevel(logging.DEBUG)
            rpc_handler = logging.StreamHandler(sys.stdout)
            rpc_handler.setLevel(logging.DEBUG)
            rpc_logger.addHandler(rpc_handler)
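
    # Illustrative examples for the console log level handling above: both
    # --loglevel=DEBUG and its numeric equivalent --loglevel=10 are accepted,
    # since a digit string is passed through int() and anything else is
    # upper-cased for the logging module.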

    def _initialize_chain(self):
        """Initialize a pre-mined blockchain for use by the test.

        Create a cache of a 199-block-long chain
        Afterward, create num_nodes copies from the cache."""

        CACHE_NODE_ID = 0  # Use node 0 to create the cache for all other nodes
        cache_node_dir = get_datadir_path(self.options.cachedir, CACHE_NODE_ID)
        assert self.num_nodes <= MAX_NODES

        if not os.path.isdir(cache_node_dir):
            self.log.debug("Creating cache directory {}".format(cache_node_dir))

            initialize_datadir(self.options.cachedir, CACHE_NODE_ID, self.chain, self.disable_autoconnect)
            self.nodes.append(
                TestNode(
                    CACHE_NODE_ID,
                    cache_node_dir,
                    chain=self.chain,
                    extra_conf=["bind=127.0.0.1"],
                    extra_args=['-disablewallet'],
                    rpchost=None,
                    timewait=self.rpc_timeout,
                    timeout_factor=self.options.timeout_factor,
                    bitcoind=self.options.bitcoind,
                    bitcoin_cli=self.options.bitcoincli,
                    coverage_dir=None,
                    cwd=self.options.tmpdir,
                    descriptors=self.options.descriptors,
                ))
            self.start_node(CACHE_NODE_ID)
            cache_node = self.nodes[CACHE_NODE_ID]

            # Wait for RPC connections to be ready
            cache_node.wait_for_rpc_connection()

            # Set a time in the past, so that blocks don't end up in the future
            cache_node.setmocktime(cache_node.getblockheader(cache_node.getbestblockhash())['time'])

            # Create a 199-block-long chain; each of the 3 first nodes
            # gets 25 mature blocks and 25 immature.
            # The 4th address gets 25 mature and only 24 immature blocks so that the very last
            # block in the cache does not age too much (have an old tip age).
            # This is needed so that we are out of IBD when the test starts,
            # see the tip age check in IsInitialBlockDownload().
            gen_addresses = [k.address for k in TestNode.PRIV_KEYS][:3] + [create_deterministic_address_bcrt1_p2tr_op_true()[0]]
            assert_equal(len(gen_addresses), 4)
            for i in range(8):
                self.generatetoaddress(
                    cache_node,
                    nblocks=25 if i != 7 else 24,
                    address=gen_addresses[i % len(gen_addresses)],
                )

            assert_equal(cache_node.getblockchaininfo()["blocks"], 199)
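
            # Sanity arithmetic for the assert above: 8 rounds of 25 blocks,
            # with the last round shortened to 24, gives 7 * 25 + 24 = 199.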

            # Shut it down, and clean up cache directories:
            self.stop_nodes()
            self.nodes = []

            def cache_path(*paths):
                return os.path.join(cache_node_dir, self.chain, *paths)

            os.rmdir(cache_path('wallets'))  # Remove empty wallets dir
            for entry in os.listdir(cache_path()):
                if entry not in ['chainstate', 'blocks', 'indexes']:  # Only indexes, chainstate and blocks folders
                    os.remove(cache_path(entry))

        for i in range(self.num_nodes):
            self.log.debug("Copy cache directory {} to node {}".format(cache_node_dir, i))
            to_dir = get_datadir_path(self.options.tmpdir, i)
            shutil.copytree(cache_node_dir, to_dir)
            initialize_datadir(self.options.tmpdir, i, self.chain, self.disable_autoconnect)  # Overwrite port/rpcport in bitcoin.conf

    def _initialize_chain_clean(self):
        """Initialize empty blockchain for use by the test.

        Create an empty blockchain and num_nodes wallets.
        Useful if a test case wants complete control over initialization."""
        for i in range(self.num_nodes):
            initialize_datadir(self.options.tmpdir, i, self.chain, self.disable_autoconnect)

    def skip_if_no_py3_zmq(self):
        """Attempt to import the zmq package and skip the test if the import fails."""
        try:
            import zmq  # noqa
        except ImportError:
            raise SkipTest("python3-zmq module not available.")

    def skip_if_no_bitcoind_zmq(self):
        """Skip the running test if bitcoind has not been compiled with zmq support."""
        if not self.is_zmq_compiled():
            raise SkipTest("bitcoind has not been built with zmq enabled.")

    def skip_if_no_wallet(self):
        """Skip the running test if wallet has not been compiled."""
        self.requires_wallet = True
        if not self.is_wallet_compiled():
            raise SkipTest("wallet has not been compiled.")
        if self.options.descriptors:
            self.skip_if_no_sqlite()
        else:
            self.skip_if_no_bdb()

    def skip_if_no_sqlite(self):
        """Skip the running test if sqlite has not been compiled."""
        if not self.is_sqlite_compiled():
            raise SkipTest("sqlite has not been compiled.")

    def skip_if_no_bdb(self):
        """Skip the running test if BDB has not been compiled."""
        if not self.is_bdb_compiled():
            raise SkipTest("BDB has not been compiled.")

    def skip_if_no_wallet_tool(self):
        """Skip the running test if bitcoin-wallet has not been compiled."""
        if not self.is_wallet_tool_compiled():
            raise SkipTest("bitcoin-wallet has not been compiled")

    def skip_if_no_cli(self):
        """Skip the running test if bitcoin-cli has not been compiled."""
        if not self.is_cli_compiled():
            raise SkipTest("bitcoin-cli has not been compiled.")

    def skip_if_no_previous_releases(self):
        """Skip the running test if previous releases are not available."""
        if not self.has_previous_releases():
            raise SkipTest("previous releases not available or disabled")

    def has_previous_releases(self):
        """Checks whether previous releases are present and enabled."""
        if not os.path.isdir(self.options.previous_releases_path):
            if self.options.prev_releases:
                raise AssertionError("Force test of previous releases but releases missing: {}".format(
                    self.options.previous_releases_path))
        return self.options.prev_releases

    def skip_if_no_external_signer(self):
        """Skip the running test if external signer support has not been compiled."""
        if not self.is_external_signer_compiled():
            raise SkipTest("external signer support has not been compiled.")

    def is_cli_compiled(self):
        """Checks whether bitcoin-cli was compiled."""
        return self.config["components"].getboolean("ENABLE_CLI")

    def is_external_signer_compiled(self):
        """Checks whether external signer support was compiled."""
        return self.config["components"].getboolean("ENABLE_EXTERNAL_SIGNER")

    def is_wallet_compiled(self):
        """Checks whether the wallet module was compiled."""
        return self.config["components"].getboolean("ENABLE_WALLET")

    def is_specified_wallet_compiled(self):
        """Checks whether wallet support for the specified type
        (legacy or descriptor wallet) was compiled."""
        if self.options.descriptors:
            return self.is_sqlite_compiled()
        else:
            return self.is_bdb_compiled()

    def is_wallet_tool_compiled(self):
        """Checks whether bitcoin-wallet was compiled."""
        return self.config["components"].getboolean("ENABLE_WALLET_TOOL")

    def is_zmq_compiled(self):
        """Checks whether the zmq module was compiled."""
        return self.config["components"].getboolean("ENABLE_ZMQ")

    def is_sqlite_compiled(self):
        """Checks whether the wallet module was compiled with Sqlite support."""
        return self.config["components"].getboolean("USE_SQLITE")

    def is_bdb_compiled(self):
        """Checks whether the wallet module was compiled with BDB support."""
        return self.config["components"].getboolean("USE_BDB")

    def is_syscall_sandbox_compiled(self):
        """Checks whether the syscall sandbox was compiled."""
        return self.config["components"].getboolean("ENABLE_SYSCALL_SANDBOX")
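
# Illustrative sketch of the config.ini section read by the is_*_compiled()
# helpers above. The exact keys are taken from the getboolean() calls; the
# values and file layout are assumptions, since the file is generated by the
# build system:
#
#     [components]
#     ENABLE_CLI=true
#     ENABLE_WALLET=true
#     USE_SQLITE=true
#     USE_BDB=true
#     ENABLE_ZMQ=true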
822
miner_imports/test_framework/test_node.py
Executable file
@@ -0,0 +1,822 @@
#!/usr/bin/env python3
# Copyright (c) 2017-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Class for bitcoind node under test"""

import contextlib
import decimal
import errno
from enum import Enum
import http.client
import json
import logging
import os
import re
import subprocess
import tempfile
import time
import urllib.parse
import collections
import shlex
import sys
from pathlib import Path

from .authproxy import JSONRPCException
from .descriptors import descsum_create
from .p2p import P2P_SUBVERSION
from .util import (
    MAX_NODES,
    assert_equal,
    append_config,
    delete_cookie_file,
    get_auth_cookie,
    get_rpc_proxy,
    rpc_url,
    wait_until_helper,
    p2p_port,
    EncodeDecimal,
)

BITCOIND_PROC_WAIT_TIMEOUT = 60


class FailedToStartError(Exception):
    """Raised when a node fails to start correctly."""


class ErrorMatch(Enum):
    FULL_TEXT = 1
    FULL_REGEX = 2
    PARTIAL_REGEX = 3


class TestNode():
    """A class for representing a bitcoind node under test.

    This class contains:

    - state about the node (whether it's running, etc)
    - a Python subprocess.Popen object representing the running process
    - an RPC connection to the node
    - one or more P2P connections to the node


    To make things easier for the test writer, any unrecognised messages will
    be dispatched to the RPC connection."""

    def __init__(self, i, datadir, *, chain, rpchost, timewait, timeout_factor, bitcoind, bitcoin_cli, coverage_dir, cwd, extra_conf=None, extra_args=None, use_cli=False, start_perf=False, use_valgrind=False, version=None, descriptors=False):
        """
        Kwargs:
            start_perf (bool): If True, begin profiling the node with `perf` as soon as
                the node starts.
        """

        self.index = i
        self.p2p_conn_index = 1
        self.datadir = datadir
        self.bitcoinconf = os.path.join(self.datadir, "bitcoin.conf")
        self.stdout_dir = os.path.join(self.datadir, "stdout")
        self.stderr_dir = os.path.join(self.datadir, "stderr")
        self.chain = chain
        self.rpchost = rpchost
        self.rpc_timeout = timewait
        self.binary = bitcoind
        self.coverage_dir = coverage_dir
        self.cwd = cwd
        self.descriptors = descriptors
        if extra_conf is not None:
            append_config(datadir, extra_conf)
        # Most callers will just need to add extra args to the standard list below.
        # For those callers that need more flexibility, they can just set the args property directly.
        # Note that common args are set in the config file (see initialize_datadir)
        self.extra_args = extra_args
        self.version = version
        # Configuration for logging is set as command-line args rather than in the bitcoin.conf file.
        # This means that starting a bitcoind using the temp dir to debug a failed test won't
        # spam debug.log.
        self.args = [
            self.binary,
            "-datadir=" + self.datadir,
            "-logtimemicros",
            "-debug",
            "-debugexclude=libevent",
            "-debugexclude=leveldb",
            "-uacomment=testnode%d" % i,
        ]
        if use_valgrind:
            default_suppressions_file = os.path.join(
                os.path.dirname(os.path.realpath(__file__)),
                "..", "..", "..", "contrib", "valgrind.supp")
            suppressions_file = os.getenv("VALGRIND_SUPPRESSIONS_FILE",
                                          default_suppressions_file)
            self.args = ["valgrind", "--suppressions={}".format(suppressions_file),
                         "--gen-suppressions=all", "--exit-on-first-error=yes",
                         "--error-exitcode=1", "--quiet"] + self.args

        if self.version_is_at_least(190000):
            self.args.append("-logthreadnames")
        if self.version_is_at_least(219900):
            self.args.append("-logsourcelocations")

        self.cli = TestNodeCLI(bitcoin_cli, self.datadir)
        self.use_cli = use_cli
        self.start_perf = start_perf

        self.running = False
        self.process = None
        self.rpc_connected = False
        self.rpc = None
        self.url = None
        self.log = logging.getLogger('TestFramework.node%d' % i)
        self.cleanup_on_exit = True  # Whether to kill the node when this object goes away
        # Cache perf subprocesses here by their data output filename.
        self.perf_subprocesses = {}

        self.p2ps = []
        self.timeout_factor = timeout_factor

    AddressKeyPair = collections.namedtuple('AddressKeyPair', ['address', 'key'])
    PRIV_KEYS = [
        # address , privkey
        AddressKeyPair('mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z', 'cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW'),
        AddressKeyPair('msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg', 'cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE'),
        AddressKeyPair('mnonCMyH9TmAsSj3M59DsbH8H63U3RKoFP', 'cTrh7dkEAeJd6b3MRX9bZK8eRmNqVCMH3LSUkE3dSFDyzjU38QxK'),
        AddressKeyPair('mqJupas8Dt2uestQDvV2NH3RU8uZh2dqQR', 'cVuKKa7gbehEQvVq717hYcbE9Dqmq7KEBKqWgWrYBa2CKKrhtRim'),
        AddressKeyPair('msYac7Rvd5ywm6pEmkjyxhbCDKqWsVeYws', 'cQDCBuKcjanpXDpCqacNSjYfxeQj8G6CAtH1Dsk3cXyqLNC4RPuh'),
        AddressKeyPair('n2rnuUnwLgXqf9kk2kjvVm8R5BZK1yxQBi', 'cQakmfPSLSqKHyMFGwAqKHgWUiofJCagVGhiB4KCainaeCSxeyYq'),
        AddressKeyPair('myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6', 'cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK'),
        AddressKeyPair('mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8', 'cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy'),
        AddressKeyPair('mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg', 'cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k'),
        AddressKeyPair('mq4fBNdckGtvY2mijd9am7DRsbRB4KjUkf', 'cN55daf1HotwBAgAKWVgDcoppmUNDtQSfb7XLutTLeAgVc3u8hik'),
        AddressKeyPair('mpFAHDjX7KregM3rVotdXzQmkbwtbQEnZ6', 'cT7qK7g1wkYEMvKowd2ZrX1E5f6JQ7TM246UfqbCiyF7kZhorpX3'),
        AddressKeyPair('mzRe8QZMfGi58KyWCse2exxEFry2sfF2Y7', 'cPiRWE8KMjTRxH1MWkPerhfoHFn5iHPWVK5aPqjW8NxmdwenFinJ'),
    ]

    def get_deterministic_priv_key(self):
        """Return a deterministic priv key in base58, that only depends on the node's index"""
        assert len(self.PRIV_KEYS) == MAX_NODES
        return self.PRIV_KEYS[self.index]

    def _node_msg(self, msg: str) -> str:
        """Return a modified msg that identifies this node by its index as a debugging aid."""
        return "[node %d] %s" % (self.index, msg)

    def _raise_assertion_error(self, msg: str):
        """Raise an AssertionError with msg modified to identify this node."""
        raise AssertionError(self._node_msg(msg))

    def __del__(self):
        # Ensure that we don't leave any bitcoind processes lying around after
        # the test ends
        if self.process and self.cleanup_on_exit:
            # Should only happen on test failure
            # Avoid using logger, as that may have already been shutdown when
            # this destructor is called.
            print(self._node_msg("Cleaning up leftover process"))
            self.process.kill()

    def __getattr__(self, name):
        """Dispatches any unrecognised messages to the RPC connection or a CLI instance."""
        if self.use_cli:
            return getattr(RPCOverloadWrapper(self.cli, True, self.descriptors), name)
        else:
            assert self.rpc_connected and self.rpc is not None, self._node_msg("Error: no RPC connection")
            return getattr(RPCOverloadWrapper(self.rpc, descriptors=self.descriptors), name)

    def start(self, extra_args=None, *, cwd=None, stdout=None, stderr=None, **kwargs):
        """Start the node."""
        if extra_args is None:
            extra_args = self.extra_args

        # Add a new stdout and stderr file each time bitcoind is started
        if stderr is None:
            stderr = tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False)
        if stdout is None:
            stdout = tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False)
        self.stderr = stderr
        self.stdout = stdout

        if cwd is None:
            cwd = self.cwd

        # Delete any existing cookie file -- if such a file exists (eg due to
        # unclean shutdown), it will get overwritten anyway by bitcoind, and
        # potentially interfere with our attempt to authenticate
        delete_cookie_file(self.datadir, self.chain)

        # add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are written to stderr and not the terminal
        subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1")

        self.process = subprocess.Popen(self.args + extra_args, env=subp_env, stdout=stdout, stderr=stderr, cwd=cwd, **kwargs)

        self.running = True
        self.log.debug("bitcoind started, waiting for RPC to come up")

        if self.start_perf:
            self._start_perf()

    def wait_for_rpc_connection(self):
        """Sets up an RPC connection to the bitcoind process. Raises an AssertionError if unable to connect."""
        # Poll at a rate of four times per second
        poll_per_s = 4
        for _ in range(poll_per_s * self.rpc_timeout):
            if self.process.poll() is not None:
                raise FailedToStartError(self._node_msg(
                    'bitcoind exited with status {} during initialization'.format(self.process.returncode)))
            try:
                rpc = get_rpc_proxy(
                    rpc_url(self.datadir, self.index, self.chain, self.rpchost),
                    self.index,
                    timeout=self.rpc_timeout // 2,  # Shorter timeout to allow for one retry in case of ETIMEDOUT
                    coveragedir=self.coverage_dir,
                )
                rpc.getblockcount()
                # If the call to getblockcount() succeeds then the RPC connection is up
                if self.version_is_at_least(190000):
                    # getmempoolinfo.loaded is available since commit
                    # bb8ae2c (version 0.19.0)
                    wait_until_helper(lambda: rpc.getmempoolinfo()['loaded'], timeout_factor=self.timeout_factor)
                    # Wait for the node to finish reindex, block import, and
                    # loading the mempool. Usually importing happens fast or
                    # even "immediate" when the node is started. However, there
                    # is no guarantee and sometimes ThreadImport might finish
                    # later. This is going to cause intermittent test failures,
                    # because generally the tests assume the node is fully
                    # ready after being started.
                    #
                    # For example, the node will reject block messages from p2p
                    # when it is still importing with the error "Unexpected
                    # block message received"
                    #
                    # The wait is done here to make tests as robust as possible
                    # and prevent racy tests and intermittent failures as much
                    # as possible. Some tests might not need this, but the
                    # overhead is trivial, and the added guarantees are worth
                    # the minimal performance cost.
                self.log.debug("RPC successfully started")
                if self.use_cli:
                    return
                self.rpc = rpc
                self.rpc_connected = True
                self.url = self.rpc.rpc_url
                return
            except JSONRPCException as e:  # Initialization phase
                # -28 RPC in warmup
                # -342 Service unavailable, RPC server started but is shutting down due to error
                if e.error['code'] != -28 and e.error['code'] != -342:
                    raise  # unknown JSON RPC exception
            except ConnectionResetError:
                # This might happen when the RPC server is in warmup, but shut down before the call to getblockcount
                # succeeds. Try again to properly raise the FailedToStartError
                pass
            except OSError as e:
                if e.errno == errno.ETIMEDOUT:
                    pass  # Treat identical to ConnectionResetError
                elif e.errno == errno.ECONNREFUSED:
                    pass  # Port not yet open?
                else:
                    raise  # unknown OS error
            except ValueError as e:  # cookie file not found and no rpcuser or rpcpassword; bitcoind is still starting
                if "No RPC credentials" not in str(e):
                    raise
            time.sleep(1.0 / poll_per_s)
        self._raise_assertion_error("Unable to connect to bitcoind after {}s".format(self.rpc_timeout))
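
    # Worked example of the polling above (illustrative; assumes the common
    # rpc_timeout of 60 used elsewhere in the framework): with poll_per_s = 4
    # the loop makes up to 4 * 60 = 240 attempts spaced 0.25s apart, i.e.
    # roughly rpc_timeout seconds in total before the assertion fires.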

    def wait_for_cookie_credentials(self):
        """Ensures auth cookie credentials can be read, e.g. for testing CLI with -rpcwait before RPC connection is up."""
        self.log.debug("Waiting for cookie credentials")
        # Poll at a rate of four times per second.
        poll_per_s = 4
        for _ in range(poll_per_s * self.rpc_timeout):
            try:
                get_auth_cookie(self.datadir, self.chain)
                self.log.debug("Cookie credentials successfully retrieved")
                return
            except ValueError:  # cookie file not found and no rpcuser or rpcpassword; bitcoind is still starting
                pass  # so we continue polling until RPC credentials are retrieved
            time.sleep(1.0 / poll_per_s)
        self._raise_assertion_error("Unable to retrieve cookie credentials after {}s".format(self.rpc_timeout))

    def generate(self, nblocks, maxtries=1000000, **kwargs):
        self.log.debug("TestNode.generate() dispatches `generate` call to `generatetoaddress`")
        return self.generatetoaddress(nblocks=nblocks, address=self.get_deterministic_priv_key().address, maxtries=maxtries, **kwargs)

    def generateblock(self, *args, invalid_call, **kwargs):
        assert not invalid_call
        return self.__getattr__('generateblock')(*args, **kwargs)

    def generatetoaddress(self, *args, invalid_call, **kwargs):
        assert not invalid_call
        return self.__getattr__('generatetoaddress')(*args, **kwargs)

    def generatetodescriptor(self, *args, invalid_call, **kwargs):
        assert not invalid_call
        return self.__getattr__('generatetodescriptor')(*args, **kwargs)

    def get_wallet_rpc(self, wallet_name):
        if self.use_cli:
            return RPCOverloadWrapper(self.cli("-rpcwallet={}".format(wallet_name)), True, self.descriptors)
        else:
            assert self.rpc_connected and self.rpc, self._node_msg("RPC not connected")
            wallet_path = "wallet/{}".format(urllib.parse.quote(wallet_name))
            return RPCOverloadWrapper(self.rpc / wallet_path, descriptors=self.descriptors)

    def version_is_at_least(self, ver):
        return self.version is None or self.version >= ver
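
    # Illustrative examples for the check above: self.version is None for
    # binaries built from the current tree, so every feature gate passes; a
    # node started with version=170000 reports False for
    # version_is_at_least(180000) and is therefore stopped without the wait
    # argument in stop_node() below.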
|
||||||
|
|
||||||
|
def stop_node(self, expected_stderr='', *, wait=0, wait_until_stopped=True):
|
||||||
|
"""Stop the node."""
|
||||||
|
if not self.running:
|
||||||
|
return
|
||||||
|
self.log.debug("Stopping node")
|
||||||
|
try:
|
||||||
|
# Do not use wait argument when testing older nodes, e.g. in feature_backwards_compatibility.py
|
||||||
|
if self.version_is_at_least(180000):
|
||||||
|
self.stop(wait=wait)
|
||||||
|
else:
|
||||||
|
self.stop()
|
||||||
|
except http.client.CannotSendRequest:
|
||||||
|
self.log.exception("Unable to stop node.")
|
||||||
|
|
||||||
|
# If there are any running perf processes, stop them.
|
||||||
|
for profile_name in tuple(self.perf_subprocesses.keys()):
|
||||||
|
self._stop_perf(profile_name)
|
||||||
|
|
||||||
|
# Check that stderr is as expected
|
||||||
|
self.stderr.seek(0)
|
||||||
|
stderr = self.stderr.read().decode('utf-8').strip()
|
||||||
|
if stderr != expected_stderr:
|
||||||
|
raise AssertionError("Unexpected stderr {} != {}".format(stderr, expected_stderr))
|
||||||
|
|
||||||
|
self.stdout.close()
|
||||||
|
self.stderr.close()
|
||||||
|
|
||||||
|
del self.p2ps[:]
|
||||||
|
|
||||||
|
if wait_until_stopped:
|
||||||
|
self.wait_until_stopped()
|
||||||
|
|
||||||
|
def is_node_stopped(self):
|
||||||
|
"""Checks whether the node has stopped.
|
||||||
|
|
||||||
|
Returns True if the node has stopped. False otherwise.
|
||||||
|
This method is responsible for freeing resources (self.process)."""
|
||||||
|
if not self.running:
|
||||||
|
return True
|
||||||
|
return_code = self.process.poll()
|
||||||
|
if return_code is None:
|
||||||
|
return False
|
||||||
|
|
||||||
|
# process has stopped. Assert that it didn't return an error code.
|
||||||
|
assert return_code == 0, self._node_msg(
|
||||||
|
"Node returned non-zero exit code (%d) when stopping" % return_code)
|
||||||
|
self.running = False
|
||||||
|
self.process = None
|
||||||
|
self.rpc_connected = False
|
||||||
|
self.rpc = None
|
||||||
|
self.log.debug("Node stopped")
|
||||||
|
return True
|
||||||
|
|
||||||
|
def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT):
|
||||||
|
wait_until_helper(self.is_node_stopped, timeout=timeout, timeout_factor=self.timeout_factor)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def chain_path(self) -> Path:
|
||||||
|
return Path(self.datadir) / self.chain
|
||||||
|
|
||||||
|
@property
|
||||||
|
def debug_log_path(self) -> Path:
|
||||||
|
return self.chain_path / 'debug.log'
|
||||||
|
|
||||||
|
def debug_log_bytes(self) -> int:
|
||||||
|
with open(self.debug_log_path, encoding='utf-8') as dl:
|
||||||
|
dl.seek(0, 2)
|
||||||
|
return dl.tell()
|
||||||
|
|
||||||
|
@contextlib.contextmanager
|
||||||
|
def assert_debug_log(self, expected_msgs, unexpected_msgs=None, timeout=2):
|
||||||
|
if unexpected_msgs is None:
|
||||||
|
unexpected_msgs = []
|
||||||
|
time_end = time.time() + timeout * self.timeout_factor
|
||||||
|
prev_size = self.debug_log_bytes()
|
||||||
|
|
||||||
|
yield
|
||||||
|
|
||||||
|
while True:
|
||||||
|
found = True
|
||||||
|
with open(self.debug_log_path, encoding='utf-8') as dl:
|
||||||
|
dl.seek(prev_size)
|
||||||
|
log = dl.read()
|
||||||
|
print_log = " - " + "\n - ".join(log.splitlines())
|
||||||
|
for unexpected_msg in unexpected_msgs:
|
||||||
|
if re.search(re.escape(unexpected_msg), log, flags=re.MULTILINE):
|
||||||
|
self._raise_assertion_error('Unexpected message "{}" partially matches log:\n\n{}\n\n'.format(unexpected_msg, print_log))
|
||||||
|
for expected_msg in expected_msgs:
|
||||||
|
if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None:
|
||||||
|
found = False
|
||||||
|
if found:
|
||||||
|
return
|
||||||
|
if time.time() >= time_end:
|
||||||
|
break
|
||||||
|
time.sleep(0.05)
|
||||||
|
self._raise_assertion_error('Expected messages "{}" does not partially match log:\n\n{}\n\n'.format(str(expected_msgs), print_log))
|
||||||
|
|
||||||
|
@contextlib.contextmanager
|
||||||
|
def wait_for_debug_log(self, expected_msgs, timeout=60, ignore_case=False):
|
||||||
|
"""
|
||||||
|
Block until we see a particular debug log message fragment or until we exceed the timeout.
|
||||||
|
Return:
|
||||||
|
the number of log lines we encountered when matching
|
||||||
|
"""
|
||||||
|
time_end = time.time() + timeout * self.timeout_factor
|
||||||
|
prev_size = self.debug_log_bytes()
|
||||||
|
re_flags = re.MULTILINE | (re.IGNORECASE if ignore_case else 0)
|
||||||
|
|
||||||
|
yield
|
||||||
|
|
||||||
|
while True:
|
||||||
|
found = True
|
||||||
|
with open(self.debug_log_path, encoding='utf-8') as dl:
|
||||||
|
dl.seek(prev_size)
|
||||||
|
log = dl.read()
|
||||||
|
|
||||||
|
for expected_msg in expected_msgs:
|
||||||
|
if re.search(re.escape(expected_msg), log, flags=re_flags) is None:
|
||||||
|
found = False
|
||||||
|
|
||||||
|
if found:
|
||||||
|
return
|
||||||
|
|
||||||
|
if time.time() >= time_end:
|
||||||
|
print_log = " - " + "\n - ".join(log.splitlines())
|
||||||
|
break
|
||||||
|
|
||||||
|
# No sleep here because we want to detect the message fragment as fast as
|
||||||
|
# possible.
|
||||||
|
|
||||||
|
self._raise_assertion_error(
|
||||||
|
'Expected messages "{}" does not partially match log:\n\n{}\n\n'.format(
|
||||||
|
str(expected_msgs), print_log))
|
||||||
|
|
||||||
|

    @contextlib.contextmanager
    def profile_with_perf(self, profile_name: str):
        """
        Context manager that allows easy profiling of node activity using `perf`.

        See `test/functional/README.md` for details on perf usage.

        Args:
            profile_name: This string will be appended to the
                profile data filename generated by perf.
        """
        subp = self._start_perf(profile_name)

        yield

        if subp:
            self._stop_perf(profile_name)

    def _start_perf(self, profile_name=None):
        """Start a perf process to profile this node.

        Returns the subprocess running perf."""
        subp = None

        def test_success(cmd):
            return subprocess.call(
                # shell=True required for pipe use below
                cmd, shell=True,
                stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL) == 0

        if not sys.platform.startswith('linux'):
            self.log.warning("Can't profile with perf; only available on Linux platforms")
            return None

        if not test_success('which perf'):
            self.log.warning("Can't profile with perf; must install perf-tools")
            return None

        if not test_success('readelf -S {} | grep .debug_str'.format(shlex.quote(self.binary))):
            self.log.warning(
                "perf output won't be very useful without debug symbols compiled into bitcoind")

        output_path = tempfile.NamedTemporaryFile(
            dir=self.datadir,
            prefix="{}.perf.data.".format(profile_name or 'test'),
            delete=False,
        ).name

        cmd = [
            'perf', 'record',
            '-g',                     # Record the callgraph.
            '--call-graph', 'dwarf',  # Compatibility for gcc's --fomit-frame-pointer.
            '-F', '101',              # Sampling frequency in Hz.
            '-p', str(self.process.pid),
            '-o', output_path,
        ]
        subp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        self.perf_subprocesses[profile_name] = subp

        return subp

    def _stop_perf(self, profile_name):
        """Stop (and pop) a perf subprocess."""
        subp = self.perf_subprocesses.pop(profile_name)
        output_path = subp.args[subp.args.index('-o') + 1]

        subp.terminate()
        subp.wait(timeout=10)

        stderr = subp.stderr.read().decode()
        if 'Consider tweaking /proc/sys/kernel/perf_event_paranoid' in stderr:
            self.log.warning(
                "perf couldn't collect data! Try "
                "'sudo sysctl -w kernel.perf_event_paranoid=-1'")
        else:
            report_cmd = "perf report -i {}".format(output_path)
            self.log.info("See perf output by running '{}'".format(report_cmd))
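
    # Example (illustrative): profiling a block-generation burst from a test,
    # assuming `node` is a TestNode and the host has perf installed:
    #
    #     with node.profile_with_perf("generate-blocks"):
    #         self.generate(node, 100)
    #
    # The samples land in <datadir>/generate-blocks.perf.data.<suffix> and can
    # be inspected with `perf report -i <that file>`.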

    def assert_start_raises_init_error(self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs):
        """Attempt to start the node and expect it to raise an error.

        extra_args: extra arguments to pass through to bitcoind
        expected_msg: regex that stderr should match when bitcoind fails

        Will throw if bitcoind starts without an error.
        Will throw if an expected_msg is provided and it does not match bitcoind's stderr."""
        with tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) as log_stderr, \
             tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) as log_stdout:
            try:
                self.start(extra_args, stdout=log_stdout, stderr=log_stderr, *args, **kwargs)
                ret = self.process.wait(timeout=self.rpc_timeout)
                self.log.debug(self._node_msg(f'bitcoind exited with status {ret} during initialization'))
                assert ret != 0  # Exit code must indicate failure
                self.running = False
                self.process = None
                # Check stderr for expected message
                if expected_msg is not None:
                    log_stderr.seek(0)
                    stderr = log_stderr.read().decode('utf-8').strip()
                    if match == ErrorMatch.PARTIAL_REGEX:
                        if re.search(expected_msg, stderr, flags=re.MULTILINE) is None:
                            self._raise_assertion_error(
                                'Expected message "{}" does not partially match stderr:\n"{}"'.format(expected_msg, stderr))
                    elif match == ErrorMatch.FULL_REGEX:
                        if re.fullmatch(expected_msg, stderr) is None:
                            self._raise_assertion_error(
                                'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
                    elif match == ErrorMatch.FULL_TEXT:
                        if expected_msg != stderr:
                            self._raise_assertion_error(
                                'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
            except subprocess.TimeoutExpired:
                self.process.kill()
                self.running = False
                self.process = None
                assert_msg = f'bitcoind should have exited within {self.rpc_timeout}s '
                if expected_msg is None:
                    assert_msg += "with an error"
                else:
                    assert_msg += "with expected error " + expected_msg
                self._raise_assertion_error(assert_msg)
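
    # Example (illustrative): asserting that bitcoind refuses to start with a
    # bogus flag, assuming `node` is a stopped TestNode:
    #
    #     node.assert_start_raises_init_error(
    #         extra_args=['-nonexistentarg'],
    #         expected_msg='Error parsing command line arguments',
    #         match=ErrorMatch.PARTIAL_REGEX)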

    def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, **kwargs):
        """Add an inbound p2p connection to the node.

        This method adds the p2p connection to the self.p2ps list and also
        returns the connection to the caller."""
        if 'dstport' not in kwargs:
            kwargs['dstport'] = p2p_port(self.index)
        if 'dstaddr' not in kwargs:
            kwargs['dstaddr'] = '127.0.0.1'

        p2p_conn.peer_connect(**kwargs, net=self.chain, timeout_factor=self.timeout_factor)()
        self.p2ps.append(p2p_conn)
        p2p_conn.wait_until(lambda: p2p_conn.is_connected, check_connected=False)
        if wait_for_verack:
            # Wait for the node to send us the version and verack
            p2p_conn.wait_for_verack()
            # At this point we have sent our version message and received the version and verack, however the full node
            # has not yet received the verack from us (in reply to their version). So, the connection is not yet fully
            # established (fSuccessfullyConnected).
            #
            # This shouldn't lead to any issues when sending messages, since the verack will be in-flight before the
            # message we send. However, it might lead to races where we are expecting to receive a message. E.g. a
            # transaction that will be added to the mempool as soon as we return here.
            #
            # So syncing here is redundant when we only want to send a message, but the cost is low (a few milliseconds)
            # in comparison to the upside of making tests less fragile and unexpected intermittent errors less likely.
            p2p_conn.sync_with_ping()

            # Consistency check that Bitcoin Core has received our user agent string. This checks the
            # node's newest peer. It could be racy if another Bitcoin Core node has connected since we opened
            # our connection, but we don't expect that to happen.
            assert_equal(self.getpeerinfo()[-1]['subver'], P2P_SUBVERSION)

        return p2p_conn

    def add_outbound_p2p_connection(self, p2p_conn, *, p2p_idx, connection_type="outbound-full-relay", **kwargs):
        """Add an outbound p2p connection from node. Must be an
        "outbound-full-relay", "block-relay-only", "addr-fetch" or "feeler" connection.

        This method adds the p2p connection to the self.p2ps list and returns
        the connection to the caller.
        """

        def addconnection_callback(address, port):
            self.log.debug("Connecting to %s:%d %s" % (address, port, connection_type))
            self.addconnection('%s:%d' % (address, port), connection_type)

        p2p_conn.peer_accept_connection(connect_cb=addconnection_callback, connect_id=p2p_idx + 1, net=self.chain, timeout_factor=self.timeout_factor, **kwargs)()

        if connection_type == "feeler":
            # feeler connections are closed as soon as the node receives a `version` message
            p2p_conn.wait_until(lambda: p2p_conn.message_count["version"] == 1, check_connected=False)
            p2p_conn.wait_until(lambda: not p2p_conn.is_connected, check_connected=False)
        else:
            p2p_conn.wait_for_connect()
            self.p2ps.append(p2p_conn)

            p2p_conn.wait_for_verack()
            p2p_conn.sync_with_ping()

        return p2p_conn

    def num_test_p2p_connections(self):
        """Return number of test framework p2p connections to the node."""
        return len([peer for peer in self.getpeerinfo() if peer['subver'] == P2P_SUBVERSION])

    def disconnect_p2ps(self):
        """Close all p2p connections to the node."""
        for p in self.p2ps:
            p.peer_disconnect()
        del self.p2ps[:]

        wait_until_helper(lambda: self.num_test_p2p_connections() == 0, timeout_factor=self.timeout_factor)
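
# Example (illustrative): attaching a framework peer to a node inside a
# running test, assuming the P2PInterface class and msg_ping from p2p.py /
# messages.py are imported:
#
#     peer = node.add_p2p_connection(P2PInterface())
#     peer.send_and_ping(msg_ping())   # connection is fully established here
#     node.disconnect_p2ps()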

class TestNodeCLIAttr:
    def __init__(self, cli, command):
        self.cli = cli
        self.command = command

    def __call__(self, *args, **kwargs):
        return self.cli.send_cli(self.command, *args, **kwargs)

    def get_request(self, *args, **kwargs):
        return lambda: self(*args, **kwargs)


def arg_to_cli(arg):
    if isinstance(arg, bool):
        return str(arg).lower()
    elif arg is None:
        return 'null'
    elif isinstance(arg, dict) or isinstance(arg, list):
        return json.dumps(arg, default=EncodeDecimal)
    else:
        return str(arg)

class TestNodeCLI():
    """Interface to bitcoin-cli for an individual node"""
    def __init__(self, binary, datadir):
        self.options = []
        self.binary = binary
        self.datadir = datadir
        self.input = None
        self.log = logging.getLogger('TestFramework.bitcoincli')

    def __call__(self, *options, input=None):
        # TestNodeCLI is callable with bitcoin-cli command-line options
        cli = TestNodeCLI(self.binary, self.datadir)
        cli.options = [str(o) for o in options]
        cli.input = input
        return cli

    def __getattr__(self, command):
        return TestNodeCLIAttr(self, command)

    def batch(self, requests):
        results = []
        for request in requests:
            try:
                results.append(dict(result=request()))
            except JSONRPCException as e:
                results.append(dict(error=e))
        return results

    def send_cli(self, command=None, *args, **kwargs):
        """Run bitcoin-cli command. Deserializes returned string as python object."""
        pos_args = [arg_to_cli(arg) for arg in args]
        named_args = [str(key) + "=" + arg_to_cli(value) for (key, value) in kwargs.items()]
        assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same bitcoin-cli call"
        p_args = [self.binary, "-datadir=" + self.datadir] + self.options
        if named_args:
            p_args += ["-named"]
        if command is not None:
            p_args += [command]
        p_args += pos_args + named_args
        self.log.debug("Running bitcoin-cli {}".format(p_args[2:]))
        process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
        cli_stdout, cli_stderr = process.communicate(input=self.input)
        returncode = process.poll()
        if returncode:
            match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr)
            if match:
                code, message = match.groups()
                raise JSONRPCException(dict(code=int(code), message=message))
            # Ignore cli_stdout, raise with cli_stderr
            raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr)
        try:
            return json.loads(cli_stdout, parse_float=decimal.Decimal)
        except (json.JSONDecodeError, decimal.InvalidOperation):
            return cli_stdout.rstrip("\n")
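
# Example (illustrative): TestNodeCLI turns attribute access into bitcoin-cli
# invocations, so with `cli = TestNodeCLI("bitcoin-cli", "/path/to/datadir")`
# (hypothetical paths):
#
#     cli.getblockcount()                 # runs: bitcoin-cli -datadir=... getblockcount
#     cli('-rpcwallet=w1').getbalance()   # extra CLI options via __call__
#     cli.getblockhash(0)                 # positional args are stringified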

class RPCOverloadWrapper():
    def __init__(self, rpc, cli=False, descriptors=False):
        self.rpc = rpc
        self.is_cli = cli
        self.descriptors = descriptors

    def __getattr__(self, name):
        return getattr(self.rpc, name)

    def createwallet_passthrough(self, *args, **kwargs):
        return self.__getattr__("createwallet")(*args, **kwargs)

    def createwallet(self, wallet_name, disable_private_keys=None, blank=None, passphrase='', avoid_reuse=None, descriptors=None, load_on_startup=None, external_signer=None):
        if descriptors is None:
            descriptors = self.descriptors
        return self.__getattr__('createwallet')(wallet_name, disable_private_keys, blank, passphrase, avoid_reuse, descriptors, load_on_startup, external_signer)

    def importprivkey(self, privkey, label=None, rescan=None):
        wallet_info = self.getwalletinfo()
        if 'descriptors' not in wallet_info or ('descriptors' in wallet_info and not wallet_info['descriptors']):
            return self.__getattr__('importprivkey')(privkey, label, rescan)
        desc = descsum_create('combo(' + privkey + ')')
        req = [{
            'desc': desc,
            'timestamp': 0 if rescan else 'now',
            'label': label if label else ''
        }]
        import_res = self.importdescriptors(req)
        if not import_res[0]['success']:
            raise JSONRPCException(import_res[0]['error'])

    def addmultisigaddress(self, nrequired, keys, label=None, address_type=None):
        wallet_info = self.getwalletinfo()
        if 'descriptors' not in wallet_info or ('descriptors' in wallet_info and not wallet_info['descriptors']):
            return self.__getattr__('addmultisigaddress')(nrequired, keys, label, address_type)
        cms = self.createmultisig(nrequired, keys, address_type)
        req = [{
            'desc': cms['descriptor'],
            'timestamp': 0,
            'label': label if label else ''
        }]
        import_res = self.importdescriptors(req)
        if not import_res[0]['success']:
            raise JSONRPCException(import_res[0]['error'])
        return cms

    def importpubkey(self, pubkey, label=None, rescan=None):
        wallet_info = self.getwalletinfo()
        if 'descriptors' not in wallet_info or ('descriptors' in wallet_info and not wallet_info['descriptors']):
            return self.__getattr__('importpubkey')(pubkey, label, rescan)
        desc = descsum_create('combo(' + pubkey + ')')
        req = [{
            'desc': desc,
            'timestamp': 0 if rescan else 'now',
            'label': label if label else ''
        }]
        import_res = self.importdescriptors(req)
        if not import_res[0]['success']:
            raise JSONRPCException(import_res[0]['error'])

    def importaddress(self, address, label=None, rescan=None, p2sh=None):
        wallet_info = self.getwalletinfo()
        if 'descriptors' not in wallet_info or ('descriptors' in wallet_info and not wallet_info['descriptors']):
            return self.__getattr__('importaddress')(address, label, rescan, p2sh)
        is_hex = False
        try:
            int(address, 16)
            is_hex = True
            desc = descsum_create('raw(' + address + ')')
        except Exception:
            desc = descsum_create('addr(' + address + ')')
        reqs = [{
            'desc': desc,
            'timestamp': 0 if rescan else 'now',
            'label': label if label else ''
        }]
        if is_hex and p2sh:
            reqs.append({
                'desc': descsum_create('p2sh(raw(' + address + '))'),
                'timestamp': 0 if rescan else 'now',
                'label': label if label else ''
            })
        import_res = self.importdescriptors(reqs)
        for res in import_res:
            if not res['success']:
                raise JSONRPCException(res['error'])
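
# Example (illustrative): on a descriptor wallet, the importprivkey() shim
# above rewrites the legacy call into an importdescriptors request, so a
# hypothetical call like
#
#     wallet.importprivkey('cT...WIFkey...')
#
# becomes roughly
#
#     wallet.importdescriptors([{'desc': descsum_create('combo(cT...WIFkey...)'),
#                                'timestamp': 'now', 'label': ''}])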
75
miner_imports/test_framework/test_shell.py
Normal file
@@ -0,0 +1,75 @@
#!/usr/bin/env python3
# Copyright (c) 2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

from test_framework.test_framework import BitcoinTestFramework

class TestShell:
    """Wrapper Class for BitcoinTestFramework.

    The TestShell class extends the BitcoinTestFramework
    rpc & daemon process management functionality to external
    python environments.

    It is a singleton class, which ensures that users only
    start a single TestShell at a time."""

    class __TestShell(BitcoinTestFramework):
        def set_test_params(self):
            pass

        def run_test(self):
            pass

        def setup(self, **kwargs):
            if self.running:
                print("TestShell is already running!")
                return

            # Num_nodes parameter must be set
            # by BitcoinTestFramework child class.
            self.num_nodes = 1

            # User parameters override default values.
            for key, value in kwargs.items():
                if hasattr(self, key):
                    setattr(self, key, value)
                elif hasattr(self.options, key):
                    setattr(self.options, key, value)
                else:
                    raise KeyError(key + " is not a valid parameter key!")

            super().setup()
            self.running = True
            return self

        def shutdown(self):
            if not self.running:
                print("TestShell is not running!")
            else:
                super().shutdown()
                self.running = False

        def reset(self):
            if self.running:
                print("Shutdown TestShell before resetting!")
            else:
                self.num_nodes = None
                super().__init__()

    instance = None

    def __new__(cls):
        # This implementation enforces singleton pattern, and will return the
        # previously initialized instance if available
        if not TestShell.instance:
            TestShell.instance = TestShell.__TestShell()
            TestShell.instance.running = False
        return TestShell.instance

    def __getattr__(self, name):
        return getattr(self.instance, name)

    def __setattr__(self, name, value):
        return setattr(self.instance, name, value)
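
# Example (illustrative): driving a node from an interactive Python session,
# assuming the test_framework package is on PYTHONPATH:
#
#     >>> from test_framework.test_shell import TestShell
#     >>> shell = TestShell().setup(num_nodes=1, setup_clean_chain=True)
#     >>> shell.nodes[0].getblockchaininfo()
#     >>> shell.shutdown()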
629
miner_imports/test_framework/util.py
Normal file
@@ -0,0 +1,629 @@
#!/usr/bin/env python3
# Copyright (c) 2014-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Helpful routines for regression testing."""

from base64 import b64encode
from decimal import Decimal, ROUND_DOWN
from subprocess import CalledProcessError
import hashlib
import inspect
import json
import logging
import os
import re
import time
import unittest

from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
from typing import Callable, Optional

logger = logging.getLogger("TestFramework.utils")

# Assert functions
##################


def assert_approx(v, vexp, vspan=0.00001):
    """Assert that `v` is within `vspan` of `vexp`"""
    if v < vexp - vspan:
        raise AssertionError("%s < [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan)))
    if v > vexp + vspan:
        raise AssertionError("%s > [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan)))


def assert_fee_amount(fee, tx_size, feerate_BTC_kvB):
    """Assert the fee is in range."""
    assert isinstance(tx_size, int)
    target_fee = get_fee(tx_size, feerate_BTC_kvB)
    if fee < target_fee:
        raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)" % (str(fee), str(target_fee)))
    # allow the wallet's estimation to be at most 2 bytes off
    high_fee = get_fee(tx_size + 2, feerate_BTC_kvB)
    if fee > high_fee:
        raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)" % (str(fee), str(target_fee)))


def assert_equal(thing1, thing2, *args):
    if thing1 != thing2 or any(thing1 != arg for arg in args):
        raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args))


def assert_greater_than(thing1, thing2):
    if thing1 <= thing2:
        raise AssertionError("%s <= %s" % (str(thing1), str(thing2)))


def assert_greater_than_or_equal(thing1, thing2):
    if thing1 < thing2:
        raise AssertionError("%s < %s" % (str(thing1), str(thing2)))


def assert_raises(exc, fun, *args, **kwds):
    assert_raises_message(exc, None, fun, *args, **kwds)


def assert_raises_message(exc, message, fun, *args, **kwds):
    try:
        fun(*args, **kwds)
    except JSONRPCException:
        raise AssertionError("Use assert_raises_rpc_error() to test RPC failures")
    except exc as e:
        if message is not None and message not in e.error['message']:
            raise AssertionError(
                "Expected substring not found in error message:\nsubstring: '{}'\nerror message: '{}'.".format(
                    message, e.error['message']))
    except Exception as e:
        raise AssertionError("Unexpected exception raised: " + type(e).__name__)
    else:
        raise AssertionError("No exception raised")

def assert_raises_process_error(returncode: int, output: str, fun: Callable, *args, **kwds):
    """Execute a process and assert the process return code and output.

    Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError
    and verifies that the return code and output are as expected. Throws AssertionError if
    no CalledProcessError was raised or if the return code and output are not as expected.

    Args:
        returncode: the process return code.
        output: [a substring of] the process output.
        fun: the function to call. This should execute a process.
        args*: positional arguments for the function.
        kwds**: named arguments for the function.
    """
    try:
        fun(*args, **kwds)
    except CalledProcessError as e:
        if returncode != e.returncode:
            raise AssertionError("Unexpected returncode %i" % e.returncode)
        if output not in e.output:
            raise AssertionError("Expected substring not found:" + e.output)
    else:
        raise AssertionError("No exception raised")


def assert_raises_rpc_error(code: Optional[int], message: Optional[str], fun: Callable, *args, **kwds):
    """Run an RPC and verify that a specific JSONRPC exception code and message is raised.

    Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
    and verifies that the error code and message are as expected. Throws AssertionError if
    no JSONRPCException was raised or if the error code/message are not as expected.

    Args:
        code: the error code returned by the RPC call (defined in src/rpc/protocol.h).
            Set to None if checking the error code is not required.
        message: [a substring of] the error string returned by the RPC call.
            Set to None if checking the error string is not required.
        fun: the function to call. This should be the name of an RPC.
        args*: positional arguments for the function.
        kwds**: named arguments for the function.
    """
    assert try_rpc(code, message, fun, *args, **kwds), "No exception raised"


def try_rpc(code, message, fun, *args, **kwds):
    """Tries to run an rpc command.

    Test against error code and message if the rpc fails.
    Returns whether a JSONRPCException was raised."""
    try:
        fun(*args, **kwds)
    except JSONRPCException as e:
        # JSONRPCException was thrown as expected. Check the code and message values are correct.
        if (code is not None) and (code != e.error["code"]):
            raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
        if (message is not None) and (message not in e.error['message']):
            raise AssertionError(
                "Expected substring not found in error message:\nsubstring: '{}'\nerror message: '{}'.".format(
                    message, e.error['message']))
        return True
    except Exception as e:
        raise AssertionError("Unexpected exception raised: " + type(e).__name__)
    else:
        return False
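
# Example (illustrative): verifying that an RPC fails the way a test expects,
# assuming `node` is a running TestNode:
#
#     assert_raises_rpc_error(-8, "Block height out of range",
#                             node.getblockhash, 1000000)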

def assert_is_hex_string(string):
    try:
        int(string, 16)
    except Exception as e:
        raise AssertionError("Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))


def assert_is_hash_string(string, length=64):
    if not isinstance(string, str):
        raise AssertionError("Expected a string, got type %r" % type(string))
    elif length and len(string) != length:
        raise AssertionError("String of length %d expected; got %d" % (length, len(string)))
    elif not re.match('[abcdef0-9]+$', string):
        raise AssertionError("String %r contains invalid characters for a hash." % string)


def assert_array_result(object_array, to_match, expected, should_not_find=False):
    """
    Pass in array of JSON objects, a dictionary with key/value pairs
    to match against, and another dictionary with expected key/value
    pairs.
    If the should_not_find flag is true, to_match should not be found
    in object_array
    """
    if should_not_find:
        assert_equal(expected, {})
    num_matched = 0
    for item in object_array:
        all_match = True
        for key, value in to_match.items():
            if item[key] != value:
                all_match = False
        if not all_match:
            continue
        elif should_not_find:
            num_matched = num_matched + 1
        for key, value in expected.items():
            if item[key] != value:
                raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
            num_matched = num_matched + 1
    if num_matched == 0 and not should_not_find:
        raise AssertionError("No objects matched %s" % (str(to_match)))
    if num_matched > 0 and should_not_find:
        raise AssertionError("Objects were found %s" % (str(to_match)))

# Utility functions
###################


def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    n = Decimal("20000000.00000003")
    satoshis = int(json.loads(json.dumps(float(n))) * 1.0e8)
    if satoshis != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")


def EncodeDecimal(o):
    if isinstance(o, Decimal):
        return str(o)
    raise TypeError(repr(o) + " is not JSON serializable")


def count_bytes(hex_string):
    return len(bytearray.fromhex(hex_string))


def str_to_b64str(string):
    return b64encode(string.encode('utf-8')).decode('ascii')


def ceildiv(a, b):
    """
    Divide 2 ints and round up to next int rather than round down
    Implementation requires python integers, which have a // operator that does floor division.
    Other types like decimal.Decimal whose // operator truncates towards 0 will not work.
    """
    assert isinstance(a, int)
    assert isinstance(b, int)
    return -(-a // b)


def get_fee(tx_size, feerate_btc_kvb):
    """Calculate the fee in BTC given a feerate in BTC/kvB. Reflects CFeeRate::GetFee"""
    feerate_sat_kvb = int(feerate_btc_kvb * Decimal(1e8))  # Fee in sat/kvb as an int to avoid float precision errors
    target_fee_sat = ceildiv(feerate_sat_kvb * tx_size, 1000)  # Round calculated fee up to nearest sat
    return target_fee_sat / Decimal(1e8)  # Return result in BTC


def satoshi_round(amount):
    return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
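
# Worked example (illustrative): at a feerate of 0.00001 BTC/kvB, a 100-vbyte
# transaction pays ceildiv(1000 sat/kvB * 100, 1000) = 100 sat:
#
#     get_fee(100, Decimal("0.00001"))   # -> Decimal('0.000001')
#     ceildiv(7, 2)                      # -> 4 (rounds up, unlike 7 // 2)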

def wait_until_helper(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None, timeout_factor=1.0):
    """Sleep until the predicate resolves to be True.

    Warning: Note that this method is not recommended to be used in tests as it is
    not aware of the context of the test framework. Using the `wait_until()` members
    from `BitcoinTestFramework` or `P2PInterface` class ensures the timeout is
    properly scaled. Furthermore, `wait_until()` from `P2PInterface` class in
    `p2p.py` has a preset lock.
    """
    if attempts == float('inf') and timeout == float('inf'):
        timeout = 60
    timeout = timeout * timeout_factor
    attempt = 0
    time_end = time.time() + timeout

    while attempt < attempts and time.time() < time_end:
        if lock:
            with lock:
                if predicate():
                    return
        else:
            if predicate():
                return
        attempt += 1
        time.sleep(0.05)

    # Print the cause of the timeout
    predicate_source = "'''\n" + inspect.getsource(predicate) + "'''"
    logger.error("wait_until() failed. Predicate: {}".format(predicate_source))
    if attempt >= attempts:
        raise AssertionError("Predicate {} not true after {} attempts".format(predicate_source, attempts))
    elif time.time() >= time_end:
        raise AssertionError("Predicate {} not true after {} seconds".format(predicate_source, timeout))
    raise RuntimeError('Unreachable')
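
# Example (illustrative): polling until a node condition holds, with the
# deadline scaled by the framework's --timeout-factor setting:
#
#     wait_until_helper(lambda: node.getblockcount() >= 200,
#                       timeout=30, timeout_factor=2.0)   # waits up to 60s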

def sha256sum_file(filename):
    h = hashlib.sha256()
    with open(filename, 'rb') as f:
        d = f.read(4096)
        while len(d) > 0:
            h.update(d)
            d = f.read(4096)
    return h.digest()

# RPC/P2P connection constants and functions
############################################

# The maximum number of nodes a single test can spawn
MAX_NODES = 12
# Don't assign rpc or p2p ports lower than this
PORT_MIN = int(os.getenv('TEST_RUNNER_PORT_MIN', default=11000))
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000


class PortSeed:
    # Must be initialized with a unique integer for each process
    n = None


def get_rpc_proxy(url: str, node_number: int, *, timeout: int=None, coveragedir: str=None) -> coverage.AuthServiceProxyWrapper:
    """
    Args:
        url: URL of the RPC server to call
        node_number: the node number (or id) that this calls to

    Kwargs:
        timeout: HTTP timeout in seconds
        coveragedir: directory in which to write coverage log files

    Returns:
        AuthServiceProxyWrapper, a convenience object for making RPC calls.
    """
    proxy_kwargs = {}
    if timeout is not None:
        proxy_kwargs['timeout'] = int(timeout)

    proxy = AuthServiceProxy(url, **proxy_kwargs)

    coverage_logfile = coverage.get_filename(coveragedir, node_number) if coveragedir else None

    return coverage.AuthServiceProxyWrapper(proxy, url, coverage_logfile)


def p2p_port(n):
    assert n <= MAX_NODES
    return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)


def rpc_port(n):
    return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)


def rpc_url(datadir, i, chain, rpchost):
    rpc_u, rpc_p = get_auth_cookie(datadir, chain)
    host = '127.0.0.1'
    port = rpc_port(i)
    if rpchost:
        parts = rpchost.split(':')
        if len(parts) == 2:
            host, port = parts
        else:
            host = rpchost
    return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
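
# Worked example (illustrative): with the defaults above (PORT_MIN=11000,
# PORT_RANGE=5000, MAX_NODES=12) and PortSeed.n = 5, node 0 gets:
#
#     p2p_port(0)  # 11000 + 0 + (12 * 5) % 4987 = 11060
#     rpc_port(0)  # 11000 + 5000 + 60           = 16060
#
# so concurrent test processes with distinct PortSeed.n values don't collide.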

# Node functions
################


def initialize_datadir(dirname, n, chain, disable_autoconnect=True):
    datadir = get_datadir_path(dirname, n)
    if not os.path.isdir(datadir):
        os.makedirs(datadir)
    write_config(os.path.join(datadir, "bitcoin.conf"), n=n, chain=chain, disable_autoconnect=disable_autoconnect)
    os.makedirs(os.path.join(datadir, 'stderr'), exist_ok=True)
    os.makedirs(os.path.join(datadir, 'stdout'), exist_ok=True)
    return datadir


def write_config(config_path, *, n, chain, extra_config="", disable_autoconnect=True):
    # Translate chain subdirectory name to config name
    if chain == 'testnet3':
        chain_name_conf_arg = 'testnet'
        chain_name_conf_section = 'test'
    else:
        chain_name_conf_arg = chain
        chain_name_conf_section = chain
    with open(config_path, 'w', encoding='utf8') as f:
        if chain_name_conf_arg:
            f.write("{}=1\n".format(chain_name_conf_arg))
        if chain_name_conf_section:
            f.write("[{}]\n".format(chain_name_conf_section))
        f.write("port=" + str(p2p_port(n)) + "\n")
        f.write("rpcport=" + str(rpc_port(n)) + "\n")
        f.write("fallbackfee=0.0002\n")
        f.write("server=1\n")
        f.write("keypool=1\n")
        f.write("discover=0\n")
        f.write("dnsseed=0\n")
        f.write("fixedseeds=0\n")
        f.write("listenonion=0\n")
        # Increase peertimeout to avoid disconnects while using mocktime.
        # peertimeout is measured in mock time, so setting it large enough to
        # cover any duration in mock time is sufficient. It can be overridden
        # in tests.
        f.write("peertimeout=999999999\n")
        f.write("printtoconsole=0\n")
        f.write("upnp=0\n")
        f.write("natpmp=0\n")
        f.write("shrinkdebugfile=0\n")
        # To improve SQLite wallet performance so that the tests don't timeout, use -unsafesqlitesync
        f.write("unsafesqlitesync=1\n")
        if disable_autoconnect:
            f.write("connect=0\n")
        f.write(extra_config)
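
# Example (illustrative): for n=0, chain='regtest' and PortSeed.n = 5, the
# generated bitcoin.conf begins roughly like:
#
#     regtest=1
#     [regtest]
#     port=11060
#     rpcport=16060
#     fallbackfee=0.0002
#     server=1
#     ...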

def get_datadir_path(dirname, n):
    return os.path.join(dirname, "node" + str(n))


def append_config(datadir, options):
    with open(os.path.join(datadir, "bitcoin.conf"), 'a', encoding='utf8') as f:
        for option in options:
            f.write(option + "\n")


def get_auth_cookie(datadir, chain):
    user = None
    password = None
    if os.path.isfile(os.path.join(datadir, "bitcoin.conf")):
        with open(os.path.join(datadir, "bitcoin.conf"), 'r', encoding='utf8') as f:
            for line in f:
                if line.startswith("rpcuser="):
                    assert user is None  # Ensure that there is only one rpcuser line
                    user = line.split("=")[1].strip("\n")
                if line.startswith("rpcpassword="):
                    assert password is None  # Ensure that there is only one rpcpassword line
                    password = line.split("=")[1].strip("\n")
    try:
        with open(os.path.join(datadir, chain, ".cookie"), 'r', encoding="ascii") as f:
            userpass = f.read()
            split_userpass = userpass.split(':')
            user = split_userpass[0]
            password = split_userpass[1]
    except OSError:
        pass
    if user is None or password is None:
        raise ValueError("No RPC credentials")
    return user, password


# If a cookie file exists in the given datadir, delete it.
def delete_cookie_file(datadir, chain):
    if os.path.isfile(os.path.join(datadir, chain, ".cookie")):
        logger.debug("Deleting leftover cookie file")
        os.remove(os.path.join(datadir, chain, ".cookie"))


def softfork_active(node, key):
    """Return whether a softfork is active."""
    return node.getdeploymentinfo()['deployments'][key]['active']


def set_node_times(nodes, t):
    for node in nodes:
        node.setmocktime(t)


def check_node_connections(*, node, num_in, num_out):
    info = node.getnetworkinfo()
    assert_equal(info["connections_in"], num_in)
    assert_equal(info["connections_out"], num_out)

# Transaction/Block functions
#############################


def find_output(node, txid, amount, *, blockhash=None):
    """
    Return index to output of txid with value amount
    Raises exception if there is none.
    """
    txdata = node.getrawtransaction(txid, 1, blockhash)
    for i in range(len(txdata["vout"])):
        if txdata["vout"][i]["value"] == amount:
            return i
    raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))


# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(test_framework, fee, node, count, **kwargs):
    to_generate = int(0.5 * count) + 101
    while to_generate > 0:
        test_framework.generate(node, min(25, to_generate), **kwargs)
        to_generate -= 25
    utxos = node.listunspent()
    iterations = count - len(utxos)
    addr1 = node.getnewaddress()
    addr2 = node.getnewaddress()
    if iterations <= 0:
        return utxos
    for _ in range(iterations):
        t = utxos.pop()
        inputs = []
        inputs.append({"txid": t["txid"], "vout": t["vout"]})
        outputs = {}
        send_value = t['amount'] - fee
        outputs[addr1] = satoshi_round(send_value / 2)
        outputs[addr2] = satoshi_round(send_value / 2)
        raw_tx = node.createrawtransaction(inputs, outputs)
        signed_tx = node.signrawtransactionwithwallet(raw_tx)["hex"]
        node.sendrawtransaction(signed_tx)

    while node.getmempoolinfo()['size'] > 0:
        test_framework.generate(node, 1, **kwargs)

    utxos = node.listunspent()
    assert len(utxos) >= count
    return utxos

def chain_transaction(node, parent_txids, vouts, value, fee, num_outputs):
    """Build and send a transaction that spends the given inputs (specified
    by lists of parent_txid:vout each), with the desired total value and fee,
    equally divided up to the desired number of outputs.

    Returns a tuple with the txid and the amount sent per output.
    """
    send_value = satoshi_round((value - fee)/num_outputs)
    inputs = []
    for (txid, vout) in zip(parent_txids, vouts):
        inputs.append({'txid' : txid, 'vout' : vout})
    outputs = {}
    for _ in range(num_outputs):
        outputs[node.getnewaddress()] = send_value
    rawtx = node.createrawtransaction(inputs, outputs, 0, True)
    signedtx = node.signrawtransactionwithwallet(rawtx)
    txid = node.sendrawtransaction(signedtx['hex'])
    fulltx = node.getrawtransaction(txid, 1)
    assert len(fulltx['vout']) == num_outputs  # make sure we didn't generate a change output
    return (txid, send_value)
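
# Example (illustrative): spending one confirmed parent output into two equal
# children, assuming `node` has a wallet and `parent_txid` pays 1.0 BTC at vout 0:
#
#     txid, per_output = chain_transaction(
#         node, [parent_txid], [0], Decimal("1.0"), Decimal("0.0001"), 2)
#     # per_output == satoshi_round((Decimal("1.0") - Decimal("0.0001")) / 2)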

# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
    # Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
    # So we have big transactions (and therefore can't fit very many into each block)
    # create one script_pubkey
    script_pubkey = "6a4d0200"  # OP_RETURN OP_PUSH2 512 bytes
    for _ in range(512):
        script_pubkey = script_pubkey + "01"
    # concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
    txouts = []
    from .messages import CTxOut
    txout = CTxOut()
    txout.nValue = 0
    txout.scriptPubKey = bytes.fromhex(script_pubkey)
    for _ in range(128):
        txouts.append(txout)
    return txouts


# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
    addr = node.getnewaddress()
    txids = []
    from .messages import tx_from_hex
    for _ in range(num):
        t = utxos.pop()
        inputs = [{"txid": t["txid"], "vout": t["vout"]}]
        outputs = {}
        change = t['amount'] - fee
        outputs[addr] = satoshi_round(change)
        rawtx = node.createrawtransaction(inputs, outputs)
        tx = tx_from_hex(rawtx)
        for txout in txouts:
            tx.vout.append(txout)
        newtx = tx.serialize().hex()
        signresult = node.signrawtransactionwithwallet(newtx, None, "NONE")
        txid = node.sendrawtransaction(signresult["hex"], 0)
        txids.append(txid)
    return txids


def mine_large_block(test_framework, node, utxos=None):
    # generate a 66k transaction,
    # and 14 of them are close to the 1MB block limit
    num = 14
    txouts = gen_return_txouts()
    utxos = utxos if utxos is not None else []
    if len(utxos) < num:
        utxos.clear()
        utxos.extend(node.listunspent())
    fee = 100 * node.getnetworkinfo()["relayfee"]
    create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
    test_framework.generate(node, 1)

def find_vout_for_address(node, txid, addr):
    """
    Locate the vout index of the given transaction sending to the
    given address. Raises runtime error exception if not found.
    """
    tx = node.getrawtransaction(txid, True)
    for i in range(len(tx["vout"])):
        if addr == tx["vout"][i]["scriptPubKey"]["address"]:
            return i
    raise RuntimeError("Vout not found for address: txid=%s, addr=%s" % (txid, addr))

def modinv(a, n):
    """Compute the modular inverse of a modulo n using the extended Euclidean
    Algorithm. See https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm#Modular_integers.
    """
    # TODO: Change to pow(a, -1, n) available in Python 3.8
    t1, t2 = 0, 1
    r1, r2 = n, a
    while r2 != 0:
        q = r1 // r2
        t1, t2 = t2, t1 - q * t2
        r1, r2 = r2, r1 - q * r2
    if r1 > 1:
        return None
    if t1 < 0:
        t1 += n
    return t1

class TestFrameworkUtil(unittest.TestCase):
    def test_modinv(self):
        # All moduli below are prime, so Fermat's little theorem gives the
        # expected inverse as pow(a, n-2, n).
        test_vectors = [
            [7, 11],
            [11, 29],
            [90, 13],
            [1891, 3797],
            [6003722857, 77695236973],
        ]

        for a, n in test_vectors:
            self.assertEqual(modinv(a, n), pow(a, n-2, n))
329
miner_imports/test_framework/wallet.py
Normal file
@@ -0,0 +1,329 @@
#!/usr/bin/env python3
# Copyright (c) 2020-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""A limited-functionality wallet, which may replace a real wallet in tests"""

from copy import deepcopy
from decimal import Decimal
from enum import Enum
from random import choice
from typing import Optional
from test_framework.address import (
    base58_to_byte,
    create_deterministic_address_bcrt1_p2tr_op_true,
    key_to_p2pkh,
    key_to_p2sh_p2wpkh,
    key_to_p2wpkh,
)
from test_framework.descriptors import descsum_create
from test_framework.key import ECKey
from test_framework.messages import (
    COIN,
    COutPoint,
    CTransaction,
    CTxIn,
    CTxInWitness,
    CTxOut,
    tx_from_hex,
)
from test_framework.script import (
    CScript,
    LegacySignatureHash,
    LEAF_VERSION_TAPSCRIPT,
    OP_NOP,
    OP_TRUE,
    SIGHASH_ALL,
)
from test_framework.script_util import (
    key_to_p2pk_script,
    key_to_p2pkh_script,
    key_to_p2sh_p2wpkh_script,
    key_to_p2wpkh_script,
    keyhash_to_p2pkh_script,
    scripthash_to_p2sh_script,
)
from test_framework.util import (
    assert_equal,
    assert_greater_than_or_equal,
)

DEFAULT_FEE = Decimal("0.0001")

class MiniWalletMode(Enum):
    """Determines the transaction type the MiniWallet is creating and spending.

    For most purposes, the default mode ADDRESS_OP_TRUE should be sufficient;
    it simply uses a fixed bech32m P2TR address whose coins are spent with a
    witness stack of OP_TRUE, i.e. following an anyone-can-spend policy.
    However, if the transactions need to be modified by the user (e.g. prepending
    scriptSig for testing opcodes that are activated by a soft-fork), or the txs
    should contain an actual signature, the raw modes RAW_OP_TRUE and RAW_P2PK
    can be useful. Summary of modes:

                        |      output       |           |  tx is   | can modify |  needs
        mode            |    description    |  address  | standard |  scriptSig | signing
        ----------------+-------------------+-----------+----------+------------+----------
        ADDRESS_OP_TRUE | anyone-can-spend  |  bech32m  |   yes    |    no      |    no
        RAW_OP_TRUE     | anyone-can-spend  |  - (raw)  |   no     |    yes     |    no
        RAW_P2PK        | pay-to-public-key |  - (raw)  |   yes    |    yes     |   yes
    """
    ADDRESS_OP_TRUE = 1
    RAW_OP_TRUE = 2
    RAW_P2PK = 3
class MiniWallet:
|
||||||
|
def __init__(self, test_node, *, mode=MiniWalletMode.ADDRESS_OP_TRUE):
|
||||||
|
self._test_node = test_node
|
||||||
|
self._utxos = []
|
||||||
|
self._priv_key = None
|
||||||
|
self._address = None
|
||||||
|
|
||||||
|
assert isinstance(mode, MiniWalletMode)
|
||||||
|
if mode == MiniWalletMode.RAW_OP_TRUE:
|
||||||
|
self._scriptPubKey = bytes(CScript([OP_TRUE]))
|
||||||
|
elif mode == MiniWalletMode.RAW_P2PK:
|
||||||
|
# use simple deterministic private key (k=1)
|
||||||
|
self._priv_key = ECKey()
|
||||||
|
self._priv_key.set((1).to_bytes(32, 'big'), True)
|
||||||
|
pub_key = self._priv_key.get_pubkey()
|
||||||
|
self._scriptPubKey = key_to_p2pk_script(pub_key.get_bytes())
|
||||||
|
elif mode == MiniWalletMode.ADDRESS_OP_TRUE:
|
||||||
|
self._address, self._internal_key = create_deterministic_address_bcrt1_p2tr_op_true()
|
||||||
|
self._scriptPubKey = bytes.fromhex(self._test_node.validateaddress(self._address)['scriptPubKey'])
|
||||||
|
|
||||||
|
def rescan_utxos(self):
|
||||||
|
"""Drop all utxos and rescan the utxo set"""
|
||||||
|
self._utxos = []
|
||||||
|
res = self._test_node.scantxoutset(action="start", scanobjects=[self.get_descriptor()])
|
||||||
|
assert_equal(True, res['success'])
|
||||||
|
for utxo in res['unspents']:
|
||||||
|
self._utxos.append({'txid': utxo['txid'], 'vout': utxo['vout'], 'value': utxo['amount'], 'height': utxo['height']})
|
||||||
|
|
||||||
|
def scan_tx(self, tx):
|
||||||
|
"""Scan the tx for self._scriptPubKey outputs and add them to self._utxos"""
|
||||||
|
for out in tx['vout']:
|
||||||
|
if out['scriptPubKey']['hex'] == self._scriptPubKey.hex():
|
||||||
|
self._utxos.append({'txid': tx['txid'], 'vout': out['n'], 'value': out['value'], 'height': 0})
|
||||||
|
|
||||||
|
def sign_tx(self, tx, fixed_length=True):
|
||||||
|
"""Sign tx that has been created by MiniWallet in P2PK mode"""
|
||||||
|
assert self._priv_key is not None
|
||||||
|
(sighash, err) = LegacySignatureHash(CScript(self._scriptPubKey), tx, 0, SIGHASH_ALL)
|
||||||
|
assert err is None
|
||||||
|
# for exact fee calculation, create only signatures with fixed size by default (>49.89% probability):
|
||||||
|
# 65 bytes: high-R val (33 bytes) + low-S val (32 bytes)
|
||||||
|
# with the DER header/skeleton data of 6 bytes added, this leads to a target size of 71 bytes
|
||||||
|
der_sig = b''
|
||||||
|
while not len(der_sig) == 71:
|
||||||
|
der_sig = self._priv_key.sign_ecdsa(sighash)
|
||||||
|
if not fixed_length:
|
||||||
|
break
|
||||||
|
tx.vin[0].scriptSig = CScript([der_sig + bytes(bytearray([SIGHASH_ALL]))])
|
||||||
|
|
||||||
|
def generate(self, num_blocks, **kwargs):
|
||||||
|
"""Generate blocks with coinbase outputs to the internal address, and append the outputs to the internal list"""
|
||||||
|
blocks = self._test_node.generatetodescriptor(num_blocks, self.get_descriptor(), **kwargs)
|
||||||
|
for b in blocks:
|
||||||
|
block_info = self._test_node.getblock(blockhash=b, verbosity=2)
|
||||||
|
cb_tx = block_info['tx'][0]
|
||||||
|
self._utxos.append({'txid': cb_tx['txid'], 'vout': 0, 'value': cb_tx['vout'][0]['value'], 'height': block_info['height']})
|
||||||
|
return blocks
|
||||||
|
|
||||||
|
def get_descriptor(self):
|
||||||
|
return descsum_create(f'raw({self._scriptPubKey.hex()})')
|
||||||
|
|
||||||
|
def get_address(self):
|
||||||
|
return self._address
|
||||||
|
|
||||||
|
def get_utxo(self, *, txid: Optional[str]='', mark_as_spent=True):
|
||||||
|
"""
|
||||||
|
Returns a utxo and marks it as spent (pops it from the internal list)
|
||||||
|
|
||||||
|
Args:
|
||||||
|
txid: get the first utxo we find from a specific transaction
|
||||||
|
"""
|
||||||
|
index = -1 # by default the last utxo
|
||||||
|
self._utxos = sorted(self._utxos, key=lambda k: (k['value'], -k['height'])) # Put the largest utxo last
|
||||||
|
if txid:
|
||||||
|
utxo = next(filter(lambda utxo: txid == utxo['txid'], self._utxos))
|
||||||
|
index = self._utxos.index(utxo)
|
||||||
|
if mark_as_spent:
|
||||||
|
return self._utxos.pop(index)
|
||||||
|
else:
|
||||||
|
return self._utxos[index]
|
||||||
|
|
||||||
|
def send_self_transfer(self, **kwargs):
|
||||||
|
"""Create and send a tx with the specified fee_rate. Fee may be exact or at most one satoshi higher than needed."""
|
||||||
|
tx = self.create_self_transfer(**kwargs)
|
||||||
|
self.sendrawtransaction(from_node=kwargs['from_node'], tx_hex=tx['hex'])
|
||||||
|
return tx
|
||||||
|
|
||||||
|
def send_to(self, *, from_node, scriptPubKey, amount, fee=1000):
|
||||||
|
"""
|
||||||
|
Create and send a tx with an output to a given scriptPubKey/amount,
|
||||||
|
plus a change output to our internal address. To keep things simple, a
|
||||||
|
fixed fee given in Satoshi is used.
|
||||||
|
|
||||||
|
Note that this method fails if there is no single internal utxo
|
||||||
|
available that can cover the cost for the amount and the fixed fee
|
||||||
|
(the utxo with the largest value is taken).
|
||||||
|
|
||||||
|
Returns a tuple (txid, n) referring to the created external utxo outpoint.
|
||||||
|
"""
|
||||||
|
tx = self.create_self_transfer(from_node=from_node, fee_rate=0, mempool_valid=False)['tx']
|
||||||
|
assert_greater_than_or_equal(tx.vout[0].nValue, amount + fee)
|
||||||
|
tx.vout[0].nValue -= (amount + fee) # change output -> MiniWallet
|
||||||
|
tx.vout.append(CTxOut(amount, scriptPubKey)) # arbitrary output -> to be returned
|
||||||
|
txid = self.sendrawtransaction(from_node=from_node, tx_hex=tx.serialize().hex())
|
||||||
|
return txid, 1
|
||||||
|
|
||||||
|
    def create_self_transfer(self, *, fee_rate=Decimal("0.003"), from_node=None, utxo_to_spend=None, mempool_valid=True, locktime=0, sequence=0):
        """Create and return a tx with the specified fee_rate. Fee may be exact or at most one satoshi higher than needed."""
        from_node = from_node or self._test_node
        utxo_to_spend = utxo_to_spend or self.get_utxo()
        if self._priv_key is None:
            vsize = Decimal(104)  # anyone-can-spend
        else:
            vsize = Decimal(168)  # P2PK (73 bytes scriptSig + 35 bytes scriptPubKey + 60 bytes other)
        send_value = int(COIN * (utxo_to_spend['value'] - fee_rate * (vsize / 1000)))
        assert send_value > 0

        tx = CTransaction()
        tx.vin = [CTxIn(COutPoint(int(utxo_to_spend['txid'], 16), utxo_to_spend['vout']), nSequence=sequence)]
        tx.vout = [CTxOut(send_value, self._scriptPubKey)]
        tx.nLockTime = locktime
        if not self._address:
            # raw script
            if self._priv_key is not None:
                # P2PK, need to sign
                self.sign_tx(tx)
            else:
                # anyone-can-spend
                tx.vin[0].scriptSig = CScript([OP_NOP] * 43)  # pad to identical size
        else:
            tx.wit.vtxinwit = [CTxInWitness()]
            tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE]), bytes([LEAF_VERSION_TAPSCRIPT]) + self._internal_key]
        tx_hex = tx.serialize().hex()

        tx_info = from_node.testmempoolaccept([tx_hex])[0]
        assert_equal(mempool_valid, tx_info['allowed'])
        if mempool_valid:
            assert_equal(tx_info['vsize'], vsize)
            assert_equal(tx_info['fees']['base'], utxo_to_spend['value'] - Decimal(send_value) / COIN)
        return {'txid': tx_info['txid'], 'wtxid': tx_info['wtxid'], 'hex': tx_hex, 'tx': tx}
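    # Worked example of the fee arithmetic above (illustrative only): with the
    # default fee_rate of 0.003 BTC/kvB and the anyone-can-spend vsize of 104,
    # spending a 25 BTC utxo yields
    #
    #   fee        = 0.003 * 104 / 1000           = 0.000312 BTC
    #   send_value = int(COIN * (25 - 0.000312))  = 2_499_968_800 sat
    #
    # i.e. the whole input minus an exact fee for the predicted vsize.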
    def sendrawtransaction(self, *, from_node, tx_hex, **kwargs):
        txid = from_node.sendrawtransaction(hexstring=tx_hex, **kwargs)
        self.scan_tx(from_node.decoderawtransaction(tx_hex))
        return txid


def getnewdestination(address_type='bech32'):
    """Generate a random destination of the specified type and return the
    corresponding public key, scriptPubKey and address. Supported types are
    'legacy', 'p2sh-segwit' and 'bech32'. Can be used when a random
    destination is needed, but no compiled wallet is available (e.g. as
    replacement to the getnewaddress/getaddressinfo RPCs)."""
    key = ECKey()
    key.generate()
    pubkey = key.get_pubkey().get_bytes()
    if address_type == 'legacy':
        scriptpubkey = key_to_p2pkh_script(pubkey)
        address = key_to_p2pkh(pubkey)
    elif address_type == 'p2sh-segwit':
        scriptpubkey = key_to_p2sh_p2wpkh_script(pubkey)
        address = key_to_p2sh_p2wpkh(pubkey)
    elif address_type == 'bech32':
        scriptpubkey = key_to_p2wpkh_script(pubkey)
        address = key_to_p2wpkh(pubkey)
    # TODO: also support bech32m (need to generate x-only-pubkey)
    else:
        assert False
    return pubkey, scriptpubkey, address
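# Illustrative usage only (not part of the upstream file): each call creates a
# brand-new random key, which makes it handy for "pay to a stranger" cases:
#
#   pubkey, spk, addr = getnewdestination('legacy')
#   assert len(pubkey) == 33                 # compressed pubkey
#   assert spk == key_to_p2pkh_script(pubkey)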
def address_to_scriptpubkey(address):
    """Converts a given address to the corresponding output script (scriptPubKey)."""
    payload, version = base58_to_byte(address)
    if version == 111:  # testnet pubkey hash
        return keyhash_to_p2pkh_script(payload)
    elif version == 196:  # testnet script hash
        return scripthash_to_p2sh_script(payload)
    # TODO: also support other address formats
    else:
        assert False
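# Note (illustrative, not part of the upstream file): 111 (0x6f) and 196 (0xc4)
# are the base58 version bytes shared by testnet, regtest and signet, so
# addresses beginning with 'm'/'n' (P2PKH) or '2' (P2SH) on a custom signet
# decode with this helper unchanged.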
def make_chain(node, address, privkeys, parent_txid, parent_value, n=0, parent_locking_script=None, fee=DEFAULT_FEE):
    """Build a transaction that spends parent_txid.vout[n] and produces one output with
    amount = parent_value with a fee deducted.
    Return tuple (CTransaction object, raw hex, nValue, scriptPubKey of the output created).
    """
    inputs = [{"txid": parent_txid, "vout": n}]
    my_value = parent_value - fee
    outputs = {address: my_value}
    rawtx = node.createrawtransaction(inputs, outputs)
    prevtxs = [{
        "txid": parent_txid,
        "vout": n,
        "scriptPubKey": parent_locking_script,
        "amount": parent_value,
    }] if parent_locking_script else None
    signedtx = node.signrawtransactionwithkey(hexstring=rawtx, privkeys=privkeys, prevtxs=prevtxs)
    assert signedtx["complete"]
    tx = tx_from_hex(signedtx["hex"])
    return (tx, signedtx["hex"], my_value, tx.vout[0].scriptPubKey.hex())
def create_child_with_parents(node, address, privkeys, parents_tx, values, locking_scripts, fee=DEFAULT_FEE):
    """Creates a transaction that spends the first output of each parent in parents_tx."""
    num_parents = len(parents_tx)
    total_value = sum(values)
    inputs = [{"txid": tx.rehash(), "vout": 0} for tx in parents_tx]
    outputs = {address: total_value - fee}
    rawtx_child = node.createrawtransaction(inputs, outputs)
    prevtxs = []
    for i in range(num_parents):
        prevtxs.append({"txid": parents_tx[i].rehash(), "vout": 0, "scriptPubKey": locking_scripts[i], "amount": values[i]})
    signedtx_child = node.signrawtransactionwithkey(hexstring=rawtx_child, privkeys=privkeys, prevtxs=prevtxs)
    assert signedtx_child["complete"]
    return signedtx_child["hex"]
def create_raw_chain(node, first_coin, address, privkeys, chain_length=25):
    """Helper function: create a "chain" of chain_length transactions. The nth transaction in the
    chain is a child of the n-1th transaction and parent of the n+1th transaction.
    """
    parent_locking_script = None
    txid = first_coin["txid"]
    chain_hex = []
    chain_txns = []
    value = first_coin["amount"]

    for _ in range(chain_length):
        (tx, txhex, value, parent_locking_script) = make_chain(node, address, privkeys, txid, value, 0, parent_locking_script)
        txid = tx.rehash()
        chain_hex.append(txhex)
        chain_txns.append(tx)

    return (chain_hex, chain_txns)
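# Illustrative usage only (not part of the upstream file): submitting the chain
# in order keeps every child's parent already in the mempool, e.g.:
#
#   chain_hex, chain_txns = create_raw_chain(node, first_coin, addr, privkeys)
#   for txhex in chain_hex:
#       node.sendrawtransaction(txhex)   # each tx spends the previous tx's vout 0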
def bulk_transaction(tx, node, target_weight, privkeys, prevtxs=None):
    """Pad a transaction with extra outputs until it reaches a target weight (or higher).
    returns CTransaction object
    """
    tx_heavy = deepcopy(tx)
    assert_greater_than_or_equal(target_weight, tx_heavy.get_weight())
    while tx_heavy.get_weight() < target_weight:
        random_spk = "6a4d0200"  # OP_RETURN OP_PUSHDATA2 512 bytes
        for _ in range(512*2):
            random_spk += choice("0123456789ABCDEF")
        tx_heavy.vout.append(CTxOut(0, bytes.fromhex(random_spk)))
    # Re-sign the transaction
    if privkeys:
        signed = node.signrawtransactionwithkey(tx_heavy.serialize().hex(), privkeys, prevtxs)
        return tx_from_hex(signed["hex"])
    # OP_TRUE
    tx_heavy.wit.vtxinwit = [CTxInWitness()]
    tx_heavy.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
    return tx_heavy
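# Worked example of the padding arithmetic (illustrative only): each appended
# output serializes to 8 bytes of value + 3 bytes of script-length varint +
# 516 bytes of script (OP_RETURN OP_PUSHDATA2 <512 bytes>), i.e. 527 bytes,
# and non-witness bytes count 4 weight units each, so every loop iteration
# adds roughly 2100 weight units toward target_weight.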
121
miner_imports/test_framework/wallet_util.py
Executable file
@@ -0,0 +1,121 @@
#!/usr/bin/env python3
# Copyright (c) 2018-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Useful util functions for testing the wallet"""
from collections import namedtuple

from test_framework.address import (
    byte_to_base58,
    key_to_p2pkh,
    key_to_p2sh_p2wpkh,
    key_to_p2wpkh,
    script_to_p2sh,
    script_to_p2sh_p2wsh,
    script_to_p2wsh,
)
from test_framework.key import ECKey
from test_framework.script_util import (
    key_to_p2pkh_script,
    key_to_p2wpkh_script,
    keys_to_multisig_script,
    script_to_p2sh_script,
    script_to_p2wsh_script,
)
Key = namedtuple('Key', ['privkey',
                         'pubkey',
                         'p2pkh_script',
                         'p2pkh_addr',
                         'p2wpkh_script',
                         'p2wpkh_addr',
                         'p2sh_p2wpkh_script',
                         'p2sh_p2wpkh_redeem_script',
                         'p2sh_p2wpkh_addr'])

Multisig = namedtuple('Multisig', ['privkeys',
                                   'pubkeys',
                                   'p2sh_script',
                                   'p2sh_addr',
                                   'redeem_script',
                                   'p2wsh_script',
                                   'p2wsh_addr',
                                   'p2sh_p2wsh_script',
                                   'p2sh_p2wsh_addr'])
def get_key(node):
    """Generate a fresh key on node

    Returns a named tuple of privkey, pubkey and all address and scripts."""
    addr = node.getnewaddress()
    pubkey = node.getaddressinfo(addr)['pubkey']
    return Key(privkey=node.dumpprivkey(addr),
               pubkey=pubkey,
               p2pkh_script=key_to_p2pkh_script(pubkey).hex(),
               p2pkh_addr=key_to_p2pkh(pubkey),
               p2wpkh_script=key_to_p2wpkh_script(pubkey).hex(),
               p2wpkh_addr=key_to_p2wpkh(pubkey),
               p2sh_p2wpkh_script=script_to_p2sh_script(key_to_p2wpkh_script(pubkey)).hex(),
               p2sh_p2wpkh_redeem_script=key_to_p2wpkh_script(pubkey).hex(),
               p2sh_p2wpkh_addr=key_to_p2sh_p2wpkh(pubkey))
def get_generate_key():
    """Generate a fresh key

    Returns a named tuple of privkey, pubkey and all address and scripts."""
    eckey = ECKey()
    eckey.generate()
    privkey = bytes_to_wif(eckey.get_bytes())
    pubkey = eckey.get_pubkey().get_bytes().hex()
    return Key(privkey=privkey,
               pubkey=pubkey,
               p2pkh_script=key_to_p2pkh_script(pubkey).hex(),
               p2pkh_addr=key_to_p2pkh(pubkey),
               p2wpkh_script=key_to_p2wpkh_script(pubkey).hex(),
               p2wpkh_addr=key_to_p2wpkh(pubkey),
               p2sh_p2wpkh_script=script_to_p2sh_script(key_to_p2wpkh_script(pubkey)).hex(),
               p2sh_p2wpkh_redeem_script=key_to_p2wpkh_script(pubkey).hex(),
               p2sh_p2wpkh_addr=key_to_p2sh_p2wpkh(pubkey))
def get_multisig(node):
    """Generate a fresh 2-of-3 multisig on node

    Returns a named tuple of privkeys, pubkeys and all address and scripts."""
    addrs = []
    pubkeys = []
    for _ in range(3):
        addr = node.getaddressinfo(node.getnewaddress())
        addrs.append(addr['address'])
        pubkeys.append(addr['pubkey'])
    script_code = keys_to_multisig_script(pubkeys, k=2)
    witness_script = script_to_p2wsh_script(script_code)
    return Multisig(privkeys=[node.dumpprivkey(addr) for addr in addrs],
                    pubkeys=pubkeys,
                    p2sh_script=script_to_p2sh_script(script_code).hex(),
                    p2sh_addr=script_to_p2sh(script_code),
                    redeem_script=script_code.hex(),
                    p2wsh_script=witness_script.hex(),
                    p2wsh_addr=script_to_p2wsh(script_code),
                    p2sh_p2wsh_script=script_to_p2sh_script(witness_script).hex(),
                    p2sh_p2wsh_addr=script_to_p2sh_p2wsh(script_code))
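# Illustrative usage only (not part of the upstream file): the fixture exposes
# every address form of the same 2-of-3, so a test can fund e.g. the P2WSH one:
#
#   ms = get_multisig(node)
#   txid = node.sendtoaddress(ms.p2wsh_addr, 1)
#   # spending later needs signatures for 2 of ms.privkeys plus ms.redeem_script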
def test_address(node, address, **kwargs):
    """Get address info for `address` and test whether the returned values are as expected."""
    addr_info = node.getaddressinfo(address)
    for key, value in kwargs.items():
        if value is None:
            if key in addr_info.keys():
                raise AssertionError("key {} unexpectedly returned in getaddressinfo.".format(key))
        elif addr_info[key] != value:
            raise AssertionError("key {} value {} did not match expected value {}".format(key, addr_info[key], value))
def bytes_to_wif(b, compressed=True):
    if compressed:
        b += b'\x01'
    return byte_to_base58(b, 239)


def generate_wif_key():
    # Makes a WIF privkey for imports
    k = ECKey()
    k.generate()
    return bytes_to_wif(k.get_bytes(), k.is_compressed)
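# Note (illustrative, not part of the upstream file): 239 (0xef) is the WIF
# version byte for test networks, and the trailing 0x01 marks a compressed
# pubkey, which is why keys produced here import cleanly on a custom signet:
#
#   wif = generate_wif_key()   # e.g. starts with 'c' -- ready for importprivkey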
43
rpcauth.py
Executable file
@@ -0,0 +1,43 @@
#!/usr/bin/env python3
# Copyright (c) 2015-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

from argparse import ArgumentParser
from base64 import urlsafe_b64encode
from getpass import getpass
from os import urandom

import hmac
def generate_salt(size):
    """Create size byte hex salt"""
    return urandom(size).hex()


def generate_password():
    """Create 32 byte b64 password"""
    return urlsafe_b64encode(urandom(32)).decode('utf-8')


def password_to_hmac(salt, password):
    m = hmac.new(bytearray(salt, 'utf-8'), bytearray(password, 'utf-8'), 'SHA256')
    return m.hexdigest()
def main():
    parser = ArgumentParser(description='Create login credentials for a JSON-RPC user')
    parser.add_argument('username', help='the username for authentication')
    parser.add_argument('password', help='leave empty to generate a random password or specify "-" to prompt for password', nargs='?')
    args = parser.parse_args()

    if not args.password:
        args.password = generate_password()
    elif args.password == '-':
        args.password = getpass()

    # Create 16 byte hex salt
    salt = generate_salt(16)
    password_hmac = password_to_hmac(salt, args.password)

    print('{0}:{1}${2}'.format(args.username, salt, password_hmac))


if __name__ == '__main__':
    main()
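# Illustrative usage only (not part of the file): the printed credential is the
# value that goes after `rpcauth=` in bitcoin.conf, e.g.
#
#   $ python3 rpcauth.py bitcoin secretpass
#   bitcoin:0e8f2af8...$5c7a6e...
#
# would be configured as `rpcauth=bitcoin:0e8f2af8...$5c7a6e...` (hex values
# truncated here); the node verifies HMAC-SHA256(salt, password) at RPC login,
# so the plaintext password never needs to be stored server-side.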
14
run.sh
Executable file
@@ -0,0 +1,14 @@
#!/bin/bash

# start bitcoind and wait until the daemon is fully initialized
bitcoind --daemonwait
sleep 5

# extract the network magic (last 8 hex chars of the first "magic" log line)
# so other services can identify this custom signet
echo "get magic"
magic=$(grep -m1 magic /root/.bitcoin/signet/debug.log)
magic=${magic:(-8)}
echo "$magic" > /root/.bitcoin/MAGIC.txt

# if in mining mode, hand over to the block-production loop
if [[ "$MINERENABLED" == "1" ]]; then
    mine.sh
fi
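# Illustrative note (not part of run.sh): on signet the magic is derived from
# the signet challenge, so a debug.log line resembling
#
#   Signet derived magic (message start): a9f23c61
#
# is what the grep above is expected to match; the trailing 8 hex characters
# are the 4 magic bytes peers prepend to every P2P message on this network.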
14
setup-signet.sh
Executable file
@@ -0,0 +1,14 @@
#!/bin/bash

PRIVKEY=${PRIVKEY:-$(cat ~/.bitcoin/PRIVKEY.txt)}
DATADIR=${DATADIR:-~/.bitcoin/}
bitcoind -datadir="$DATADIR" --daemonwait -persistmempool
bitcoin-cli -datadir="$DATADIR" -named createwallet wallet_name="custom_signet" load_on_startup=true descriptors=false

# only used in the case of a mining node
if [[ "$MINERENABLED" == "1" ]]; then
    bitcoin-cli -datadir="$DATADIR" importprivkey "$PRIVKEY"
    ## for the future with descriptor wallets; cannot seem to get it working yet
    # descinfo=$(bitcoin-cli getdescriptorinfo "wpkh(${PRIVKEY})")
    # checksum=$(echo "$descinfo" | jq .checksum | tr -d '"' | tr -d "\n")
    # desc='[{"desc":"wpkh('$PRIVKEY')#'$checksum'","timestamp":0,"internal":false}]'
    # bitcoin-cli -datadir=$DATADIR importdescriptors $desc
fi
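# Illustrative follow-up (not part of setup-signet.sh): after the import, a
# quick sanity check confirms the mining wallet controls funds once blocks
# have been mined to it, e.g.:
#
#   bitcoin-cli -datadir="$DATADIR" getwalletinfo | jq .balance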