From db3707f957199d27fd69d65394efe944ed981379 Mon Sep 17 00:00:00 2001 From: Christian Decker Date: Tue, 11 Apr 2023 06:57:45 +0930 Subject: [PATCH] pytest: Highlight the re-entrancy issue for cln-plugin events This was pointed out by Daywalker [1]: we are synchronously processing events from `lightningd` which means that if processing one of the hooks or requests was slow or delayed, we would not get notifications, hooks, or RPC requests, massively impacting the flexibility. This highlights the issue with a failing test (it times out), and in the next commit we will isolate event processing into their own task, so as to free the event loop from having to wait for an eventual response. [1] https://community.corelightning.org/c/developers/hold-invoice-plugin#comment_wrapper_16754493 --- cln-rpc/Makefile | 3 ++ plugins/Makefile | 7 ++-- plugins/examples/cln-plugin-reentrant.rs | 44 ++++++++++++++++++++++++ tests/test_cln_rs.py | 40 +++++++++++++++++++++ 4 files changed, 91 insertions(+), 3 deletions(-) create mode 100644 plugins/examples/cln-plugin-reentrant.rs diff --git a/cln-rpc/Makefile b/cln-rpc/Makefile index 339894ae1..eff4fdbd4 100644 --- a/cln-rpc/Makefile +++ b/cln-rpc/Makefile @@ -16,5 +16,8 @@ target/${RUST_PROFILE}/examples/cln-rpc-getinfo: $(shell find cln-rpc -name *.rs target/${RUST_PROFILE}/examples/cln-plugin-startup: $(shell find cln-rpc -name *.rs) cargo build ${CARGO_OPTS} --example cln-plugin-startup +target/${RUST_PROFILE}/examples/cln-plugin-reentrant: $(shell find plugins/examples -name *.rs) + cargo build ${CARGO_OPTS} --example cln-plugin-reentrant + cln-rpc-all: ${CLN_RPC_GEN_ALL} ${CLN_RPC_EXAMPLES} diff --git a/plugins/Makefile b/plugins/Makefile index 0928c3b6d..9a7b44eb9 100644 --- a/plugins/Makefile +++ b/plugins/Makefile @@ -1,5 +1,5 @@ PLUGIN_PAY_SRC := plugins/pay.c -PLUGIN_PAY_HEADER := +PLUGIN_PAY_HEADER := PLUGIN_PAY_OBJS := $(PLUGIN_PAY_SRC:.c=.o) PLUGIN_AUTOCLEAN_SRC := plugins/autoclean.c @@ -40,7 +40,7 @@ 
PLUGIN_FETCHINVOICE_OBJS := $(PLUGIN_FETCHINVOICE_SRC:.c=.o) PLUGIN_FETCHINVOICE_HEADER := PLUGIN_SQL_SRC := plugins/sql.c -PLUGIN_SQL_HEADER := +PLUGIN_SQL_HEADER := PLUGIN_SQL_OBJS := $(PLUGIN_SQL_SRC:.c=.o) PLUGIN_SPENDER_SRC := \ @@ -188,7 +188,7 @@ plugins/autoclean: $(PLUGIN_AUTOCLEAN_OBJS) $(PLUGIN_LIB_OBJS) $(PLUGIN_COMMON_O plugins/chanbackup: $(PLUGIN_chanbackup_OBJS) $(PLUGIN_LIB_OBJS) $(PLUGIN_COMMON_OBJS) $(JSMN_OBJS) -plugins/commando: $(PLUGIN_COMMANDO_OBJS) $(PLUGIN_LIB_OBJS) $(PLUGIN_COMMON_OBJS) $(JSMN_OBJS) +plugins/commando: $(PLUGIN_COMMANDO_OBJS) $(PLUGIN_LIB_OBJS) $(PLUGIN_COMMON_OBJS) $(JSMN_OBJS) # Topology wants to decode node_announcement, and peer_wiregen which # pulls in some of bitcoin/. @@ -232,6 +232,7 @@ plugins/list_of_builtin_plugins_gen.h: plugins/Makefile Makefile config.vars CLN_PLUGIN_EXAMPLES := \ target/${RUST_PROFILE}/examples/cln-plugin-startup \ + target/${RUST_PROFILE}/examples/cln-plugin-reentrant \ target/${RUST_PROFILE}/examples/cln-rpc-getinfo CLN_PLUGIN_SRC = $(shell find plugins/src -name "*.rs") diff --git a/plugins/examples/cln-plugin-reentrant.rs b/plugins/examples/cln-plugin-reentrant.rs new file mode 100644 index 000000000..c42c1d0ad --- /dev/null +++ b/plugins/examples/cln-plugin-reentrant.rs @@ -0,0 +1,44 @@ +use anyhow::Error; +use cln_plugin::{Builder, Plugin}; +use serde_json::json; +use tokio::sync::broadcast; + +#[derive(Clone)] +struct State { + tx: broadcast::Sender<()>, +} + +#[tokio::main] +async fn main() -> Result<(), Error> { + let (tx, _) = broadcast::channel(4); + let state = State { tx }; + + if let Some(plugin) = Builder::new(tokio::io::stdin(), tokio::io::stdout()) + .hook("htlc_accepted", htlc_accepted_handler) + .rpcmethod("release", "Release all HTLCs we currently hold", release) + .start(state) + .await? 
+ { + plugin.join().await?; + Ok(()) + } else { + Ok(()) + } +} + +/// Release all waiting HTLCs +async fn release(p: Plugin, _v: serde_json::Value) -> Result { + p.state().tx.send(()).unwrap(); + Ok(json!("Released!")) +} + +async fn htlc_accepted_handler( + p: Plugin, + v: serde_json::Value, +) -> Result { + log::info!("Holding on to incoming HTLC {:?}", v); + // Wait for `release` to be called. + p.state().tx.subscribe().recv().await.unwrap(); + + Ok(json!({"result": "continue"})) +} diff --git a/tests/test_cln_rs.py b/tests/test_cln_rs.py index c2a6a4a39..49ea388ae 100644 --- a/tests/test_cln_rs.py +++ b/tests/test_cln_rs.py @@ -249,6 +249,46 @@ def test_grpc_wrong_auth(node_factory): stub.Getinfo(nodepb.GetinfoRequest()) +@pytest.mark.xfail( + reason="Times out because we can't call the RPC method while currently holding on to HTLCs", + strict=True, +) +def test_cln_plugin_reentrant(node_factory, executor): + """Ensure that we continue processing events while already handling a prior event. + + We should be continuing to handle incoming events even though a + prior event has not completed. This is important for things like + the `htlc_accepted` hook which needs to hold on to multiple + incoming HTLCs. + + Scenario: l1 uses an `htlc_accepted` to hold on to incoming HTLCs, + and we release them using an RPC method. + + """ + bin_path = Path.cwd() / "target" / RUST_PROFILE / "examples" / "cln-plugin-reentrant" + l1 = node_factory.get_node(options={"plugin": str(bin_path)}) + l2 = node_factory.get_node() + l2.connect(l1) + l2.fundchannel(l1) + + # Now create two invoices, and pay them both. Neither should + # succeed, but we should queue them on the plugin. 
+ i1 = l1.rpc.invoice(label='lbl1', msatoshi='42sat', description='desc')['bolt11'] + i2 = l1.rpc.invoice(label='lbl2', msatoshi='31337sat', description='desc')['bolt11'] + + f1 = executor.submit(l2.rpc.pay, i1) + f2 = executor.submit(l2.rpc.pay, i2) + + import time + time.sleep(3) + + print("Releasing HTLCs after holding them") + l1.rpc.call('release') + + assert f1.result() + assert f2.result() + + def test_grpc_keysend_routehint(bitcoind, node_factory): """The routehints are a bit special, test that conversions work.