From a7cbe93fb8f112c5c5d0952d16593228bce49294 Mon Sep 17 00:00:00 2001
From: darosior
Date: Sun, 8 Sep 2019 17:26:44 +0200
Subject: [PATCH] closingd: retransmit 'funding_locked' if we reconnect without any update

As per BOLT #2 #message-retransmission:

  if `next_commitment_number` is 1 in both the `channel_reestablish` it
  sent and received:
    - MUST retransmit `funding_locked`
---
 CHANGELOG.md             |  1 +
 channeld/channeld.c      |  2 ++
 closingd/closingd.c      | 56 ++++++++++++++++++++++++++++------------
 tests/test_connection.py | 27 +++++++++++++++++++
 4 files changed, 69 insertions(+), 17 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 824477a06..3e734d152 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,6 +11,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 - bolt11: support for parsing feature bits (field `9`).
+- Protocol: we now retransmit `funding_locked` upon reconnection while closing if there was no update.
 
 ### Changed
 
 - JSON API: `txprepare` now uses `outputs` as parameter other than `destination` and `satoshi`
diff --git a/channeld/channeld.c b/channeld/channeld.c
index 91edd331a..d9af23120 100644
--- a/channeld/channeld.c
+++ b/channeld/channeld.c
@@ -2315,6 +2315,8 @@ static void peer_reconnect(struct peer *peer,
 	    && next_commitment_number == 1) {
 		u8 *msg;
 
+		status_debug("Retransmitting funding_locked for channel %s",
+			     type_to_string(tmpctx, struct channel_id, &peer->channel_id));
 		/* Contains per commit point #1, for first post-opening commit */
 		msg = towire_funding_locked(NULL, &peer->channel_id,
diff --git a/closingd/closingd.c b/closingd/closingd.c
index d0e1b481b..319ddbc99 100644
--- a/closingd/closingd.c
+++ b/closingd/closingd.c
@@ -108,25 +108,17 @@ static u8 *closing_read_peer_msg(const tal_t *ctx,
 	}
 }
 
-static void do_reconnect(struct per_peer_state *pps,
-			 const struct channel_id *channel_id,
-			 const u64 next_index[NUM_SIDES],
-			 u64 revocations_received,
-			 const u8 *channel_reestablish,
-			 const u8 *final_scriptpubkey,
-			 const struct secret *last_remote_per_commit_secret)
+static struct pubkey get_per_commitment_point(u64 commitment_number)
 {
 	u8 *msg;
-	struct channel_id their_channel_id;
-	u64 next_local_commitment_number, next_remote_revocation_number;
-	struct pubkey my_current_per_commitment_point;
+	struct pubkey commitment_point;
 	struct secret *s;
 
 	/* Our current per-commitment point is the commitment point in the last
 	 * received signed commitment; HSM gives us that and the previous
 	 * secret (which we don't need). */
 	msg = towire_hsm_get_per_commitment_point(NULL,
-						  next_index[LOCAL]-1);
+						  commitment_number);
 	if (!wire_sync_write(HSM_FD, take(msg)))
 		status_failed(STATUS_FAIL_HSM_IO,
 			      "Writing get_per_commitment_point to HSM: %s",
@@ -138,11 +130,29 @@ static void do_reconnect(struct per_peer_state *pps,
 			      "Reading resp get_per_commitment_point reply: %s",
 			      strerror(errno));
 	if (!fromwire_hsm_get_per_commitment_point_reply(tmpctx, msg,
-							 &my_current_per_commitment_point,
-							 &s))
+							 &commitment_point,
+							 &s))
 		status_failed(STATUS_FAIL_HSM_IO,
-			      "Bad per_commitment_point reply %s",
-			      tal_hex(tmpctx, msg));
+			       "Bad per_commitment_point reply %s",
+			       tal_hex(tmpctx, msg));
+
+	return commitment_point;
+}
+
+static void do_reconnect(struct per_peer_state *pps,
+			 const struct channel_id *channel_id,
+			 const u64 next_index[NUM_SIDES],
+			 u64 revocations_received,
+			 const u8 *channel_reestablish,
+			 const u8 *final_scriptpubkey,
+			 const struct secret *last_remote_per_commit_secret)
+{
+	u8 *msg;
+	struct channel_id their_channel_id;
+	u64 next_local_commitment_number, next_remote_revocation_number;
+	struct pubkey my_current_per_commitment_point, next_commitment_point;
+
+	my_current_per_commitment_point = get_per_commitment_point(next_index[LOCAL]-1);
 
 	/* BOLT #2:
 	 *
@@ -207,8 +217,20 @@
 	msg = towire_shutdown(NULL, channel_id, final_scriptpubkey);
 	sync_crypto_write(pps, take(msg));
 
-	/* FIXME: Spec says to re-xmit funding_locked here if we haven't
-	 * done any updates. */
+	/* BOLT #2:
+	 *
+	 * A node:
+	 *...
+	 *   - if `next_commitment_number` is 1 in both the `channel_reestablish` it sent and received:
+	 *     - MUST retransmit `funding_locked`.
+	 */
+	if (next_index[REMOTE] == 1 && next_index[LOCAL] == 1) {
+		status_debug("Retransmitting funding_locked for channel %s",
+			     type_to_string(tmpctx, struct channel_id, channel_id));
+		next_commitment_point = get_per_commitment_point(next_index[LOCAL]);
+		msg = towire_funding_locked(NULL, channel_id, &next_commitment_point);
+		sync_crypto_write(pps, take(msg));
+	}
 }
 
 static void send_offer(struct per_peer_state *pps,
diff --git a/tests/test_connection.py b/tests/test_connection.py
index a024fde92..0b76011b3 100644
--- a/tests/test_connection.py
+++ b/tests/test_connection.py
@@ -391,6 +391,33 @@ def test_reconnect_gossiping(node_factory):
     l2.daemon.wait_for_log('processing now old peer gone')
 
 
+@unittest.skipIf(not DEVELOPER, "needs dev-disconnect")
+def test_reconnect_no_update(node_factory, executor):
+    """
+    Tests that `funding_locked` is retransmitted on reconnection if
+    `next_commitment_number` is 1 in both the `channel_reestablish` we
+    sent and the one we received.
+ """ + disconnects = ["@WIRE_FUNDING_LOCKED", "@WIRE_SHUTDOWN"] + # Allow bad gossip because it might receive WIRE_CHANNEL_UPDATE before + # announcement before of the disconnection + l1 = node_factory.get_node(may_reconnect=True, allow_bad_gossip=True) + l2 = node_factory.get_node(disconnect=disconnects, may_reconnect=True) + + # For channeld reconnection + l1.rpc.connect(l2.info["id"], "localhost", l2.port) + fundchannel_exec = executor.submit(l1.fund_channel, l2, 10**6, False) + l1.daemon.wait_for_log(r"channeld.* Retransmitting funding_locked for channel") + l1.stop() + + # For closingd reconnection + scid = fundchannel_exec.result() + l1.daemon.start() + executor.submit(l1.rpc.close, scid, 0) + l2.daemon.wait_for_log(r"closingd.* Retransmitting funding_locked for channel") + l1.daemon.wait_for_log(r"CLOSINGD_COMPLETE") + + def test_connect_stresstest(node_factory, executor): # This test is unreliable, but it's better than nothing. l1 = node_factory.get_node(may_reconnect=True)