From 866ac798501e618545aa2497d057d5ab8d800a99 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Mon, 26 Jun 2017 10:46:43 +0930 Subject: [PATCH] channeld: shutdown support. We keep the scriptpubkey to send until after a commitment_signed (or, in the corner case, if there's no pending commitment). When we receive a shutdown from the peer, we pass it up to the master. It's up to the master not to add any more HTLCs, which works because we move from CHANNELD_NORMAL to CHANNELD_SHUTTING_DOWN. Signed-off-by: Rusty Russell --- lightningd/channel/channel.c | 75 +++++++++++++- lightningd/channel/channel_wire.csv | 12 +++ lightningd/peer_control.c | 152 +++++++++++++++++++++++++++- lightningd/peer_control.h | 8 +- lightningd/peer_state.h | 4 +- tests/test_lightningd.py | 23 +++++ 6 files changed, 264 insertions(+), 10 deletions(-) diff --git a/lightningd/channel/channel.c b/lightningd/channel/channel.c index be157fef3..31cac7a62 100644 --- a/lightningd/channel/channel.c +++ b/lightningd/channel/channel.c @@ -122,6 +122,10 @@ struct peer { /* We save calculated commit sigs while waiting for master approval */ struct commit_sigs *next_commit_sigs; + /* If master told us to shut down, this contains scriptpubkey until + * we're ready to send it. */ + u8 *unsent_shutdown_scriptpubkey; + /* Information used for reestablishment. */ bool last_was_revoke; struct changed_htlc *last_sent_commit; @@ -428,6 +432,26 @@ static u8 *sending_commitsig_msg(const tal_t *ctx, return msg; } +/* BOLT #2: + * + * A node MUST NOT send a `shutdown` if there are updates pending on + * the receiving node's commitment transaction. 
+ */ +/* So we only call this after reestablish or immediately after sending commit */ +static void maybe_send_shutdown(struct peer *peer) +{ + u8 *msg; + + if (!peer->unsent_shutdown_scriptpubkey) + return; + + msg = towire_shutdown(peer, &peer->channel_id, + peer->unsent_shutdown_scriptpubkey); + msg_enqueue(&peer->peer_out, take(msg)); + peer->unsent_shutdown_scriptpubkey + = tal_free(peer->unsent_shutdown_scriptpubkey); +} + /* Master has acknowledged that we're sending commitment, so send it. */ static void handle_sending_commitsig_reply(struct peer *peer, const u8 *msg) { @@ -442,10 +466,10 @@ static void handle_sending_commitsig_reply(struct peer *peer, const u8 *msg) msg_enqueue(&peer->peer_out, take(msg)); peer->next_commit_sigs = tal_free(peer->next_commit_sigs); + maybe_send_shutdown(peer); + /* Timer now considered expired, you can add a new one. */ peer->commit_timer = NULL; - - /* FIXME: In case we had outstanding commits, restart timer */ start_commit_timer(peer); } @@ -573,6 +597,9 @@ static void send_commit(struct peer *peer) if (!channel_sending_commit(peer->channel, &changed_htlcs)) { status_trace("Can't send commit: nothing to send"); + /* Covers the case where we've just been told to shutdown. */ + maybe_send_shutdown(peer); + peer->commit_timer = NULL; tal_free(tmpctx); return; @@ -1200,6 +1227,22 @@ static struct io_plan *handle_pong(struct io_conn *conn, return peer_read_message(conn, &peer->pcs, peer_in); } +static struct io_plan *handle_peer_shutdown(struct io_conn *conn, + struct peer *peer, + const u8 *shutdown) +{ + struct channel_id channel_id; + u8 *scriptpubkey; + + if (!fromwire_shutdown(peer, shutdown, NULL, &channel_id, &scriptpubkey)) + status_failed(WIRE_CHANNEL_PEER_READ_FAILED, "Bad shutdown"); + + /* Tell master, it will tell us what to send. 
*/ + daemon_conn_send(&peer->master, + take(towire_channel_got_shutdown(peer, scriptpubkey))); + return peer_read_message(conn, &peer->pcs, peer_in); +} + static struct io_plan *peer_in(struct io_conn *conn, struct peer *peer, u8 *msg) { enum wire_type type = fromwire_peektype(msg); @@ -1248,6 +1291,8 @@ static struct io_plan *peer_in(struct io_conn *conn, struct peer *peer, u8 *msg) return handle_ping(conn, peer, msg); case WIRE_PONG: return handle_pong(conn, peer, msg); + case WIRE_SHUTDOWN: + return handle_peer_shutdown(conn, peer, msg); case WIRE_INIT: case WIRE_ERROR: @@ -1258,7 +1303,6 @@ static struct io_plan *peer_in(struct io_conn *conn, struct peer *peer, u8 *msg) case WIRE_CHANNEL_REESTABLISH: goto badmessage; - case WIRE_SHUTDOWN: case WIRE_CLOSING_SIGNED: case WIRE_UPDATE_FEE: peer_failed(io_conn_fd(peer->peer_conn), @@ -1499,6 +1543,13 @@ static struct io_plan *handle_peer_reestablish(struct io_conn *conn, if (retransmit_revoke_and_ack && peer->last_was_revoke) resend_revoke(peer); + /* BOLT #2: + * + * On reconnection if the node has sent a previous `shutdown` it MUST + * retransmit it + */ + maybe_send_shutdown(peer); + /* Start commit timer: if we sent revoke we might need it. */ start_commit_timer(peer); @@ -1606,6 +1657,7 @@ static void init_channel(struct peer *peer) &peer->funding_locked[REMOTE], &peer->short_channel_ids[LOCAL], &reconnected, + &peer->unsent_shutdown_scriptpubkey, &funding_signed)) status_failed(WIRE_CHANNEL_BAD_COMMAND, "Init: %s", tal_hex(msg, msg)); @@ -1903,6 +1955,18 @@ static void handle_ping_cmd(struct peer *peer, const u8 *inmsg) peer->num_pings_outstanding++; } +static void handle_shutdown_cmd(struct peer *peer, const u8 *inmsg) +{ + u8 *scriptpubkey; + + if (!fromwire_channel_send_shutdown(peer, inmsg, NULL, &scriptpubkey)) + status_failed(WIRE_CHANNEL_BAD_COMMAND, "Bad send_shutdown"); + + /* We can't send this until commit (if any) is done, so start timer. 
*/ + peer->unsent_shutdown_scriptpubkey = scriptpubkey; + start_commit_timer(peer); +} + static struct io_plan *req_in(struct io_conn *conn, struct daemon_conn *master) { struct peer *peer = container_of(master, struct peer, master); @@ -1946,6 +2010,9 @@ static struct io_plan *req_in(struct io_conn *conn, struct daemon_conn *master) case WIRE_CHANNEL_PING: handle_ping_cmd(peer, master->msg_in); goto out; + case WIRE_CHANNEL_SEND_SHUTDOWN: + handle_shutdown_cmd(peer, master->msg_in); + goto out; case WIRE_CHANNEL_BAD_COMMAND: case WIRE_CHANNEL_HSM_FAILED: @@ -1967,6 +2034,7 @@ static struct io_plan *req_in(struct io_conn *conn, struct daemon_conn *master) case WIRE_CHANNEL_GOT_COMMITSIG_REPLY: case WIRE_CHANNEL_GOT_REVOKE_REPLY: case WIRE_CHANNEL_GOT_FUNDING_LOCKED: + case WIRE_CHANNEL_GOT_SHUTDOWN: break; } status_failed(WIRE_CHANNEL_BAD_COMMAND, "%u %s", t, @@ -2030,6 +2098,7 @@ int main(int argc, char *argv[]) msg_queue_init(&peer->master_deferred, peer); msg_queue_init(&peer->peer_out, peer); peer->next_commit_sigs = NULL; + peer->unsent_shutdown_scriptpubkey = NULL; /* We send these to HSM to get real signatures; don't have valgrind * complain. */ diff --git a/lightningd/channel/channel_wire.csv b/lightningd/channel/channel_wire.csv index 64ae9d3cc..4e89929c0 100644 --- a/lightningd/channel/channel_wire.csv +++ b/lightningd/channel/channel_wire.csv @@ -64,6 +64,8 @@ channel_init,,local_funding_locked,bool channel_init,,remote_funding_locked,bool channel_init,,funding_short_id,struct short_channel_id channel_init,,reestablish,bool +channel_init,,shutdown_scriptpubkey_len,u16 +channel_init,,shutdown_scriptpubkey,shutdown_scriptpubkey_len*u8 channel_init,,init_peer_pkt_len,u16 channel_init,,init_peer_pkt,init_peer_pkt_len*u8 @@ -166,3 +168,13 @@ channel_got_revoke,,changed,num_changed*struct changed_htlc # Wait for reply, to make sure it's on disk before we continue # (eg. if we sent another commitment_signed, that would implicitly ack). 
channel_got_revoke_reply,122 + +# Tell peer that channel is shutting down +channel_send_shutdown,23 +channel_send_shutdown,,scriptpubkey_len,u16 +channel_send_shutdown,,scriptpubkey,scriptpubkey_len*u8 + +# Peer told us that channel is shutting down +channel_got_shutdown,24 +channel_got_shutdown,,scriptpubkey_len,u16 +channel_got_shutdown,,scriptpubkey,scriptpubkey_len*u8 diff --git a/lightningd/peer_control.c b/lightningd/peer_control.c index 64b478534..45f22c5e1 100644 --- a/lightningd/peer_control.c +++ b/lightningd/peer_control.c @@ -164,6 +164,7 @@ void peer_set_condition(struct peer *peer, enum peer_state old_state, fatal("peer state %s should be %s", peer_state_name(peer->state), peer_state_name(old_state)); + /* FIXME: save to db */ peer->state = state; } @@ -316,12 +317,11 @@ static bool peer_reconnected(struct lightningd *ld, case CHANNELD_AWAITING_LOCKIN: case CHANNELD_NORMAL: + case CHANNELD_SHUTTING_DOWN: /* We need the gossipfd now */ get_gossip_fd_for_reconnect(ld, id, peer->unique_id, fd, cs); return true; - case SHUTDOWND_SENT: - case SHUTDOWND_RCVD: case CLOSINGD_SIGEXCHANGE: case ONCHAIND_CHEATED: case ONCHAIND_THEIR_UNILATERAL: @@ -375,6 +375,8 @@ void add_peer(struct lightningd *ld, u64 unique_id, peer->channel_info = NULL; peer->last_was_revoke = false; peer->last_sent_commit = NULL; + peer->remote_shutdown_scriptpubkey = NULL; + peer->local_shutdown_idx = -1; peer->commit_index[LOCAL] = peer->commit_index[REMOTE] = peer->num_revocations_received = 0; @@ -942,6 +944,75 @@ static int peer_got_funding_locked(struct peer *peer, const u8 *msg) return 0; } +static u8 *p2wpkh_for_keyidx(const tal_t *ctx, struct lightningd *ld, u64 keyidx) +{ + struct pubkey shutdownkey; + + if (!bip32_pubkey(ld->bip32_base, &shutdownkey, keyidx)) + return NULL; + + return scriptpubkey_p2wpkh(ctx, &shutdownkey); +} + +static int peer_got_shutdown(struct peer *peer, const u8 *msg) +{ + u8 *scriptpubkey; + + if (!fromwire_channel_got_shutdown(peer, msg, NULL, 
&scriptpubkey)) { + log_broken(peer->log, "bad channel_got_shutdown %s", + tal_hex(peer, msg)); + return -1; + } + + /* FIXME: Add to spec that we must allow repeated shutdown! */ + peer->remote_shutdown_scriptpubkey + = tal_free(peer->remote_shutdown_scriptpubkey); + peer->remote_shutdown_scriptpubkey = scriptpubkey; + /* FIXME: Save to db */ + + if (peer->local_shutdown_idx == -1) { + u8 *scriptpubkey; + + peer->local_shutdown_idx = wallet_get_newindex(peer->ld); + if (peer->local_shutdown_idx == -1) { + peer_internal_error(peer, + "Can't get local shutdown index"); + return -1; + } + /* FIXME: Save to db */ + + peer_set_condition(peer, CHANNELD_NORMAL, CHANNELD_SHUTTING_DOWN); + + /* BOLT #2: + * + * A sending node MUST set `scriptpubkey` to one of the + * following forms: + * + * ...3. `OP_0` `20` 20-bytes (version 0 pay to witness pubkey), + */ + scriptpubkey = p2wpkh_for_keyidx(msg, peer->ld, + peer->local_shutdown_idx); + if (!scriptpubkey) { + peer_internal_error(peer, + "Can't get shutdown script %"PRIu64, + peer->local_shutdown_idx); + return -1; + } + + /* BOLT #2: + * + * A receiving node MUST reply to a `shutdown` message with a + * `shutdown` once there are no outstanding updates on the + * peer, unless it has already sent a `shutdown`. + */ + subd_send_msg(peer->owner, + take(towire_channel_send_shutdown(peer, + scriptpubkey))); + } + + return 0; +} + static int peer_got_bad_message(struct peer *peer, const u8 *msg) { u8 *err; @@ -975,6 +1046,8 @@ static int channel_msg(struct subd *sd, const u8 *msg, const int *unused) return peer_channel_announced(sd->peer, msg); case WIRE_CHANNEL_GOT_FUNDING_LOCKED: return peer_got_funding_locked(sd->peer, msg); + case WIRE_CHANNEL_GOT_SHUTDOWN: + return peer_got_shutdown(sd->peer, msg); /* We let peer_owner_finished handle these as transient errors. 
*/ case WIRE_CHANNEL_BAD_COMMAND: @@ -1001,6 +1074,7 @@ static int channel_msg(struct subd *sd, const u8 *msg, const int *unused) case WIRE_CHANNEL_GOT_COMMITSIG_REPLY: case WIRE_CHANNEL_GOT_REVOKE_REPLY: case WIRE_CHANNEL_SENDING_COMMITSIG_REPLY: + case WIRE_CHANNEL_SEND_SHUTDOWN: /* Replies go to requests. */ case WIRE_CHANNEL_OFFER_HTLC_REPLY: case WIRE_CHANNEL_PING_REPLY: @@ -1027,6 +1101,7 @@ static bool peer_start_channeld(struct peer *peer, struct failed_htlc *failed_htlcs; enum side *failed_sides; struct short_channel_id funding_channel_id; + const u8 *shutdown_scriptpubkey; /* Now we can consider balance set. */ peer->balance = tal(peer, u64); @@ -1075,6 +1150,13 @@ static bool peer_start_channeld(struct peer *peer, memset(&funding_channel_id, 0, sizeof(funding_channel_id)); } + if (peer->local_shutdown_idx != -1) { + shutdown_scriptpubkey + = p2wpkh_for_keyidx(tmpctx, peer->ld, + peer->local_shutdown_idx); + } else + shutdown_scriptpubkey = NULL; + initmsg = towire_channel_init(tmpctx, peer->funding_txid, peer->funding_outnum, @@ -1111,6 +1193,7 @@ static bool peer_start_channeld(struct peer *peer, peer->remote_funding_locked, &funding_channel_id, peer->reconnected, + shutdown_scriptpubkey, funding_signed); /* We don't expect a response: we are triggered by funding_depth_cb. */ @@ -1525,6 +1608,71 @@ static const struct json_command fund_channel_command = { }; AUTODATA(json_command, &fund_channel_command); +static void json_close(struct command *cmd, + const char *buffer, const jsmntok_t *params) +{ + struct lightningd *ld = ld_from_dstate(cmd->dstate); + jsmntok_t *peertok; + struct peer *peer; + + if (!json_get_params(buffer, params, + "id", &peertok, + NULL)) { + command_fail(cmd, "Need id"); + return; + } + + peer = peer_from_json(ld, buffer, peertok); + if (!peer) { + command_fail(cmd, "Could not find peer with that id"); + return; + } + + /* Easy case: peer can simply be forgotten. 
*/ + if (!peer_persists(peer)) { + peer_fail_permanent(peer, NULL); + command_success(cmd, null_response(cmd)); + return; + } + + /* Normal case. */ + if (peer->state == CHANNELD_NORMAL) { + u8 *shutdown_scriptpubkey; + + peer->local_shutdown_idx = wallet_get_newindex(peer->ld); + if (peer->local_shutdown_idx == -1) { + command_fail(cmd, "Failed to get new key for shutdown"); + return; + } + shutdown_scriptpubkey = p2wpkh_for_keyidx(cmd, peer->ld, + peer->local_shutdown_idx); + if (!shutdown_scriptpubkey) { + command_fail(cmd, "Failed to get script for shutdown"); + return; + } + + peer_set_condition(peer, CHANNELD_NORMAL, CHANNELD_SHUTTING_DOWN); + + if (peer->owner) + subd_send_msg(peer->owner, + take(towire_channel_send_shutdown(peer, + shutdown_scriptpubkey))); + + command_success(cmd, null_response(cmd)); + } else + command_fail(cmd, "Peer is in state %s", + peer_state_name(peer->state)); +} + +static const struct json_command close_command = { + "close", + json_close, + "Close the channel with peer {id}", + "Returns an empty result on success" +}; +AUTODATA(json_command, &close_command); + + const char *peer_state_name(enum peer_state state) { size_t i; diff --git a/lightningd/peer_control.h b/lightningd/peer_control.h index daea9e73e..9e238ed4e 100644 --- a/lightningd/peer_control.h +++ b/lightningd/peer_control.h @@ -80,6 +80,11 @@ struct peer { /* Secret seed (FIXME: Move to hsm!) */ struct privkey *seed; + /* Their scriptpubkey if they sent shutdown. */ + u8 *remote_shutdown_scriptpubkey; + /* Our key for shutdown (-1 if not chosen yet) */ + s64 local_shutdown_idx; + /* Reestablishment stuff: last sent commit and revocation details. 
*/ bool last_was_revoke; struct changed_htlc *last_sent_commit; @@ -93,8 +98,7 @@ static inline bool peer_can_add_htlc(const struct peer *peer) static inline bool peer_can_remove_htlc(const struct peer *peer) { return peer->state == CHANNELD_NORMAL - || peer->state == SHUTDOWND_SENT - || peer->state == SHUTDOWND_RCVD + || peer->state == CHANNELD_SHUTTING_DOWN || peer->state == ONCHAIND_THEIR_UNILATERAL || peer->state == ONCHAIND_OUR_UNILATERAL; } diff --git a/lightningd/peer_state.h b/lightningd/peer_state.h index 0cbac893c..3a6e8d4c3 100644 --- a/lightningd/peer_state.h +++ b/lightningd/peer_state.h @@ -18,9 +18,7 @@ enum peer_state { CHANNELD_NORMAL, /* We are closing, pending HTLC resolution. */ - SHUTDOWND_SENT, - /* Both are closing, pending HTLC resolution. */ - SHUTDOWND_RCVD, + CHANNELD_SHUTTING_DOWN, /* Exchanging signatures on closing tx. */ CLOSINGD_SIGEXCHANGE, diff --git a/tests/test_lightningd.py b/tests/test_lightningd.py index 3bab21388..0692a9142 100644 --- a/tests/test_lightningd.py +++ b/tests/test_lightningd.py @@ -294,6 +294,29 @@ class LightningDTests(BaseLightningDTests): l1.rpc.sendpay(to_json([routestep]), rhash) assert l2.rpc.listinvoice('testpayment3')[0]['complete'] == True + def test_closing(self): + l1,l2 = self.connect() + + self.fund_channel(l1, l2, 10**6) + amt = 200000000 + rhash = l2.rpc.invoice(amt, 'testpayment2')['rhash'] + assert l2.rpc.listinvoice('testpayment2')[0]['complete'] == False + + routestep = { + 'msatoshi' : amt, + 'id' : l2.info['id'], + 'delay' : 5, + 'channel': '1:1:1' + } + + # This works. + l1.rpc.sendpay(to_json([routestep]), rhash) + assert l2.rpc.listinvoice('testpayment2')[0]['complete'] == True + + # This should return, then close. + l1.rpc.close(l2.info['id']); + l1.daemon.wait_for_log('-> CHANNELD_SHUTTING_DOWN') + l2.daemon.wait_for_log('-> CHANNELD_SHUTTING_DOWN') def test_gossip_jsonrpc(self): l1,l2 = self.connect()