lightningd: don't explicitly tell connectd to disconnect, have it do it on sending error/warning.

Connectd already does this when we *receive* an error or warning; now
it also does so on send.  This causes a slight behaviour change: we no
longer disconnect when we close a channel, for example (our behaviour
here has been inconsistent across versions, depending on the code).

When connectd is told to disconnect, it now does so immediately,
without waiting for subds to drain etc.  That simplifies the manual
disconnect case, which now cleans up as it would from any other
disconnection, once connectd says it's disconnected.
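
In short, the send path in encrypt_and_send() now treats an outgoing
error or warning as the start of the disconnect.  Condensed sketch of
the encrypt_and_send() hunk below (all names are from that hunk,
surrounding code elided):

	if (type == WIRE_ERROR || type == WIRE_WARNING) {
		/* Mark the peer as dying (unless a drain is already
		 * under way)... */
		if (!peer->draining)
			drain_peer(peer);
		/* ...and half-close the socket once this final
		 * message has been written out. */
		next = io_sock_shutdown_cb;
	}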

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Rusty Russell
2022-07-18 21:42:28 +09:30
committed by neil saitug
parent 2962b93199
commit a3c4908f4a
16 changed files with 75 additions and 146 deletions

View File

@@ -1868,7 +1868,7 @@ static void peer_discard(struct daemon *daemon, const u8 *msg)
 	if (peer->counter != counter)
 		return;
 	status_peer_debug(&id, "discard_peer");
-	drain_peer(peer);
+	tal_free(peer);
 }
 
 /* lightningd tells us to send a msg and disconnect. */
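
The counter check kept as context above is what makes the immediate
tal_free() safe: every fresh connection bumps the peer's counter, and
lightningd's discard request quotes the counter it last saw.  Annotated
sketch of those two lines (comments are mine, not source):

	/* A discard that crossed paths with a reconnection quotes a
	 * stale counter; it refers to a connection that is already
	 * gone, so it must not kill the new one. */
	if (peer->counter != counter)
		return;
	/* Otherwise free the peer outright: no draining, no waiting
	 * for subds, the connection just closes. */
	tal_free(peer);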

View File

@@ -109,7 +109,7 @@ static void close_subd_timeout(struct subd *subd)
 	io_close(subd->conn);
 }
 
-void drain_peer(struct peer *peer)
+static void drain_peer(struct peer *peer)
 {
 	status_debug("drain_peer");
 	assert(!peer->draining);
@@ -403,6 +403,12 @@ static bool is_urgent(enum peer_wire type)
 	return false;
 }
 
+/* io_sock_shutdown, but in format suitable for an io_plan callback */
+static struct io_plan *io_sock_shutdown_cb(struct io_conn *conn, struct peer *unused)
+{
+	return io_sock_shutdown(conn);
+}
+
 static struct io_plan *encrypt_and_send(struct peer *peer,
 					const u8 *msg TAKES,
 					struct io_plan *(*next)
@@ -438,6 +444,21 @@ static struct io_plan *encrypt_and_send(struct peer *peer,
 #endif
 	set_urgent_flag(peer, is_urgent(type));
 
+	/* BOLT #1:
+	 *
+	 * A sending node:
+	 *...
+	 *   - MAY close the connection after sending.
+	 */
+	if (type == WIRE_ERROR || type == WIRE_WARNING) {
+		/* Might already be draining... */
+		if (!peer->draining)
+			drain_peer(peer);
+
+		/* Close as soon as we've sent this. */
+		next = io_sock_shutdown_cb;
+	}
+
 	/* We free this and the encrypted version in next write_to_peer */
 	peer->sent_to_peer = cryptomsg_encrypt_msg(peer, &peer->cs, msg);
 	return io_write(peer->to_peer,
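
The half-close via io_sock_shutdown() is the point: the write side is
shut down only after the final error/warning has been queued, so the
kernel still delivers it before FIN, whereas an abrupt close could
discard it.  A minimal standalone sketch of the same idea on a raw
socket (hypothetical helper, not CLN or ccan/io source):

	#include <sys/socket.h>
	#include <unistd.h>

	static void send_final_and_shutdown(int fd, const void *msg, size_t len)
	{
		if (write(fd, msg, len) < 0)
			return;	/* error handling elided for brevity */
		/* SHUT_WR: we write nothing more, but queued bytes
		 * still go out, followed by FIN; the peer can read the
		 * message before it sees EOF. */
		shutdown(fd, SHUT_WR);
	}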

View File

@@ -22,10 +22,6 @@ void multiplex_final_msg(struct peer *peer,
  * this does io logging. */
 void inject_peer_msg(struct peer *peer, const u8 *msg TAKES);
 
-/* Start closing the peer: removes itself from hash table, frees itself
- * once done. */
-void drain_peer(struct peer *peer);
-
 void setup_peer_gossip_store(struct peer *peer,
 			     const struct feature_set *our_features,
 			     const u8 *their_features);

View File

@@ -22,14 +22,10 @@
 void channel_set_owner(struct channel *channel, struct subd *owner)
 {
 	struct subd *old_owner = channel->owner;
-	bool was_connected = channel_is_connected(channel);
 
 	channel->owner = owner;
-	if (old_owner) {
+	if (old_owner)
 		subd_release_channel(old_owner, channel);
-		if (was_connected && !channel_is_connected(channel))
-			maybe_disconnect_peer(channel->peer->ld, channel->peer);
-	}
 }
 
 struct htlc_out *channel_has_htlc_out(struct channel *channel)

View File

@@ -641,33 +641,6 @@ void connectd_activate(struct lightningd *ld)
 	assert(ret == ld->connectd);
 }
 
-void maybe_disconnect_peer(struct lightningd *ld, struct peer *peer)
-{
-	struct channel *channel;
-
-	/* Any channels left which want to talk? */
-	if (peer->uncommitted_channel)
-		return;
-
-	list_for_each(&peer->channels, channel, list)
-		if (channel_is_connected(channel))
-			return;
-
-	/* If shutting down, connectd no longer exists.
-	 * FIXME: Call peer_disconnect_done(), but nobody cares. */
-	if (!ld->connectd) {
-		peer->connected = PEER_DISCONNECTED;
-		return;
-	}
-
-	/* If connectd was the one who told us to cleanup peer, don't
-	 * tell it to discard again: it might have reconnected! */
-	if (peer->connected == PEER_CONNECTED)
-		subd_send_msg(ld->connectd,
-			      take(towire_connectd_discard_peer(NULL, &peer->id,
-								peer->connectd_counter)));
-}
-
 static struct command_result *json_sendcustommsg(struct command *cmd,
 						 const char *buffer,
 						 const jsmntok_t *obj UNNEEDED,

View File

@@ -24,7 +24,4 @@ void connect_failed_disconnect(struct lightningd *ld,
 			       const struct node_id *id,
 			       const struct wireaddr_internal *addr);
 
-/* Disconnect a peer (if no subds want to talk any more) */
-void maybe_disconnect_peer(struct lightningd *ld, struct peer *peer);
-
 #endif /* LIGHTNING_LIGHTNINGD_CONNECT_CONTROL_H */

View File

@@ -31,7 +31,6 @@ static void destroy_uncommitted_channel(struct uncommitted_channel *uc)
 	uc->peer->uncommitted_channel = NULL;
 
-	maybe_disconnect_peer(uc->peer->ld, uc->peer);
 	maybe_delete_peer(uc->peer);
 }

View File

@@ -1539,11 +1539,6 @@ void peer_disconnect_done(struct lightningd *ld, const u8 *msg)
 		assert(p->connectd_counter == connectd_counter);
 		log_peer_debug(ld->log, &id, "peer_disconnect_done");
 		p->connected = PEER_DISCONNECTED;
-
-		/* If there are literally no channels, might as well
-		 * free immediately. */
-		if (!p->uncommitted_channel && list_empty(&p->channels))
-			p = tal_free(p);
 	}
 
 	/* If you were trying to connect, it failed. */
@@ -1561,6 +1556,10 @@ void peer_disconnect_done(struct lightningd *ld, const u8 *msg)
 			was_pending(command_success(i->cmd,
 						    json_stream_success(i->cmd)));
 	}
+
+	/* If connection was only thing keeping it, this will delete it. */
+	if (p)
+		maybe_delete_peer(p);
 }
 
 static bool check_funding_details(const struct bitcoin_tx *tx,
@@ -2084,9 +2083,8 @@ static struct command_result *json_disconnect(struct command *cmd,
 	struct node_id *id;
 	struct disconnect_command *dc;
 	struct peer *peer;
-	struct channel *channel, **channels;
+	struct channel *channel;
 	bool *force;
-	bool disconnected = false;
 
 	if (!param(cmd, buffer, params,
 		   p_req("id", param_node_id, &id),
@@ -2109,58 +2107,11 @@ static struct command_result *json_disconnect(struct command *cmd,
 				    channel_state_name(channel));
 	}
 
-	/* Careful here! Disconnecting can free peer! */
-	channels = tal_arr(cmd, struct channel *, 0);
-	list_for_each(&peer->channels, channel, list) {
-		if (!channel->owner)
-			continue;
-		if (!channel->owner->talks_to_peer)
-			continue;
-		switch (channel->state) {
-		case DUALOPEND_OPEN_INIT:
-		case CHANNELD_AWAITING_LOCKIN:
-		case CHANNELD_NORMAL:
-		case CHANNELD_SHUTTING_DOWN:
-		case DUALOPEND_AWAITING_LOCKIN:
-		case CLOSINGD_SIGEXCHANGE:
-			tal_arr_expand(&channels, channel);
-			continue;
-		case CLOSINGD_COMPLETE:
-		case AWAITING_UNILATERAL:
-		case FUNDING_SPEND_SEEN:
-		case ONCHAIN:
-		case CLOSED:
-			/* We don't expect these to have owners who connect! */
-			log_broken(channel->log,
-				   "Don't expect owner %s in state %s",
-				   channel->owner->name,
-				   channel_state_name(channel));
-			continue;
-		}
-		abort();
-	}
-
-	/* This can free peer too! */
-	if (peer->uncommitted_channel) {
-		kill_uncommitted_channel(peer->uncommitted_channel,
-					 "disconnect command");
-		disconnected = true;
-	}
-
-	for (size_t i = 0; i < tal_count(channels); i++) {
-		if (channel_unsaved(channels[i]))
-			channel_unsaved_close_conn(channels[i],
-						   "disconnect command");
-		else
-			channel_fail_reconnect(channels[i],
					       "disconnect command");
-		disconnected = true;
-	}
-
-	/* It's just sitting in connectd? */
-	if (!disconnected)
-		maybe_disconnect_peer(cmd->ld, peer);
+	/* If it's not already disconnecting, tell connectd to disconnect */
+	if (peer->connected == PEER_CONNECTED)
+		subd_send_msg(peer->ld->connectd,
+			      take(towire_connectd_discard_peer(NULL, &peer->id,
+								peer->connectd_counter)));
 
 	/* Connectd tells us when it's finally disconnected */
 	dc = tal(cmd, struct disconnect_command);
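
With that, a manual disconnect is a single round trip.  Roughly
(message and struct names as in the hunks above; wire framing omitted):

	/* lightningd                          connectd
	 *     |----- connectd_discard_peer ----->|  (only if PEER_CONNECTED)
	 *     |                                  |  peer_discard(): counter
	 *     |                                  |  matches, tal_free(peer),
	 *     |                                  |  connection closed at once
	 *     |<----- peer_disconnect_done ------|
	 *     resolves the pending struct disconnect_command
	 */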

View File

@@ -387,7 +387,9 @@ void json_add_short_channel_id(struct json_stream *response UNNEEDED,
 			       const struct short_channel_id *id UNNEEDED)
 { fprintf(stderr, "json_add_short_channel_id called!\n"); abort(); }
 /* Generated stub for json_add_string */
-void json_add_string(struct json_stream *result UNNEEDED, const char *fieldname UNNEEDED, const char *value TAKES UNNEEDED)
+void json_add_string(struct json_stream *js UNNEEDED,
+		     const char *fieldname UNNEEDED,
+		     const char *str TAKES UNNEEDED)
 { fprintf(stderr, "json_add_string called!\n"); abort(); }
 /* Generated stub for json_add_stringn */
 void json_add_stringn(struct json_stream *result UNNEEDED, const char *fieldname UNNEEDED,
@@ -509,9 +511,6 @@ void log_(struct log *log UNNEEDED, enum log_level level UNNEEDED,
 	  const char *fmt UNNEEDED, ...)
 { fprintf(stderr, "log_ called!\n"); abort(); }
-/* Generated stub for maybe_disconnect_peer */
-void maybe_disconnect_peer(struct lightningd *ld UNNEEDED, struct peer *peer UNNEEDED)
-{ fprintf(stderr, "maybe_disconnect_peer called!\n"); abort(); }
 /* Generated stub for merkle_tlv */
 void merkle_tlv(const struct tlv_field *fields UNNEEDED, struct sha256 *merkle UNNEEDED)
 { fprintf(stderr, "merkle_tlv called!\n"); abort(); }
@@ -716,6 +715,9 @@ u8 *towire_channeld_dev_memleak(const tal_t *ctx UNNEEDED)
 /* Generated stub for towire_channeld_dev_reenable_commit */
 u8 *towire_channeld_dev_reenable_commit(const tal_t *ctx UNNEEDED)
 { fprintf(stderr, "towire_channeld_dev_reenable_commit called!\n"); abort(); }
+/* Generated stub for towire_connectd_discard_peer */
+u8 *towire_connectd_discard_peer(const tal_t *ctx UNNEEDED, const struct node_id *id UNNEEDED, u64 counter UNNEEDED)
+{ fprintf(stderr, "towire_connectd_discard_peer called!\n"); abort(); }
 /* Generated stub for towire_connectd_peer_connect_subd */
 u8 *towire_connectd_peer_connect_subd(const tal_t *ctx UNNEEDED, const struct node_id *id UNNEEDED, u64 counter UNNEEDED, const struct channel_id *channel_id UNNEEDED)
 { fprintf(stderr, "towire_connectd_peer_connect_subd called!\n"); abort(); }

View File

@@ -47,7 +47,9 @@ void json_add_str_fmt(struct json_stream *js UNNEEDED,
 		      const char *fmt UNNEEDED, ...)
 { fprintf(stderr, "json_add_str_fmt called!\n"); abort(); }
 /* Generated stub for json_add_string */
-void json_add_string(struct json_stream *result UNNEEDED, const char *fieldname UNNEEDED, const char *value TAKES UNNEEDED)
+void json_add_string(struct json_stream *js UNNEEDED,
+		     const char *fieldname UNNEEDED,
+		     const char *str TAKES UNNEEDED)
 { fprintf(stderr, "json_add_string called!\n"); abort(); }
 /* Generated stub for json_add_time */
 void json_add_time(struct json_stream *result UNNEEDED, const char *fieldname UNNEEDED,

View File

@@ -75,7 +75,9 @@ void json_add_short_channel_id(struct json_stream *response UNNEEDED,
 			       const struct short_channel_id *id UNNEEDED)
 { fprintf(stderr, "json_add_short_channel_id called!\n"); abort(); }
 /* Generated stub for json_add_string */
-void json_add_string(struct json_stream *result UNNEEDED, const char *fieldname UNNEEDED, const char *value TAKES UNNEEDED)
+void json_add_string(struct json_stream *js UNNEEDED,
+		     const char *fieldname UNNEEDED,
+		     const char *str TAKES UNNEEDED)
 { fprintf(stderr, "json_add_string called!\n"); abort(); }
 /* Generated stub for json_add_timeabs */
 void json_add_timeabs(struct json_stream *result UNNEEDED, const char *fieldname UNNEEDED,
@@ -155,12 +157,10 @@ bool json_to_u16(const char *buffer UNNEEDED, const jsmntok_t *tok UNNEEDED,
 		 uint16_t *num UNNEEDED)
 { fprintf(stderr, "json_to_u16 called!\n"); abort(); }
 /* Generated stub for json_to_u32 */
-bool json_to_u32(const char *buffer UNNEEDED, const jsmntok_t *tok UNNEEDED,
-		 uint32_t *num UNNEEDED)
+bool json_to_u32(const char *buffer UNNEEDED, const jsmntok_t *tok UNNEEDED, u32 *num UNNEEDED)
 { fprintf(stderr, "json_to_u32 called!\n"); abort(); }
 /* Generated stub for json_to_u64 */
-bool json_to_u64(const char *buffer UNNEEDED, const jsmntok_t *tok UNNEEDED,
-		 uint64_t *num UNNEEDED)
+bool json_to_u64(const char *buffer UNNEEDED, const jsmntok_t *tok UNNEEDED, u64 *num UNNEEDED)
 { fprintf(stderr, "json_to_u64 called!\n"); abort(); }
 /* Generated stub for json_tok_bin_from_hex */
 u8 *json_tok_bin_from_hex(const tal_t *ctx UNNEEDED, const char *buffer UNNEEDED, const jsmntok_t *tok UNNEEDED)

View File

@@ -550,7 +550,7 @@ def test_penalty_inhtlc(node_factory, bitcoind, executor, chainparams):
     bitcoind.generate_block(100)
 
     sync_blockheight(bitcoind, [l1, l2])
-    wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 0)
+    wait_for(lambda: only_one(l2.rpc.listpeers()['peers'])['channels'] == [])
 
     # Do one last pass over the logs to extract the reactions l2 sent
     l2.daemon.logsearch_start = needle
@@ -679,7 +679,7 @@ def test_penalty_outhtlc(node_factory, bitcoind, executor, chainparams):
     bitcoind.generate_block(100)
 
     sync_blockheight(bitcoind, [l1, l2])
-    wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 0)
+    wait_for(lambda: only_one(l2.rpc.listpeers()['peers'])['channels'] == [])
 
     # Do one last pass over the logs to extract the reactions l2 sent
     l2.daemon.logsearch_start = needle
@@ -3447,10 +3447,8 @@ def test_you_forgot_closed_channel(node_factory, executor):
     wait_for(lambda: only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['state'] == 'CLOSINGD_COMPLETE')
     assert only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['state'] == 'CLOSINGD_SIGEXCHANGE'
 
-    # l2 closes on us.
-    wait_for(lambda: only_one(l1.rpc.listpeers()['peers'])['connected'] is False)
-
-    # l1 reconnects, it should succeed.
+    # l1 won't send anything else until we reconnect, then it should succeed.
+    l1.rpc.disconnect(l2.info['id'], force=True)
     l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
     fut.result(TIMEOUT)
@@ -3486,8 +3484,7 @@ def test_you_forgot_closed_channel_onchain(node_factory, bitcoind, executor):
     wait_for(lambda: only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['state'] == 'ONCHAIN')
 
     # l1 reconnects, it should succeed.
-    # l1 will disconnect once it sees block
-    wait_for(lambda: only_one(l1.rpc.listpeers()['peers'])['connected'] is False)
+    l1.rpc.disconnect(l2.info['id'], force=True)
     l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
     fut.result(TIMEOUT)

View File

@@ -956,8 +956,10 @@ def test_shutdown_awaiting_lockin(node_factory, bitcoind):
     l2.daemon.wait_for_log(' to ONCHAIN')
 
     bitcoind.generate_block(100)
-    wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
-    wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
+
+    # Won't disconnect!
+    wait_for(lambda: only_one(l1.rpc.listpeers()['peers'])['channels'] == [])
+    wait_for(lambda: only_one(l2.rpc.listpeers()['peers'])['channels'] == [])
 
 
 @pytest.mark.openchannel('v1')
@@ -1308,12 +1310,7 @@ def test_funding_external_wallet_corners(node_factory, bitcoind):
     assert l1.rpc.fundchannel_cancel(l2.info['id'])['cancelled']
     assert len(l1.rpc.listpeers()['peers']) == 0
 
-    # l2 still has the channel open/waiting
-    wait_for(lambda: only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['state']
-             == 'CHANNELD_AWAITING_LOCKIN')
-
     # on reconnect, channel should get destroyed
-    wait_for(lambda: l1.rpc.listpeers(l2.info['id'])['peers'] == [])
     l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
     l1.daemon.wait_for_log('Unknown channel .* for WIRE_CHANNEL_REESTABLISH')
     wait_for(lambda: len(l1.rpc.listpeers()['peers']) == 0)
@@ -2535,13 +2532,10 @@ def test_multiple_channels(node_factory):
     l1 = node_factory.get_node()
     l2 = node_factory.get_node()
 
-    for i in range(3):
-        # FIXME: we shouldn't disconnect on close?
-        ret = l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
-        assert ret['id'] == l2.info['id']
-        l1.daemon.wait_for_log('Handed peer, entering loop')
-        l2.daemon.wait_for_log('Handed peer, entering loop')
+    ret = l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
+    assert ret['id'] == l2.info['id']
+
+    for i in range(3):
         chan, _ = l1.fundchannel(l2, 10**6)
 
         l1.rpc.close(chan)
@@ -2551,7 +2545,6 @@ def test_multiple_channels(node_factory):
     l2.daemon.wait_for_log(
         r'State changed from CLOSINGD_SIGEXCHANGE to CLOSINGD_COMPLETE'
     )
-    wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'] is False)
 
     channels = only_one(l1.rpc.listpeers()['peers'])['channels']
     assert len(channels) == 3
@@ -2581,7 +2574,7 @@ def test_forget_channel(node_factory):
     # Forcing should work
     l1.rpc.dev_forget_channel(l2.info['id'], True)
-    wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
+    wait_for(lambda: only_one(l1.rpc.listpeers()['peers'])['channels'] == [])
 
     # And restarting should keep that peer forgotten
     l1.restart()
@@ -2637,13 +2630,12 @@ def test_peerinfo(node_factory, bitcoind):
     # Close the channel to forget the peer
     l1.rpc.close(chan)
-    wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])
-    wait_for(lambda: not only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected'])
 
     # Make sure close tx hits mempool before we mine blocks.
     bitcoind.generate_block(100, wait_for_mempool=1)
 
     l1.daemon.wait_for_log('onchaind complete, forgetting peer')
     l2.daemon.wait_for_log('onchaind complete, forgetting peer')
+    assert only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels'] == []
+    assert only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['channels'] == []
 
     # The only channel was closed, everybody should have forgotten the nodes
     assert l1.rpc.listnodes()['nodes'] == []
@@ -2728,8 +2720,8 @@ def test_fundee_forget_funding_tx_unconfirmed(node_factory, bitcoind):
     # (Note that we let the last number be anything (hence the {}\d)
     l2.daemon.wait_for_log(r'Forgetting channel: It has been {}\d blocks'.format(str(blocks)[:-1]))
 
-    # fundee will also forget and disconnect from peer.
-    wait_for(lambda: l2.rpc.listpeers(l1.info['id'])['peers'] == [])
+    # fundee will also forget, but not disconnect from peer.
+    wait_for(lambda: only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['channels'] == [])
 
 
 @pytest.mark.developer("needs --dev-max-funding-unconfirmed-blocks")

View File

@@ -117,8 +117,8 @@ def test_max_channel_id(node_factory, bitcoind):
     l2.wait_for_channel_onchain(l1.info['id'])
     bitcoind.generate_block(101)
-    wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
-    wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
+    wait_for(lambda: only_one(l1.rpc.listpeers()['peers'])['channels'] == [])
+    wait_for(lambda: only_one(l2.rpc.listpeers()['peers'])['channels'] == [])
 
     # Stop l2, and restart
     l2.stop()

View File

@@ -740,10 +740,11 @@ def test_openchannel_hook_chaining(node_factory, bitcoind):
     # the third plugin must now not be called anymore
     assert not l2.daemon.is_in_log("reject on principle")
 
+    wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
+    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
+
     # 100000sat is good for hook_accepter, so it should fail 'on principle'
     # at third hook openchannel_reject.py
     with pytest.raises(RpcError, match=r'reject on principle'):
-        l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
         l1.rpc.fundchannel(l2.info['id'], 100000)
     assert l2.daemon.wait_for_log(hook_msg + "reject on principle")

View File

@@ -361,7 +361,9 @@ void json_add_short_channel_id(struct json_stream *response UNNEEDED,
 			       const struct short_channel_id *id UNNEEDED)
 { fprintf(stderr, "json_add_short_channel_id called!\n"); abort(); }
 /* Generated stub for json_add_string */
-void json_add_string(struct json_stream *result UNNEEDED, const char *fieldname UNNEEDED, const char *value TAKES UNNEEDED)
+void json_add_string(struct json_stream *js UNNEEDED,
+		     const char *fieldname UNNEEDED,
+		     const char *str TAKES UNNEEDED)
 { fprintf(stderr, "json_add_string called!\n"); abort(); }
 /* Generated stub for json_add_timeabs */
 void json_add_timeabs(struct json_stream *result UNNEEDED, const char *fieldname UNNEEDED,
@@ -454,9 +456,6 @@ bool json_tok_streq(const char *buffer UNNEEDED, const jsmntok_t *tok UNNEEDED,
 void kill_uncommitted_channel(struct uncommitted_channel *uc UNNEEDED,
 			      const char *why UNNEEDED)
 { fprintf(stderr, "kill_uncommitted_channel called!\n"); abort(); }
-/* Generated stub for maybe_disconnect_peer */
-void maybe_disconnect_peer(struct lightningd *ld UNNEEDED, struct peer *peer UNNEEDED)
-{ fprintf(stderr, "maybe_disconnect_peer called!\n"); abort(); }
 /* Generated stub for new_channel_mvt_invoice_hin */
 struct channel_coin_mvt *new_channel_mvt_invoice_hin(const tal_t *ctx UNNEEDED,
 						     struct htlc_in *hin UNNEEDED,
@@ -743,6 +742,9 @@ u8 *towire_channeld_offer_htlc(const tal_t *ctx UNNEEDED, struct amount_msat amo
 /* Generated stub for towire_channeld_sending_commitsig_reply */
 u8 *towire_channeld_sending_commitsig_reply(const tal_t *ctx UNNEEDED)
 { fprintf(stderr, "towire_channeld_sending_commitsig_reply called!\n"); abort(); }
+/* Generated stub for towire_connectd_discard_peer */
+u8 *towire_connectd_discard_peer(const tal_t *ctx UNNEEDED, const struct node_id *id UNNEEDED, u64 counter UNNEEDED)
+{ fprintf(stderr, "towire_connectd_discard_peer called!\n"); abort(); }
 /* Generated stub for towire_connectd_peer_connect_subd */
 u8 *towire_connectd_peer_connect_subd(const tal_t *ctx UNNEEDED, const struct node_id *id UNNEEDED, u64 counter UNNEEDED, const struct channel_id *channel_id UNNEEDED)
 { fprintf(stderr, "towire_connectd_peer_connect_subd called!\n"); abort(); }