gossip: Disable local channels after loading the gossip_store

We don't have any connection yet, so how could they be active? Disable
both sides to avoid trying to route through them or telling others to
use them as `contact_points` in invoices.

Signed-off-by: Christian Decker <decker.christian@gmail.com>
Committed by: Rusty Russell
Parent: f2dc406172
Commit: c17848a3f3
@@ -1900,6 +1900,26 @@ static void gossip_disable_channel(struct routing_state *rstate, struct chan *ch
 			  tal_hex(tmpctx, err));
 }
 
+static void gossip_disable_local_channels(struct daemon *daemon)
+{
+	struct node *local_node =
+	    get_node(daemon->rstate, &daemon->rstate->local_id);
+	struct chan *c;
+	size_t i;
+
+	/* We don't have a local_node, so we don't have any channels yet
+	 * either */
+	if (!local_node)
+		return;
+
+	for (i=0; i<tal_count(local_node->chans); i++) {
+		c = local_node->chans[i];
+		c->half[0].flags |= ROUTING_FLAGS_DISABLED;
+		c->half[1].flags |= ROUTING_FLAGS_DISABLED;
+		gossip_disable_channel(daemon->rstate, c);
+	}
+}
+
 /* Parse an incoming gossip init message and assign config variables
  * to the daemon.
  */
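For context on the flag being set on both halves: a channel carries one half per direction, and BOLT #7 defines bit 1 of a channel_update's flags as "disable" (bit 0 encodes the direction). The `ROUTING_FLAGS_DISABLED` constant used above is assumed to mirror that bit; its definition is not part of this diff. A minimal, hypothetical sketch of the bit semantics:

# Hypothetical illustration of the BOLT #7 disable bit; these names are not
# taken from the c-lightning tree, and ROUTING_FLAGS_DISABLED is only
# assumed to correspond to this bit.
DIRECTION_BIT = 1 << 0  # which end of the channel published the update
DISABLE_BIT = 1 << 1    # this direction should not be used for routing

def direction_disabled(flags: int) -> bool:
    """True if a channel_update's flags mark its direction as disabled."""
    return bool(flags & DISABLE_BIT)

# Disabling a local channel means setting the bit for *both* directions,
# which is what the loop above does via half[0] and half[1].
half_flags = [0, DIRECTION_BIT]
half_flags = [f | DISABLE_BIT for f in half_flags]
assert all(direction_disabled(f) for f in half_flags)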
@@ -1939,6 +1959,10 @@ static struct io_plan *gossip_init(struct daemon_conn *master,
 	/* Load stored gossip messages */
 	gossip_store_load(daemon->rstate, daemon->rstate->store);
 
+	/* Now disable all local channels, they can't be connected yet. */
+	gossip_disable_local_channels(daemon);
+
+
 	new_reltimer(&daemon->timers, daemon,
 		     time_from_sec(daemon->rstate->prune_timeout/4),
 		     gossip_refresh_network, daemon);
@@ -58,3 +58,39 @@ def test_gossip_pruning(node_factory, bitcoind):
     assert scid2 not in [c['short_channel_id'] for c in l2.rpc.listchannels()['channels']]
     assert l3.info['id'] not in [n['nodeid'] for n in l1.rpc.listnodes()['nodes']]
     assert l3.info['id'] not in [n['nodeid'] for n in l2.rpc.listnodes()['nodes']]
+
+
+@unittest.skipIf(not DEVELOPER, "needs --dev-broadcast-interval, --dev-no-reconnect")
+def test_gossip_disable_channels(node_factory, bitcoind):
+    """Simple test to check that channels get disabled correctly on disconnect and
+    reenabled upon reconnecting
+
+    """
+    opts = {'dev-no-reconnect': None, 'may_reconnect': True}
+    l1, l2 = node_factory.get_nodes(2, opts=opts)
+
+    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
+    scid = l1.fund_channel(l2, 10**6)
+    bitcoind.rpc.generate(5)
+
+    def count_active(node):
+        chans = node.rpc.listchannels()['channels']
+        active = [c for c in chans if c['active']]
+        return len(active)
+
+    l1.wait_channel_active(scid)
+    l2.wait_channel_active(scid)
+
+    assert(count_active(l1) == 2)
+    assert(count_active(l2) == 2)
+
+    l2.restart()
+
+    wait_for(lambda: count_active(l1) == 0)
+    assert(count_active(l2) == 0)
+
+    # Now reconnect, they should re-enable the channels
+    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
+
+    wait_for(lambda: count_active(l1) == 2)
+    wait_for(lambda: count_active(l2) == 2)
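Note that `count_active` counts directions, not channels: `listchannels` returns one entry per channel direction, so the single l1 <-> l2 channel yields an expected count of 2 when healthy and 0 once either side marks it disabled. A small standalone illustration of that per-direction accounting, assuming only the `short_channel_id` and `active` fields used in the test (this helper is hypothetical, not part of the test suite):

from collections import defaultdict

def active_directions_by_scid(channels):
    """Count active directions per short_channel_id in a listchannels-style list."""
    counts = defaultdict(int)
    for c in channels:
        if c['active']:
            counts[c['short_channel_id']] += 1
    return dict(counts)

# One channel with both directions enabled shows up twice:
channels = [
    {'short_channel_id': '103x1x0', 'active': True},
    {'short_channel_id': '103x1x0', 'active': True},
]
assert active_directions_by_scid(channels) == {'103x1x0': 2}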
@@ -2505,10 +2505,10 @@ class LightningDTests(BaseLightningDTests):
         too.
         """
         opts = {'dev-no-reconnect': None}
-        l1 = self.node_factory.get_node(options=opts)
-        l2 = self.node_factory.get_node(options=opts)
-        l3 = self.node_factory.get_node(options=opts)
-        l4 = self.node_factory.get_node(options=opts)
+        l1 = self.node_factory.get_node(options=opts, may_reconnect=True)
+        l2 = self.node_factory.get_node(options=opts, may_reconnect=True)
+        l3 = self.node_factory.get_node(options=opts, may_reconnect=True)
+        l4 = self.node_factory.get_node(options=opts, may_reconnect=True)
 
         l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
         l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
@@ -2532,8 +2532,14 @@ class LightningDTests(BaseLightningDTests):
         wait_for(lambda: count_active(l2) == 4)
         wait_for(lambda: count_active(l3) == 6)  # 4 public + 2 local
 
-        # l1 restarts and doesn't connect, but loads from persisted store
+        # l1 restarts and doesn't connect, but loads from persisted store, all
+        # local channels should be disabled, leaving only the two l2 <-> l3
+        # directions
         l1.restart()
+        wait_for(lambda: count_active(l1) == 2)
+
+        # Now reconnect, they should re-enable the two l1 <-> l2 directions
         l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
+        wait_for(lambda: count_active(l1) == 4)
 
         # Now spend the funding tx, generate a block and see others deleting the
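The counts asserted above follow the same per-direction accounting: l1 <-> l2 and l2 <-> l3 are announced publicly while l3 <-> l4 stays local to l3 and l4, so l2 sees only the public channels while l3 also sees its local one. A worked check of those numbers (illustrative arithmetic only, not test code):

# Each channel a node can see contributes two listchannels entries,
# one per direction.
public_channels = 2       # l1 <-> l2 and l2 <-> l3 are announced
local_only_channels = 1   # l3 <-> l4 is known only to l3 and l4

assert 2 * public_channels == 4                           # l2's view
assert 2 * (public_channels + local_only_channels) == 6   # l3's view: 4 public + 2 local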
@@ -2563,6 +2569,8 @@ class LightningDTests(BaseLightningDTests):
         # Finally, it should also remember the deletion after a restart
         l3.restart()
         l4.restart()
+        l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
+        l3.rpc.connect(l4.info['id'], 'localhost', l4.port)
         wait_for(lambda: count_active(l3) == 4)  # 2 public + 2 local
 
         # Both l3 and l4 should remember their local-only channel