channel: Queue a channel_update to the peer upon funding_locked
Signed-off-by: Christian Decker <decker.christian@gmail.com>
commit 7aa13cc949
parent 9cfd09dc4a
Committed-by: Rusty Russell
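Summary: until now, channeld only told gossipd about a freshly locked channel (via local_add_channel), so the peer had no channel_update for our direction until the channel reached announcement depth and could not use it to route payments to us. This patch moves create_channel_update ahead of send_temporary_announcement and, right after notifying gossipd, queues an HSM-signed channel_update directly to the peer. The update is deliberately not handed to gossipd: without a preceding channel_announcement it is meaningless to third parties. The tests change accordingly, since both directions of an unannounced channel now appear in listchannels.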
diff --git a/channeld/channel.c b/channeld/channel.c
@@ -298,11 +298,54 @@ static void enqueue_peer_msg(struct peer *peer, const u8 *msg TAKES)
 	msg_enqueue(&peer->peer_out, msg);
 }
 
-/* Send a temporary `channel_announcement` and `channel_update`. These
- * are unsigned and mainly used to tell gossip about the channel
- * before we have reached the `announcement_depth`, not being signed
- * means they will not be relayed, but we can already rely on them for
- * our own outgoing payments */
+static u8 *create_channel_update(const tal_t *ctx,
+				 struct peer *peer, bool disabled)
+{
+	u32 timestamp = time_now().ts.tv_sec;
+	u16 flags;
+	u8 *cupdate, *msg;
+
+	/* Identical timestamps will be ignored. */
+	if (timestamp <= peer->last_update_timestamp)
+		timestamp = peer->last_update_timestamp + 1;
+	peer->last_update_timestamp = timestamp;
+
+	/* Set the signature to empty so that valgrind doesn't complain */
+	secp256k1_ecdsa_signature *sig =
+	    talz(tmpctx, secp256k1_ecdsa_signature);
+
+	flags = peer->channel_direction | (disabled << 1);
+	cupdate = towire_channel_update(
+	    tmpctx, sig, &peer->chain_hash,
+	    &peer->short_channel_ids[LOCAL], timestamp, flags,
+	    peer->cltv_delta, peer->conf[REMOTE].htlc_minimum_msat,
+	    peer->fee_base, peer->fee_per_satoshi);
+
+	msg = towire_hsm_cupdate_sig_req(tmpctx, cupdate);
+
+	if (!wire_sync_write(HSM_FD, msg))
+		status_failed(STATUS_FAIL_HSM_IO,
+			      "Writing cupdate_sig_req: %s",
+			      strerror(errno));
+
+	msg = wire_sync_read(tmpctx, HSM_FD);
+	if (!msg || !fromwire_hsm_cupdate_sig_reply(ctx, msg, &cupdate))
+		status_failed(STATUS_FAIL_HSM_IO,
+			      "Reading cupdate_sig_req: %s",
+			      strerror(errno));
+	return cupdate;
+}
+
+/**
+ * Add a channel locally and send a channel update to the peer
+ *
+ * Send a local_add_channel message to gossipd in order to make the channel
+ * usable locally, and also tell our peer about our parameters via a
+ * channel_update message. The peer may accept the update and use the contained
+ * information to route incoming payments through the channel. The
+ * channel_update is not preceded by a channel_announcement and won't make much
+ * sense to other nodes, so we don't tell gossipd about it.
+ */
 static void send_temporary_announcement(struct peer *peer)
 {
 	u8 *msg;
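For reference, the wire-level details create_channel_update fills in: BOLT 7's channel_update flags field uses bit 0 for the channel direction and bit 1 for the disable flag, and receivers ignore an update whose timestamp does not exceed the previous one, hence the bump. A minimal Python sketch of just that logic (names are illustrative, not part of the codebase):

    import time

    def make_update_fields(last_ts, direction, disabled):
        """Mirror the flags/timestamp logic of create_channel_update().

        direction: 0 or 1, which end of the channel we are (BOLT 7 bit 0).
        disabled:  bit 1 of the flags field marks the channel as disabled.
        """
        ts = int(time.time())
        # Identical timestamps would be ignored by receivers, so bump by one.
        if ts <= last_ts:
            ts = last_ts + 1
        flags = direction | (int(disabled) << 1)
        return ts, flags

    # A second update within the same second still gets a fresh timestamp.
    ts1, flags = make_update_fields(0, direction=1, disabled=False)
    ts2, _ = make_update_fields(ts1, direction=1, disabled=False)
    assert ts2 > ts1 and flags == 0b01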
@@ -319,6 +362,11 @@ static void send_temporary_announcement(struct peer *peer)
 		       peer->conf[REMOTE].htlc_minimum_msat, peer->fee_base,
 		       peer->fee_per_satoshi);
 	wire_sync_write(GOSSIP_FD, take(msg));
+
+	/* Tell the other side what parameters we expect should they route
+	 * through us */
+	msg = create_channel_update(NULL, peer, false);
+	enqueue_peer_msg(peer, take(msg));
 }
 
 static void send_announcement_signatures(struct peer *peer)
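The new tail of send_temporary_announcement gives the channel two audiences: gossipd, which makes it routable locally, and the peer, which learns our forwarding parameters (cltv_delta, htlc_minimum_msat, fee_base, fee_per_satoshi). A toy Python model of that fan-out, with hypothetical queue names standing in for the real wire calls:

    from collections import deque

    # Hypothetical stand-ins for lightningd's actual message queues.
    gossip_out = deque()
    peer_out = deque()

    def send_temporary_announcement(params):
        gossip_out.append(('local_add_channel', params))  # usable locally
        peer_out.append(('channel_update', params))       # peer learns our params

    send_temporary_announcement({'cltv_delta': 6, 'fee_base': 1,
                                 'fee_per_satoshi': 10})
    assert [m[0] for m in peer_out] == ['channel_update']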
@@ -383,44 +431,6 @@ static void send_announcement_signatures(struct peer *peer)
 	enqueue_peer_msg(peer, take(msg));
 }
 
-static u8 *create_channel_update(const tal_t *ctx,
-				 struct peer *peer, bool disabled)
-{
-	u32 timestamp = time_now().ts.tv_sec;
-	u16 flags;
-	u8 *cupdate, *msg;
-
-	/* Identical timestamps will be ignored. */
-	if (timestamp <= peer->last_update_timestamp)
-		timestamp = peer->last_update_timestamp + 1;
-	peer->last_update_timestamp = timestamp;
-
-	/* Set the signature to empty so that valgrind doesn't complain */
-	secp256k1_ecdsa_signature *sig =
-	    talz(tmpctx, secp256k1_ecdsa_signature);
-
-	flags = peer->channel_direction | (disabled << 1);
-	cupdate = towire_channel_update(
-	    tmpctx, sig, &peer->chain_hash,
-	    &peer->short_channel_ids[LOCAL], timestamp, flags,
-	    peer->cltv_delta, peer->conf[REMOTE].htlc_minimum_msat,
-	    peer->fee_base, peer->fee_per_satoshi);
-
-	msg = towire_hsm_cupdate_sig_req(tmpctx, cupdate);
-
-	if (!wire_sync_write(HSM_FD, msg))
-		status_failed(STATUS_FAIL_HSM_IO,
-			      "Writing cupdate_sig_req: %s",
-			      strerror(errno));
-
-	msg = wire_sync_read(tmpctx, HSM_FD);
-	if (!msg || !fromwire_hsm_cupdate_sig_reply(ctx, msg, &cupdate))
-		status_failed(STATUS_FAIL_HSM_IO,
-			      "Reading cupdate_sig_req: %s",
-			      strerror(errno));
-	return cupdate;
-}
-
 /* Tentatively create a channel_announcement, possibly with invalid
  * signatures. The signatures need to be collected first, by asking
  * the HSM and by exchanging announcement_signature messages. */
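This hunk is the other half of the move above: the removed create_channel_update body is identical to the one added in the first hunk. The function simply moves ahead of its new caller, send_temporary_announcement, so no forward declaration is needed.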
diff --git a/tests/test_lightningd.py b/tests/test_lightningd.py
@@ -1135,11 +1135,11 @@ class LightningDTests(BaseLightningDTests):
 
     def test_pay(self):
        l1, l2 = self.connect()
 
        chanid = self.fund_channel(l1, l2, 10**6)
 
        # Wait for route propagation.
        self.wait_for_routes(l1, [chanid])
+        sync_blockheight([l1, l2])
 
        inv = l2.rpc.invoice(123000, 'test_pay', 'description')['bolt11']
        before = int(time.time())
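sync_blockheight is the test-harness helper that blocks until every listed node has caught up with bitcoind's tip; adding it after wait_for_routes presumably guards against flakiness when a node computes CLTVs from a stale height. A minimal sketch of the assumed semantics (the real helper derives the height itself; here it is passed in):

    import time

    def sync_blockheight(nodes, target_height, timeout=30):
        """Sketch only: poll each node's getinfo() until its blockheight
        reaches bitcoind's tip (assumed semantics of the harness helper)."""
        deadline = time.time() + timeout
        while any(n.rpc.getinfo()['blockheight'] < target_height for n in nodes):
            if time.time() > deadline:
                raise TimeoutError('nodes never caught up to height %d'
                                   % target_height)
            time.sleep(0.25)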
@@ -1311,7 +1311,9 @@ class LightningDTests(BaseLightningDTests):
        wait_for(lambda: len(l1.getactivechannels()) == 2)
        wait_for(lambda: len(l2.getactivechannels()) == 2)
        billboard = l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'][0]['status']
-        assert billboard == ['CHANNELD_NORMAL:Funding transaction locked. Channel announced.']
+        # This may either be from a local_update or an announce, so just
+        # check for the substring
+        assert 'CHANNELD_NORMAL:Funding transaction locked.' in billboard[0]
 
        # This should return with an error, then close.
        self.assertRaisesRegex(ValueError,
@@ -1918,8 +1920,8 @@ class LightningDTests(BaseLightningDTests):
        # Now, this will get stuck due to l1 commit being disabled..
        t = self.pay(l1, l2, 100000000, async=True)
 
-        assert len(l1.getactivechannels()) == 1
-        assert len(l2.getactivechannels()) == 1
+        assert len(l1.getactivechannels()) == 2
+        assert len(l2.getactivechannels()) == 2
 
        # They should both have commitments blocked now.
        l1.daemon.wait_for_log('=WIRE_COMMITMENT_SIGNED-nocommit')
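The expected counts here (and in the hunks below) double because these helpers count half-channels, one per channel_update direction. Before this patch a node only knew its own outgoing update for an unannounced channel; now the peer's update arrives over the wire as well, so both directions show up as active.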
@@ -2346,6 +2348,7 @@ class LightningDTests(BaseLightningDTests):
        bitcoind.generate_block(1)
        wait_forget_channels(l2)
 
+    @unittest.skipIf(not DEVELOPER, "DEVELOPER=1 needed to speed up gossip propagation, would be too long otherwise")
    def test_gossip_jsonrpc(self):
        l1, l2 = self.connect()
        self.fund_channel(l1, l2, 10**6)
@@ -2354,21 +2357,22 @@ class LightningDTests(BaseLightningDTests):
        assert not l1.daemon.is_in_log('peer_out WIRE_ANNOUNCEMENT_SIGNATURES')
 
        # Channels should be activated locally
-        wait_for(lambda: [c['active'] for c in l1.rpc.listchannels()['channels']] == [True])
+        wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 2)
+        wait_for(lambda: len(l2.rpc.listchannels()['channels']) == 2)
 
        # Make sure we can route through the channel, will raise on failure
        l1.rpc.getroute(l2.info['id'], 100, 1)
 
        # Outgoing should be active, but not public.
-        channels = l1.rpc.listchannels()['channels']
-        assert len(channels) == 1
-        assert channels[0]['active'] is True
-        assert channels[0]['public'] is False
+        channels1 = l1.rpc.listchannels()['channels']
+        channels2 = l2.rpc.listchannels()['channels']
 
-        channels = l2.rpc.listchannels()['channels']
-        assert len(channels) == 1
-        assert channels[0]['active'] is True
-        assert channels[0]['public'] is False
+        assert [c['active'] for c in channels1] == [True, True]
+        assert [c['active'] for c in channels2] == [True, True]
+        # The incoming direction will be considered public, hence check for
+        # outgoing only
+        assert len([c for c in channels1 if not c['public']]) == 2
+        assert len([c for c in channels2 if not c['public']]) == 2
 
        # Now proceed to funding-depth and do a full gossip round
        l1.bitcoin.generate_block(5)
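The reshaped assertions rely on listchannels returning one entry per direction. A self-contained illustration of the shape they assume (hypothetical data, real field names):

    # Hypothetical listchannels()['channels'] payload for one unannounced
    # channel: one entry per direction, both active, neither public yet.
    channels1 = [
        {'source': 'l1', 'destination': 'l2', 'active': True, 'public': False},
        {'source': 'l2', 'destination': 'l1', 'active': True, 'public': False},
    ]

    assert [c['active'] for c in channels1] == [True, True]
    assert len([c for c in channels1 if not c['public']]) == 2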
@@ -2516,13 +2520,12 @@ class LightningDTests(BaseLightningDTests):
        def count_active(node):
            chans = node.rpc.listchannels()['channels']
            active = [c for c in chans if c['active']]
-            print(len(active), active)
            return len(active)
 
        # Channels should be activated
        wait_for(lambda: count_active(l1) == 4)
        wait_for(lambda: count_active(l2) == 4)
-        wait_for(lambda: count_active(l3) == 5) # 4 public + 1 local
+        wait_for(lambda: count_active(l3) == 6) # 4 public + 2 local
 
        # l1 restarts and doesn't connect, but loads from persisted store
        l1.restart()
@@ -2538,7 +2541,7 @@ class LightningDTests(BaseLightningDTests):
 
        wait_for(lambda: count_active(l1) == 2)
        wait_for(lambda: count_active(l2) == 2)
-        wait_for(lambda: count_active(l3) == 3) # 2 public + 1 local
+        wait_for(lambda: count_active(l3) == 4) # 2 public + 2 local
 
        # We should have one local-only channel
        def count_non_public(node):
@@ -2549,17 +2552,17 @@ class LightningDTests(BaseLightningDTests):
        # The channel l3 -> l4 should be known only to them
        assert count_non_public(l1) == 0
        assert count_non_public(l2) == 0
-        wait_for(lambda: count_non_public(l3) == 1)
-        wait_for(lambda: count_non_public(l4) == 1)
+        wait_for(lambda: count_non_public(l3) == 2)
+        wait_for(lambda: count_non_public(l4) == 2)
 
        # Finally, it should also remember the deletion after a restart
        l3.restart()
        l4.restart()
-        wait_for(lambda: count_active(l3) == 3) # 2 public + 1 local
+        wait_for(lambda: count_active(l3) == 4) # 2 public + 2 local
 
        # Both l3 and l4 should remember their local-only channel
-        wait_for(lambda: count_non_public(l3) == 1)
-        wait_for(lambda: count_non_public(l4) == 1)
+        wait_for(lambda: count_non_public(l3) == 2)
+        wait_for(lambda: count_non_public(l4) == 2)
 
    def ping_tests(self, l1, l2):
        # 0-byte pong gives just type + length field.
@@ -3032,6 +3035,7 @@ class LightningDTests(BaseLightningDTests):
        chanid = self.fund_channel(l1, l2, 10**6)
 
        self.wait_for_routes(l1, [chanid])
+        sync_blockheight([l1, l2])
 
        amt = 200000000
        inv = l2.rpc.invoice(amt, 'test_htlc_in_timeout', 'desc')['bolt11']
@@ -4096,6 +4100,7 @@ class LightningDTests(BaseLightningDTests):
        # Now make sure an HTLC works.
        # (First wait for route propagation.)
        self.wait_for_routes(l1, [chanid])
+        sync_blockheight([l1, l2])
 
        # Make payments.
        self.pay(l1, l2, 200000000)