lightningd: change connect RPC args.
We're going to make the ip/port optional, so they should go at the end. In addition, using ip:port is nicer for gethostbyaddr().

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Committed by: Christian Decker
Parent: c9828d146a
Commit: 98ad6b9231
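The user-visible change, as exercised by the test updates below (a sketch only, reusing the test suite's `l1`/`l2` node fixtures; not part of the patch):

```
# Old argument order: host, port and id as three separate arguments.
l1.rpc.connect('localhost', l2.info['port'], l2.info['id'])

# New argument order: id first, then host, which may carry a ':port' suffix.
l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))

# With no ':port' suffix, lightningd falls back to DEFAULT_PORT (9735).
l1.rpc.connect(l2.info['id'], 'localhost')
```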
@@ -79,7 +79,7 @@ Once `lightningd` has funds, we can connect to a node and open a channel.
 Let's assume the remote node is accepting connections at `<ip>:<port>` and has the node ID `<node_id>`:

 ```
-cli/lightning-cli connect <ip> <port> <node_id>
+cli/lightning-cli connect <node_id> <ip>:<port>
 cli/lightning-cli fundchannel <node_id> <amount>
 ```

@@ -15,7 +15,7 @@
  * The default TCP port is 9735. This corresponds to hexadecimal
  * `0x2607`, the Unicode code point for LIGHTNING.
  */
-#define DEFAULT_PORT 0x2607
+#define DEFAULT_PORT 9735

 /* Various adjustable things. */
 struct config {
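Editorial note, not part of the patch: the port value itself is unchanged, only its spelling switches from hex to decimal. That matters because the parsing code further down builds the default port string with `stringify(DEFAULT_PORT)`, which expands to the literal macro text, so the decimal form yields the usable string "9735". A quick check:

```
# Editorial sanity check: the decimal and hex spellings denote the same port,
# and U+2607 is indeed the code point named LIGHTNING.
import unicodedata

assert 0x2607 == 9735
assert unicodedata.name('\u2607') == 'LIGHTNING'
```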
@@ -794,17 +794,16 @@ static void json_connect(struct command *cmd,
                          const char *buffer, const jsmntok_t *params)
 {
         struct connection *c;
-        jsmntok_t *host, *porttok, *idtok;
+        jsmntok_t *hosttok, *idtok;
         const tal_t *tmpctx = tal_tmpctx(cmd);
         struct pubkey id;
-        char *name, *port;
+        const char *name, *port, *colon;

         if (!json_get_params(buffer, params,
-                             "host", &host,
-                             "port", &porttok,
                              "id", &idtok,
+                             "host", &hosttok,
                              NULL)) {
-                command_fail(cmd, "Need host, port and id to connect");
+                command_fail(cmd, "Need id and host to connect");
                 return;
         }

@@ -815,12 +814,20 @@ static void json_connect(struct command *cmd,
                 return;
         }

+        colon = memchr(buffer + hosttok->start, ':',
+                       hosttok->end - hosttok->start);
+        if (colon) {
+                name = tal_strndup(cmd, buffer + hosttok->start,
+                                   colon - (buffer + hosttok->start));
+                port = tal_strndup(cmd, colon + 1,
+                                   (buffer + hosttok->end) - colon - 1);
+        } else {
+                name = tal_strndup(cmd, buffer + hosttok->start,
+                                   hosttok->end - hosttok->start);
+                port = tal_strdup(cmd, stringify(DEFAULT_PORT));
+        }
+
         c = new_connection(cmd, cmd->ld, cmd, &id);
-        name = tal_strndup(tmpctx,
-                           buffer + host->start, host->end - host->start);
-        port = tal_strndup(tmpctx,
-                           buffer + porttok->start,
-                           porttok->end - porttok->start);
         if (!dns_resolve_and_connect(cmd->ld, name, port,
                                      connection_out, connect_failed, c)) {
                 command_fail(cmd, "DNS failed");
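For readers skimming the C above, a minimal Python sketch of the new host parsing (illustration only, not part of the patch): the host argument is split at the first colon, and the port defaults to DEFAULT_PORT when no colon is present.

```
# Mirrors the memchr()-based split in json_connect, assuming DEFAULT_PORT is 9735.
def split_host(host, default_port=9735):
    name, sep, port = host.partition(':')   # partition() splits at the first colon
    return (name, port) if sep else (host, str(default_port))

assert split_host('203.0.113.5:9736') == ('203.0.113.5', '9736')
assert split_host('example.com') == ('example.com', '9735')
```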
@@ -833,7 +840,7 @@ static void json_connect(struct command *cmd,
 static const struct json_command connect_command = {
         "connect",
         json_connect,
-        "Connect to a {host} at {port} expecting node {id}",
+        "Connect to {id} at {host} (which can end in ':port' if not default)",
         "Returns the {id} on success (once channel established)"
 };
 AUTODATA(json_command, &connect_command);
@@ -208,7 +208,7 @@ class LightningDTests(BaseLightningDTests):
     def connect(self):
         l1 = self.node_factory.get_node()
         l2 = self.node_factory.get_node()
-        ret = l1.rpc.connect('localhost', l2.info['port'], l2.info['id'])
+        ret = l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))

         assert ret['id'] == l2.info['id']

@@ -399,7 +399,7 @@ class LightningDTests(BaseLightningDTests):
         # l1 asks for a too-long locktime
         l1 = self.node_factory.get_node(options=['--locktime-blocks=100'])
         l2 = self.node_factory.get_node(options=['--max-locktime-blocks=99'])
-        ret = l1.rpc.connect('localhost', l2.info['port'], l2.info['id'])
+        ret = l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))

         assert ret['id'] == l2.info['id']

@@ -488,7 +488,7 @@ class LightningDTests(BaseLightningDTests):
         l1 = self.node_factory.get_node(disconnect=disconnects)
         l2 = self.node_factory.get_node()

-        l1.rpc.connect('localhost', l2.info['port'], l2.info['id'])
+        l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))

         # Like fundchannel, but we'll probably fail before CHANNELD_NORMAL.
         addr = l1.rpc.newaddr()['address']
@@ -529,7 +529,7 @@ class LightningDTests(BaseLightningDTests):
         l1 = self.node_factory.get_node(disconnect=disconnects)
         l2 = self.node_factory.get_node()

-        l1.rpc.connect('localhost', l2.info['port'], l2.info['id'])
+        l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
         self.fund_channel(l1, l2, 10**6)

         # Must be dust!
@@ -581,7 +581,7 @@ class LightningDTests(BaseLightningDTests):
         l1 = self.node_factory.get_node(disconnect=disconnects)
         l2 = self.node_factory.get_node()

-        l1.rpc.connect('localhost', l2.info['port'], l2.info['id'])
+        l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
         self.fund_channel(l1, l2, 10**6)

         rhash = l2.rpc.invoice(10**8, 'onchain_timeout')['rhash']
@@ -637,8 +637,8 @@ class LightningDTests(BaseLightningDTests):
         l3 = self.node_factory.get_node()

         # l2 connects to both, so l1 can't reconnect and thus l2 drops to chain
-        l2.rpc.connect('localhost', l1.info['port'], l1.info['id'])
-        l2.rpc.connect('localhost', l3.info['port'], l3.info['id'])
+        l2.rpc.connect(l1.info['id'], 'localhost:{}'.format(l1.info['port']))
+        l2.rpc.connect(l3.info['id'], 'localhost:{}'.format(l3.info['port']))
         self.fund_channel(l2, l1, 10**6)
         self.fund_channel(l2, l3, 10**6)

@@ -709,7 +709,7 @@ class LightningDTests(BaseLightningDTests):
         l1 = self.node_factory.get_node(disconnect=['_WIRE_COMMITMENT_SIGNED'], may_fail=True)
         l2 = self.node_factory.get_node(disconnect=['_WIRE_COMMITMENT_SIGNED'])

-        l1.rpc.connect('localhost', l2.info['port'], l2.info['id'])
+        l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
         self.fund_channel(l1, l2, 10**6)

         # Now, this will get stuck due to l1 commit being disabled..
@@ -768,7 +768,7 @@ class LightningDTests(BaseLightningDTests):
         l1 = self.node_factory.get_node(disconnect=['_WIRE_COMMITMENT_SIGNED*3'], may_fail=True)
         l2 = self.node_factory.get_node(disconnect=['_WIRE_COMMITMENT_SIGNED*3'])

-        l1.rpc.connect('localhost', l2.info['port'], l2.info['id'])
+        l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
         self.fund_channel(l1, l2, 10**6)

         # Move some across to l2.
@@ -830,7 +830,7 @@ class LightningDTests(BaseLightningDTests):
         l1 = self.node_factory.get_node()
         l2 = self.node_factory.get_node(disconnect=disconnects)

-        l1.rpc.connect('localhost', l2.info['port'], l2.info['id'])
+        l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
         self.fund_channel(l1, l2, 10**6)

         # This will fail at l2's end.
@@ -865,7 +865,7 @@ class LightningDTests(BaseLightningDTests):
         l1 = self.node_factory.get_node()
         l2 = self.node_factory.get_node(disconnect=disconnects)

-        l1.rpc.connect('localhost', l2.info['port'], l2.info['id'])
+        l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
         self.fund_channel(l1, l2, 10**6)

         # This will fail at l2's end.
@@ -906,7 +906,7 @@ class LightningDTests(BaseLightningDTests):
         l1 = self.node_factory.get_node()
         l2 = self.node_factory.get_node(disconnect=disconnects)

-        l1.rpc.connect('localhost', l2.info['port'], l2.info['id'])
+        l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
         self.fund_channel(l2, l1, 10**6)

         # This will fail at l2's end.
@@ -1016,11 +1016,11 @@ class LightningDTests(BaseLightningDTests):
         l1 = self.node_factory.get_node(disconnect=disconnects)
         l2 = self.node_factory.get_node()
         l3 = self.node_factory.get_node()
-        l1.rpc.connect('localhost', l2.info['port'], l2.info['id'])
+        l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
         l1.openchannel(l2, 20000)

         # Now open new channels and everybody should sync
-        l2.rpc.connect('localhost', l3.info['port'], l3.info['id'])
+        l2.rpc.connect(l3.info['id'], 'localhost:{}'.format(l3.info['port']))
         l2.openchannel(l3, 20000)

         # Settle the gossip
@@ -1032,8 +1032,8 @@ class LightningDTests(BaseLightningDTests):
         l2 = self.node_factory.get_node()
         l3 = self.node_factory.get_node()

-        l1.rpc.connect('localhost', l2.info['port'], l2.info['id'])
-        l1.rpc.connect('localhost', l3.info['port'], l3.info['id'])
+        l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
+        l1.rpc.connect(l3.info['id'], 'localhost:{}'.format(l3.info['port']))
         self.fund_channel(l1, l2, 10**6)
         self.fund_channel(l1, l3, 10**6)

@@ -1044,7 +1044,7 @@ class LightningDTests(BaseLightningDTests):

         for i in range(len(nodes)-1):
             src, dst = nodes[i], nodes[i+1]
-            src.rpc.connect('localhost', dst.info['port'], dst.info['id'])
+            src.rpc.connect(dst.info['id'], 'localhost:{}'.format(dst.info['port']))
             src.openchannel(dst, 20000)

         # Allow announce messages.
@@ -1083,7 +1083,7 @@ class LightningDTests(BaseLightningDTests):
         # Connect 1 -> 2 -> 3.
         l1,l2 = self.connect()
         l3 = self.node_factory.get_node()
-        ret = l2.rpc.connect('localhost', l3.info['port'], l3.info['id'])
+        ret = l2.rpc.connect(l3.info['id'], 'localhost:{}'.format(l3.info['port']))

         assert ret['id'] == l3.info['id']

@@ -1146,11 +1146,11 @@ class LightningDTests(BaseLightningDTests):
         l2 = self.node_factory.get_node()
         for d in disconnects:
             self.assertRaises(ValueError, l1.rpc.connect,
-                              'localhost', l2.info['port'], l2.info['id'])
+                              l2.info['id'], 'localhost:{}'.format(l2.info['port']))
             assert l1.rpc.getpeer(l2.info['id']) == None

         # Now we should connect normally.
-        l1.rpc.connect('localhost', l2.info['port'], l2.info['id'])
+        l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))

     def test_disconnect_funder(self):
         # Now error on funder side duringchannel open.
@@ -1168,7 +1168,7 @@ class LightningDTests(BaseLightningDTests):
         l1.rpc.addfunds(tx)

         for d in disconnects:
-            l1.rpc.connect('localhost', l2.info['port'], l2.info['id'])
+            l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
             self.assertRaises(ValueError, l1.rpc.fundchannel, l2.info['id'], 20000)
             assert l1.rpc.getpeer(l2.info['id']) == None

@@ -1186,7 +1186,7 @@ class LightningDTests(BaseLightningDTests):
         l1.rpc.addfunds(tx)

         for d in disconnects:
-            l1.rpc.connect('localhost', l2.info['port'], l2.info['id'])
+            l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
             self.assertRaises(ValueError, l1.rpc.fundchannel, l2.info['id'], 20000)
             assert l1.rpc.getpeer(l2.info['id']) == None

@@ -1202,7 +1202,7 @@ class LightningDTests(BaseLightningDTests):
         tx = l1.bitcoin.rpc.getrawtransaction(txid)
         l1.rpc.addfunds(tx)

-        l1.rpc.connect('localhost', l2.info['port'], l2.info['id'])
+        l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
         self.assertRaises(ValueError, l1.rpc.fundchannel, l2.info['id'], 20000)

         # Fundee remembers, funder doesn't.
@@ -1220,7 +1220,7 @@ class LightningDTests(BaseLightningDTests):
         tx = l1.bitcoin.rpc.getrawtransaction(txid)
         l1.rpc.addfunds(tx)

-        l1.rpc.connect('localhost', l2.info['port'], l2.info['id'])
+        l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))
         l1.rpc.fundchannel(l2.info['id'], 20000)

         # They haven't forgotten each other.
@@ -1243,7 +1243,7 @@ class LightningDTests(BaseLightningDTests):
         disconnects = ['0WIRE_ACCEPT_CHANNEL']
         l1 = self.node_factory.get_node()
         l2 = self.node_factory.get_node(disconnect=disconnects)
-        l1.rpc.connect('localhost', l2.info['port'], l2.info['id'])
+        l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))

         addr = l1.rpc.newaddr()['address']
         txid = l1.bitcoin.rpc.sendtoaddress(addr, 20000 / 10**6)
@@ -1255,7 +1255,7 @@ class LightningDTests(BaseLightningDTests):
         assert l1.rpc.getpeer(l2.info['id']) == None

         # Reconnect.
-        l1.rpc.connect('localhost', l2.info['port'], l2.info['id'])
+        l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))

         # We should get a message about old one exiting.
         l2.daemon.wait_for_log('Peer has reconnected, state OPENINGD')
@@ -1275,7 +1275,7 @@ class LightningDTests(BaseLightningDTests):
                        '+WIRE_FUNDING_LOCKED']
         l1 = self.node_factory.get_node(disconnect=disconnects)
         l2 = self.node_factory.get_node()
-        ret = l1.rpc.connect('localhost', l2.info['port'], l2.info['id'])
+        ret = l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))

         self.fund_channel(l1, l2, 10**6)

@@ -1292,7 +1292,7 @@ class LightningDTests(BaseLightningDTests):
                        '+WIRE_REVOKE_AND_ACK']
         l1 = self.node_factory.get_node(disconnect=disconnects)
         l2 = self.node_factory.get_node()
-        ret = l1.rpc.connect('localhost', l2.info['port'], l2.info['id'])
+        ret = l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))

         self.fund_channel(l1, l2, 10**6)

@@ -1333,7 +1333,7 @@ class LightningDTests(BaseLightningDTests):
                        '+WIRE_REVOKE_AND_ACK']
         l1 = self.node_factory.get_node()
         l2 = self.node_factory.get_node(disconnect=disconnects)
-        ret = l1.rpc.connect('localhost', l2.info['port'], l2.info['id'])
+        ret = l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))

         self.fund_channel(l1, l2, 10**6)

@@ -1359,7 +1359,7 @@ class LightningDTests(BaseLightningDTests):
                        '+WIRE_REVOKE_AND_ACK']
         l1 = self.node_factory.get_node()
         l2 = self.node_factory.get_node(disconnect=disconnects)
-        ret = l1.rpc.connect('localhost', l2.info['port'], l2.info['id'])
+        ret = l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))

         self.fund_channel(l1, l2, 10**6)

@@ -1379,7 +1379,7 @@ class LightningDTests(BaseLightningDTests):
                        '+WIRE_SHUTDOWN']
         l1 = self.node_factory.get_node(disconnect=disconnects)
         l2 = self.node_factory.get_node()
-        l1.rpc.connect('localhost', l2.info['port'], l2.info['id'])
+        l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))

         self.fund_channel(l1, l2, 10**6)
         self.pay(l1,l2,200000000)
@@ -1406,7 +1406,7 @@ class LightningDTests(BaseLightningDTests):
                        '+WIRE_CLOSING_SIGNED']
         l1 = self.node_factory.get_node(disconnect=disconnects)
         l2 = self.node_factory.get_node()
-        l1.rpc.connect('localhost', l2.info['port'], l2.info['id'])
+        l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))

         self.fund_channel(l1, l2, 10**6)
         self.pay(l1,l2,200000000)
@@ -1504,7 +1504,7 @@ class LightningDTests(BaseLightningDTests):
         # check that HTLCs reloaded from the DB work.
         l1 = self.node_factory.get_node()
         l2 = self.node_factory.get_node(disconnect=['_WIRE_COMMITMENT_SIGNED'])
-        l1.rpc.connect('localhost', l2.info['port'], l2.info['id'])
+        l1.rpc.connect(l2.info['id'], 'localhost:{}'.format(l2.info['port']))

         # Neither node should have a channel open, they are just connected
         for n in (l1, l2):
@@ -1562,8 +1562,8 @@ class LightningDTests(BaseLightningDTests):
         l3 = self.node_factory.get_node()

         # l2 connects to both, so l1 can't reconnect and thus l2 drops to chain
-        l2.rpc.connect('localhost', l1.info['port'], l1.info['id'])
-        l2.rpc.connect('localhost', l3.info['port'], l3.info['id'])
+        l2.rpc.connect(l1.info['id'], 'localhost:{}'.format(l1.info['port']))
+        l2.rpc.connect(l3.info['id'], 'localhost:{}'.format(l3.info['port']))
         self.fund_channel(l2, l1, 10**6)
         self.fund_channel(l2, l3, 10**6)
