mirror of
https://github.com/aljazceru/ThunderCloud.git
synced 2025-12-17 06:14:20 +01:00
Add SG for rpc ports
This commit is contained in:
10
README.md
10
README.md
@@ -26,6 +26,8 @@ Copy and paste that first command (`aws secretsmanager ...`) to download the SSH
|
||||
|
||||
You can use `lncli` to open channels, create invoices, do all the fun lightning things. `bos` is also installed if you want to use it for bos-flavored channel balancing, batch channel opens, etc.
|
||||
|
||||
By default, the grpc port will not be accessible. There's a security group that gets created for it, but it isn't attached to the node. If you uncomment the line that says `// instance.addSecurityGroup(rpcSg);` and run `cdk deploy`, it'll attach that security group and you'll be able to get to the grpc ports. Want to close them up? Comment that line out and do `cdk deploy` again and it'll detach the security group.
|
||||
|
||||
## Shutting down the node
|
||||
1. go into the project root and do `cdk destroy`
|
||||
There is no step 2. You can also go find the stack in CloudFormation and delete it there. Either way works.
|
||||
@@ -44,12 +46,12 @@ I wanted to make the instance small and cheap. If you want to run a full `bitcoi
|
||||
- Does this run RTL or Thunderhub?
|
||||
No. Maybe it will in the future. Right now it's lncli and bos only.
|
||||
|
||||
- How do I connect to lnd's GRPC ports?
|
||||
You'll need to add a security group rule for it. I'll add it to the stack soon.
|
||||
|
||||
## Possible future enhancements (PR's welcome!)
|
||||
- automatically backup channel state to S3
|
||||
- set up RTL or Thunderhub
|
||||
- use an elastic-ip for the node
|
||||
- would be cool to do a 1/1 autoscaling group, but need to make sure channel backup is SOLID
|
||||
- second instance for a watchtower
|
||||
|
||||
## Credits
|
||||
Much of the lnd.conf and the systemd unit script were cribbed from Alex Bosworth's run-lnd repo (https://github.com/alexbosworth/run-lnd)
|
||||
Most of the lnd.conf and the systemd unit script were cribbed from Alex Bosworth's run-lnd repo (https://github.com/alexbosworth/run-lnd)
|
||||
@@ -7,39 +7,53 @@ import * as path from 'path';
|
||||
|
||||
export class LightningNode extends cdk.Stack {
|
||||
get availabilityZones(): string[] {
|
||||
// Change this list if you want to use different AZs
|
||||
return ['us-east-1b', 'us-east-1c', 'us-east-1d', 'us-east-1e', 'us-east-1f']
|
||||
}
|
||||
constructor(scope: cdk.Construct, id: string, props?: cdk.StackProps) {
|
||||
super(scope, id, props);
|
||||
|
||||
// Set up a VPC with public and isolated subnets in 3 AZs (out of the list above)
|
||||
const vpc = new ec2.Vpc(this, "vpc", {
|
||||
cidr: "10.0.0.0/16",
|
||||
natGateways: 0,
|
||||
maxAzs: 3,
|
||||
});
|
||||
|
||||
// SSH key for the node
|
||||
const key = new KeyPair(this, 'KeyPair', {
|
||||
name: 'cdk-keypair',
|
||||
description: 'Key Pair created with CDK Deployment',
|
||||
});
|
||||
key.grantReadOnPublicKey
|
||||
const securityGroup = new ec2.SecurityGroup(this, 'SecurityGroup', {
|
||||
|
||||
// Security groups. I made three different ones because adding/removing SGs from instances
|
||||
// is easier to do through automation than changing rules on a single SG.
|
||||
const sshSg = new ec2.SecurityGroup(this, 'sshSecurityGroup', {
|
||||
vpc,
|
||||
description: 'Allow SSH (TCP port 22) in',
|
||||
allowAllOutbound: true
|
||||
});
|
||||
securityGroup.addIngressRule(ec2.Peer.anyIpv4(), ec2.Port.tcp(22), 'Allow SSH Access')
|
||||
sshSg.addIngressRule(ec2.Peer.anyIpv4(), ec2.Port.tcp(22), 'Allow SSH Access')
|
||||
const lightningSg = new ec2.SecurityGroup(this, "LightningSecurityGroup", {
|
||||
vpc,
|
||||
description: 'Allow lightning protocol (port 9735) traffic from the Internet',
|
||||
allowAllOutbound: true
|
||||
});
|
||||
lightningSg.addIngressRule(ec2.Peer.anyIpv4(), ec2.Port.tcp(9735));
|
||||
const setupScript = new Asset(this, "SetupScript", {
|
||||
path: path.join(__dirname, 'configure-node.sh')
|
||||
});
|
||||
const rpcSg = new ec2.SecurityGroup(this, "RpcSecurityGroup", {
|
||||
vpc,
|
||||
description: 'Allow access to lnd grpc interface',
|
||||
});
|
||||
rpcSg.addIngressRule(ec2.Peer.anyIpv4(), ec2.Port.tcp(10009));
|
||||
|
||||
// grab the latest hvm arm64 AL2 AMI
|
||||
const ami = new ec2.AmazonLinuxImage({
|
||||
generation: ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
|
||||
cpuType: ec2.AmazonLinuxCpuType.ARM_64
|
||||
});
|
||||
|
||||
const instance = new ec2.Instance(this, "lightningNode", {
|
||||
instanceType: new ec2.InstanceType("t4g.micro"),
|
||||
vpc: vpc,
|
||||
@@ -47,8 +61,13 @@ export class LightningNode extends cdk.Stack {
|
||||
vpcSubnets: {subnetType: ec2.SubnetType.PUBLIC},
|
||||
keyName: key.keyPairName,
|
||||
});
|
||||
instance.addSecurityGroup(securityGroup);
|
||||
instance.addSecurityGroup(sshSg);
|
||||
instance.addSecurityGroup(lightningSg);
|
||||
// Uncomment this next line to allow access to GRPC from the world.
|
||||
// Feel free to change the ingress rule above to lock down access to a specific IP or range
|
||||
// instance.addSecurityGroup(rpcSg);
|
||||
|
||||
// Wire the bootstrap script into the instance userdata
|
||||
const localPath = instance.userData.addS3DownloadCommand({
|
||||
bucket:setupScript.bucket,
|
||||
bucketKey:setupScript.s3ObjectKey,
|
||||
@@ -58,6 +77,9 @@ export class LightningNode extends cdk.Stack {
|
||||
arguments: '--verbose -y'
|
||||
});
|
||||
setupScript.grantRead( instance.role );
|
||||
|
||||
// These outputs get printed when you are done deploying, and can be found in the "Outputs" tab
|
||||
// of the Cloudformation stack. You can also fetch them programmatically. Feel free to add more
|
||||
new cdk.CfnOutput(this, 'IP Address', { value: instance.instancePublicIp });
|
||||
new cdk.CfnOutput(this, 'Key Name', { value: key.keyPairName })
|
||||
new cdk.CfnOutput(this, 'Download Key Command', { value: 'aws secretsmanager get-secret-value --secret-id ec2-ssh-key/cdk-keypair/private --query SecretString --output text > cdk-key.pem && chmod 400 cdk-key.pem' })
|
||||
|
||||
Reference in New Issue
Block a user