From e6b0141b108531d9d935d093566d04f2f8696977 Mon Sep 17 00:00:00 2001
From: Oliver Gugger
Date: Mon, 26 Oct 2020 14:21:05 +0100
Subject: [PATCH 1/6] itest: fix typos, formatting

---
 lntest/itest/lnd_test.go | 21 +++++++++------------
 1 file changed, 9 insertions(+), 12 deletions(-)

diff --git a/lntest/itest/lnd_test.go b/lntest/itest/lnd_test.go
index b4491983..835cbded 100644
--- a/lntest/itest/lnd_test.go
+++ b/lntest/itest/lnd_test.go
@@ -1518,7 +1518,7 @@ func testUnconfirmedChannelFunding(net *lntest.NetworkHarness, t *harnessTest) {
 // testPaymentFollowingChannelOpen tests that the channel transition from
 // 'pending' to 'open' state does not cause any inconsistencies within other
-// subsystems trying to udpate the channel state in the db. We follow this
+// subsystems trying to update the channel state in the db. We follow this
 // transition with a payment that updates the commitment state and verify that
 // the pending state is up to date.
 func testPaymentFollowingChannelOpen(net *lntest.NetworkHarness, t *harnessTest) {
@@ -1550,7 +1550,7 @@ func testPaymentFollowingChannelOpen(net *lntest.NetworkHarness, t *harnessTest)
 		t.Fatalf("Bob restart failed: %v", err)
 	}
-	// We ensure that Bob reconnets to Alice.
+	// We ensure that Bob reconnects to Alice.
 	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
 	if err := net.EnsureConnected(ctxt, net.Bob, net.Alice); err != nil {
 		t.Fatalf("peers unable to reconnect after restart: %v", err)
 	}
@@ -1559,7 +1559,7 @@ func testPaymentFollowingChannelOpen(net *lntest.NetworkHarness, t *harnessTest)
 	// We mine one block for the channel to be confirmed.
 	_ = mineBlocks(t, net, 6, 1)[0]
-	// We verify that the chanel is open from both nodes point of view.
+	// We verify that the channel is open from both nodes point of view.
 	ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
 	defer cancel()
 	assertNumOpenChannelsPending(ctxt, t, net.Alice, net.Bob, 0)
@@ -1575,14 +1575,11 @@ func testPaymentFollowingChannelOpen(net *lntest.NetworkHarness, t *harnessTest)
 
 	// Send payment to Bob so that a channel update to disk will be
 	// executed.
-	sendAndAssertSuccess(
-		t, net.Alice,
-		&routerrpc.SendPaymentRequest{
-			PaymentRequest: bobPayReqs[0],
-			TimeoutSeconds: 60,
-			FeeLimitSat:    1000000,
-		},
-	)
+	sendAndAssertSuccess(t, net.Alice, &routerrpc.SendPaymentRequest{
+		PaymentRequest: bobPayReqs[0],
+		TimeoutSeconds: 60,
+		FeeLimitSat:    1000000,
+	})
 
 	// At this point we want to make sure the channel is opened and not
 	// pending.
@@ -14054,7 +14051,7 @@ func getPaymentResult(stream routerrpc.Router_SendPaymentV2Client) (
 // TestLightningNetworkDaemon performs a series of integration tests amongst a
 // programmatically driven network of lnd nodes.
 func TestLightningNetworkDaemon(t *testing.T) {
-	// If no tests are regsitered, then we can exit early.
+	// If no tests are registered, then we can exit early.
 	if len(testsCases) == 0 {
 		t.Skip("integration tests not selected with flag 'rpctest'")
 	}

From 1714394add610f0c1c53e98045be33122fe5abab Mon Sep 17 00:00:00 2001
From: Oliver Gugger
Date: Mon, 26 Oct 2020 14:21:07 +0100
Subject: [PATCH 2/6] itest: fix send payment flake

In some cases the router isn't yet fully aware of all newly opened
channels. We need to give it some time to process the updates.
Therefore we wrap the payment sending in a wait to give it a few more
chances to catch up.
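The fix leans on the harness's wait.NoError helper, which keeps
retrying a fallible closure until it returns nil or a timeout expires.
As a rough sketch of that polling pattern (the 200ms poll interval and
the error plumbing below are illustrative assumptions, not necessarily
lnd's exact lntest/wait implementation):

	package wait

	import (
		"fmt"
		"time"
	)

	// NoError repeatedly invokes f until it returns nil, or until the
	// timeout elapses, in which case the last error seen is returned.
	// Simplified sketch only; the real helper may differ.
	func NoError(f func() error, timeout time.Duration) error {
		deadline := time.After(timeout)
		var lastErr error
		for {
			select {
			case <-deadline:
				return fmt.Errorf("timed out after %v: %v",
					timeout, lastErr)
			case <-time.After(200 * time.Millisecond):
				if lastErr = f(); lastErr == nil {
					return nil
				}
			}
		}
	}

The sendAndAssertSuccess change below simply wraps the existing send,
result retrieval and status check in such a closure.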
--- lntest/itest/lnd_test.go | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/lntest/itest/lnd_test.go b/lntest/itest/lnd_test.go index 835cbded..b0905000 100644 --- a/lntest/itest/lnd_test.go +++ b/lntest/itest/lnd_test.go @@ -13984,19 +13984,26 @@ func sendAndAssertSuccess(t *harnessTest, node *lntest.HarnessNode, ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout) defer cancel() - stream, err := node.RouterClient.SendPaymentV2(ctx, req) - if err != nil { - t.Fatalf("unable to send payment: %v", err) - } + var result *lnrpc.Payment + err := wait.NoError(func() error { + stream, err := node.RouterClient.SendPaymentV2(ctx, req) + if err != nil { + return fmt.Errorf("unable to send payment: %v", err) + } - result, err := getPaymentResult(stream) - if err != nil { - t.Fatalf("unable to get payment result: %v", err) - } + result, err = getPaymentResult(stream) + if err != nil { + return fmt.Errorf("unable to get payment result: %v", + err) + } - if result.Status != lnrpc.Payment_SUCCEEDED { - t.Fatalf("payment failed: %v", result.Status) - } + if result.Status != lnrpc.Payment_SUCCEEDED { + return fmt.Errorf("payment failed: %v", result.Status) + } + + return nil + }, defaultTimeout) + require.NoError(t.t, err) return result } From d1b46211d8099332007ddc7f197add0184822ee8 Mon Sep 17 00:00:00 2001 From: Oliver Gugger Date: Mon, 26 Oct 2020 14:21:08 +0100 Subject: [PATCH 3/6] itest: replace WaitForTxBroadcast with WaitForTxInMempool --- lntest/harness.go | 136 ++++++++++----------------------------- lntest/itest/lnd_test.go | 7 +- 2 files changed, 36 insertions(+), 107 deletions(-) diff --git a/lntest/harness.go b/lntest/harness.go index aa56af72..01c960db 100644 --- a/lntest/harness.go +++ b/lntest/harness.go @@ -57,9 +57,6 @@ type NetworkHarness struct { Alice *HarnessNode Bob *HarnessNode - seenTxns chan *chainhash.Hash - bitcoinWatchRequests chan *txWatchRequest - // Channel for transmitting stderr output from failed lightning node // to main process. lndErrorChan chan error @@ -83,19 +80,16 @@ func NewNetworkHarness(r *rpctest.Harness, b BackendConfig, lndBinary string) ( feeService := startFeeService() n := NetworkHarness{ - activeNodes: make(map[int]*HarnessNode), - nodesByPub: make(map[string]*HarnessNode), - seenTxns: make(chan *chainhash.Hash), - bitcoinWatchRequests: make(chan *txWatchRequest), - lndErrorChan: make(chan error), - netParams: r.ActiveNet, - Miner: r, - BackendCfg: b, - feeService: feeService, - quit: make(chan struct{}), - lndBinary: lndBinary, + activeNodes: make(map[int]*HarnessNode), + nodesByPub: make(map[string]*HarnessNode), + lndErrorChan: make(chan error), + netParams: r.ActiveNet, + Miner: r, + BackendCfg: b, + feeService: feeService, + quit: make(chan struct{}), + lndBinary: lndBinary, } - go n.networkWatcher() return &n, nil } @@ -746,81 +740,12 @@ func saveProfilesPage(node *HarnessNode) error { return nil } -// TODO(roasbeef): add a WithChannel higher-order function? -// * python-like context manager w.r.t using a channel within a test -// * possibly adds more funds to the target wallet if the funds are not -// enough - -// txWatchRequest encapsulates a request to the harness' Bitcoin network -// watcher to dispatch a notification once a transaction with the target txid -// is seen within the test network. 
-type txWatchRequest struct { - txid chainhash.Hash - eventChan chan struct{} -} - -// networkWatcher is a goroutine which accepts async notification -// requests for the broadcast of a target transaction, and then dispatches the -// transaction once its seen on the Bitcoin network. -func (n *NetworkHarness) networkWatcher() { - seenTxns := make(map[chainhash.Hash]struct{}) - clients := make(map[chainhash.Hash][]chan struct{}) - - for { - - select { - case <-n.quit: - return - - case req := <-n.bitcoinWatchRequests: - // If we've already seen this transaction, then - // immediately dispatch the request. Otherwise, append - // to the list of clients who are watching for the - // broadcast of this transaction. - if _, ok := seenTxns[req.txid]; ok { - close(req.eventChan) - } else { - clients[req.txid] = append(clients[req.txid], req.eventChan) - } - case txid := <-n.seenTxns: - // Add this txid to our set of "seen" transactions. So - // we're able to dispatch any notifications for this - // txid which arrive *after* it's seen within the - // network. - seenTxns[*txid] = struct{}{} - - // If there isn't a registered notification for this - // transaction then ignore it. - txClients, ok := clients[*txid] - if !ok { - continue - } - - // Otherwise, dispatch the notification to all clients, - // cleaning up the now un-needed state. - for _, client := range txClients { - close(client) - } - delete(clients, *txid) - } - } -} - -// OnTxAccepted is a callback to be called each time a new transaction has been -// broadcast on the network. -func (n *NetworkHarness) OnTxAccepted(hash *chainhash.Hash) { - select { - case n.seenTxns <- hash: - case <-n.quit: - return - } -} - -// WaitForTxBroadcast blocks until the target txid is seen on the network. If +// WaitForTxInMempool blocks until the target txid is seen in the mempool. If // the transaction isn't seen within the network before the passed timeout, // then an error is returned. -// TODO(roasbeef): add another method which creates queue of all seen transactions -func (n *NetworkHarness) WaitForTxBroadcast(ctx context.Context, txid chainhash.Hash) error { +func (n *NetworkHarness) WaitForTxInMempool(ctx context.Context, + txid chainhash.Hash) error { + // Return immediately if harness has been torn down. select { case <-n.quit: @@ -828,20 +753,29 @@ func (n *NetworkHarness) WaitForTxBroadcast(ctx context.Context, txid chainhash. 
 	default:
 	}
 
-	eventChan := make(chan struct{})
+	ticker := time.NewTicker(50 * time.Millisecond)
+	defer ticker.Stop()
 
-	n.bitcoinWatchRequests <- &txWatchRequest{
-		txid:      txid,
-		eventChan: eventChan,
-	}
+	var mempool []*chainhash.Hash
+	for {
+		select {
+		case <-ctx.Done():
+			return fmt.Errorf("wanted %v, found %v txs "+
+				"in mempool: %v", txid, len(mempool), mempool)
 
-	select {
-	case <-eventChan:
-		return nil
-	case <-n.quit:
-		return fmt.Errorf("NetworkHarness has been torn down")
-	case <-ctx.Done():
-		return fmt.Errorf("tx not seen before context timeout")
+		case <-ticker.C:
+			var err error
+			mempool, err = n.Miner.Node.GetRawMempool()
+			if err != nil {
+				return err
+			}
+
+			for _, mempoolTx := range mempool {
+				if *mempoolTx == txid {
+					return nil
+				}
+			}
+		}
 	}
 }
@@ -1163,7 +1097,7 @@ func (n *NetworkHarness) CloseChannel(ctx context.Context,
 				"%v", err)
 			return
 		}
-		if err := n.WaitForTxBroadcast(ctx, *closeTxid); err != nil {
+		if err := n.WaitForTxInMempool(ctx, *closeTxid); err != nil {
 			errChan <- fmt.Errorf("error while waiting for "+
 				"broadcast tx: %v", err)
 			return
diff --git a/lntest/itest/lnd_test.go b/lntest/itest/lnd_test.go
index b0905000..0e0fccf4 100644
--- a/lntest/itest/lnd_test.go
+++ b/lntest/itest/lnd_test.go
@@ -14079,14 +14079,9 @@ func TestLightningNetworkDaemon(t *testing.T) {
 	//
 	// We will also connect it to our chain backend.
 	minerLogDir := "./.minerlogs"
-	handlers := &rpcclient.NotificationHandlers{
-		OnTxAccepted: func(hash *chainhash.Hash, amt btcutil.Amount) {
-			lndHarness.OnTxAccepted(hash)
-		},
-	}
 	miner, minerCleanUp, err := lntest.NewMiner(
 		minerLogDir, "output_btcd_miner.log",
-		harnessNetParams, handlers,
+		harnessNetParams, &rpcclient.NotificationHandlers{},
 	)
 	require.NoError(t, err, "failed to create new miner")
 	defer func() {

From 34439fbc2a06c727fd269f2882dd519f9456575b Mon Sep 17 00:00:00 2001
From: Oliver Gugger
Date: Mon, 26 Oct 2020 14:21:09 +0100
Subject: [PATCH 4/6] itest: cleanup multi-hop tests

In preparation for fixing an issue with the mempool wait, we clean up
the multi-hop itests a bit. We fix the formatting, consistently use the
require library for assertions, and simplify some of the wait
predicates. Commonly used code is also extracted into functions.
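The diffs below repeatedly call a new helper,
waitForNumChannelPendingForceClose, whose definition (added to
lnd_test.go) is not shown in this excerpt. A plausible sketch of the
extracted helper, inferred from its call sites; the alias, signature
and body here are assumptions for illustration:

	// Assumed alias matching the call sites; the real patch likely
	// defines something similar in lnd_test.go.
	type lnrpcForceCloseChannel = lnrpc.PendingChannelsResponse_ForceClosedChannel

	// waitForNumChannelPendingForceClose polls the given node until it
	// reports exactly expectedNum pending force-closed channels,
	// optionally running perChanCheck against each of them. Sketch
	// only, assuming the itest package's existing imports (context,
	// fmt, lnrpc, lntest, wait) and the defaultTimeout constant.
	func waitForNumChannelPendingForceClose(ctx context.Context,
		node *lntest.HarnessNode, expectedNum int,
		perChanCheck func(channel *lnrpcForceCloseChannel) error) error {

		return wait.NoError(func() error {
			resp, err := node.PendingChannels(
				ctx, &lnrpc.PendingChannelsRequest{},
			)
			if err != nil {
				return fmt.Errorf("unable to query pending "+
					"channels: %v", err)
			}

			forceClosed := resp.PendingForceClosingChannels
			if len(forceClosed) != expectedNum {
				return fmt.Errorf("expected %d pending force "+
					"close channels, got %d", expectedNum,
					len(forceClosed))
			}

			if perChanCheck != nil {
				for _, channel := range forceClosed {
					if err := perChanCheck(channel); err != nil {
						return err
					}
				}
			}

			return nil
		}, defaultTimeout)
	}

With a helper of this shape, the hand-rolled 15-second wait.Predicate
loops in each test collapse into a single call plus require.NoError.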
--- ...d_multi-hop_htlc_local_chain_claim_test.go | 269 +++++------------- .../lnd_multi-hop_htlc_local_timeout_test.go | 141 +++------ ...ulti-hop_htlc_receiver_chain_claim_test.go | 162 +++-------- ..._multi-hop_htlc_remote_chain_claim_test.go | 211 ++++---------- ..._force_close_on_chain_htlc_timeout_test.go | 223 +++++---------- ..._force_close_on_chain_htlc_timeout_test.go | 154 +++------- lntest/itest/lnd_test.go | 61 +++- 7 files changed, 367 insertions(+), 854 deletions(-) diff --git a/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go b/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go index fcb3d0d0..d9422762 100644 --- a/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go +++ b/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go @@ -3,10 +3,8 @@ package itest import ( "context" "fmt" - "time" "github.com/btcsuite/btcd/wire" - "github.com/davecgh/go-spew/spew" "github.com/lightningnetwork/lnd" "github.com/lightningnetwork/lnd/lncfg" "github.com/lightningnetwork/lnd/lnrpc" @@ -15,6 +13,7 @@ import ( "github.com/lightningnetwork/lnd/lntest" "github.com/lightningnetwork/lnd/lntest/wait" "github.com/lightningnetwork/lnd/lntypes" + "github.com/stretchr/testify/require" ) // testMultiHopHtlcLocalChainClaim tests that in a multi-hop HTLC scenario, if @@ -51,9 +50,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest, ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout) defer cancel() carolInvoice, err := carol.AddHoldInvoice(ctxt, invoiceReq) - if err != nil { - t.Fatalf("unable to add invoice: %v", err) - } + require.NoError(t.t, err) // Now that we've created the invoice, we'll send a single payment from // Alice to Carol. We won't wait for the response however, as Carol @@ -62,32 +59,21 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest, defer cancel() _, err = alice.RouterClient.SendPaymentV2( - ctx, - &routerrpc.SendPaymentRequest{ + ctx, &routerrpc.SendPaymentRequest{ PaymentRequest: carolInvoice.PaymentRequest, TimeoutSeconds: 60, FeeLimitMsat: noFeeLimitMsat, }, ) - if err != nil { - t.Fatalf("unable to send payment: %v", err) - } + require.NoError(t.t, err) // At this point, all 3 nodes should now have an active channel with // the created HTLC pending on all of them. - var predErr error nodes := []*lntest.HarnessNode{alice, bob, carol} - err = wait.Predicate(func() bool { - predErr = assertActiveHtlcs(nodes, payHash[:]) - if predErr != nil { - return false - } - - return true - }, time.Second*15) - if err != nil { - t.Fatalf("htlc mismatch: %v", predErr) - } + err = wait.NoError(func() error { + return assertActiveHtlcs(nodes, payHash[:]) + }, defaultTimeout) + require.NoError(t.t, err) // Wait for carol to mark invoice as accepted. There is a small gap to // bridge between adding the htlc to the channel and executing the exit @@ -101,8 +87,9 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest, // At this point, Bob decides that he wants to exit the channel // immediately, so he force closes his commitment transaction. ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - bobForceClose := closeChannelAndAssertType(ctxt, t, net, bob, - aliceChanPoint, c == commitTypeAnchors, true) + bobForceClose := closeChannelAndAssertType( + ctxt, t, net, bob, aliceChanPoint, c == commitTypeAnchors, true, + ) // Alice will sweep her commitment output immediately. If there are // anchors, Alice will also sweep hers. 
@@ -113,16 +100,11 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest, _, err = waitForNTxsInMempool( net.Miner.Node, expectedTxes, minerMempoolTimeout, ) - if err != nil { - t.Fatalf("unable to find alice's sweep tx in miner mempool: %v", - err) - } + require.NoError(t.t, err) // Suspend Bob to force Carol to go to chain. restartBob, err := net.SuspendNode(bob) - if err != nil { - t.Fatalf("unable to suspend bob: %v", err) - } + require.NoError(t.t, err) // Settle invoice. This will just mark the invoice as settled, as there // is no link anymore to remove the htlc from the commitment tx. For @@ -134,31 +116,24 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest, _, err = carol.SettleInvoice(ctx, &invoicesrpc.SettleInvoiceMsg{ Preimage: preimage[:], }) - if err != nil { - t.Fatalf("settle invoice: %v", err) - } + require.NoError(t.t, err) // We'll now mine enough blocks so Carol decides that she needs to go // on-chain to claim the HTLC as Bob has been inactive. numBlocks := padCLTV(uint32(invoiceReq.CltvExpiry - lncfg.DefaultIncomingBroadcastDelta)) - if _, err := net.Miner.Node.Generate(numBlocks); err != nil { - t.Fatalf("unable to generate blocks") - } + _, err = net.Miner.Node.Generate(numBlocks) + require.NoError(t.t, err) // Carol's commitment transaction should now be in the mempool. If there // is an anchor, Carol will sweep that too. _, err = waitForNTxsInMempool( net.Miner.Node, expectedTxes, minerMempoolTimeout, ) - if err != nil { - t.Fatalf("transactions not found in mempool: %v", err) - } + require.NoError(t.t, err) bobFundingTxid, err := lnd.GetChanPointFundingTxid(bobChanPoint) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } + require.NoError(t.t, err) carolFundingPoint := wire.OutPoint{ Hash: *bobFundingTxid, Index: bobChanPoint.OutputIndex, @@ -174,16 +149,12 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest, // Mine a block that should confirm the commit tx, the anchor if present // and the coinbase. block := mineBlocks(t, net, 1, expectedTxes)[0] - if len(block.Transactions) != expectedTxes+1 { - t.Fatalf("expected %v transactions in block, got %v", - expectedTxes+1, len(block.Transactions)) - } + require.Len(t.t, block.Transactions, expectedTxes+1) assertTxInBlock(t, block, &closingTxid) // Restart bob again. - if err := restartBob(); err != nil { - t.Fatalf("unable to restart bob: %v", err) - } + err = restartBob() + require.NoError(t.t, err) // After the force close transacion is mined, Carol should broadcast her // second level HTLC transacion. Bob will broadcast a sweep tx to sweep @@ -198,9 +169,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest, txes, err := getNTxsFromMempool( net.Miner.Node, expectedTxes, minerMempoolTimeout, ) - if err != nil { - t.Fatalf("transactions not found in mempool: %v", err) - } + require.NoError(t.t, err) // Both Carol's second level transaction and Bob's sweep should be // spending from the commitment transaction. @@ -209,16 +178,11 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest, // At this point we suspend Alice to make sure she'll handle the // on-chain settle after a restart. restartAlice, err := net.SuspendNode(alice) - if err != nil { - t.Fatalf("unable to suspend alice: %v", err) - } + require.NoError(t.t, err) // Mine a block to confirm the two transactions (+ the coinbase). 
block = mineBlocks(t, net, 1, expectedTxes)[0] - if len(block.Transactions) != expectedTxes+1 { - t.Fatalf("expected 3 transactions in block, got %v", - len(block.Transactions)) - } + require.Len(t.t, block.Transactions, expectedTxes+1) // Keep track of the second level tx maturity. carolSecondLevelCSV := uint32(defaultCSV) @@ -226,73 +190,48 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest, // When Bob notices Carol's second level transaction in the block, he // will extract the preimage and broadcast a second level tx to claim // the HTLC in his (already closed) channel with Alice. - bobSecondLvlTx, err := waitForTxInMempool(net.Miner.Node, - minerMempoolTimeout) - if err != nil { - t.Fatalf("transactions not found in mempool: %v", err) - } + bobSecondLvlTx, err := waitForTxInMempool( + net.Miner.Node, minerMempoolTimeout, + ) + require.NoError(t.t, err) // It should spend from the commitment in the channel with Alice. tx, err := net.Miner.Node.GetRawTransaction(bobSecondLvlTx) - if err != nil { - t.Fatalf("unable to get txn: %v", err) - } + require.NoError(t.t, err) - if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash != *bobForceClose { - t.Fatalf("tx did not spend from bob's force close tx") - } + require.Equal( + t.t, *bobForceClose, tx.MsgTx().TxIn[0].PreviousOutPoint.Hash, + ) // At this point, Bob should have broadcast his second layer success // transaction, and should have sent it to the nursery for incubation. - pendingChansRequest := &lnrpc.PendingChannelsRequest{} - err = wait.Predicate(func() bool { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := bob.PendingChannels( - ctxt, pendingChansRequest, - ) - if err != nil { - predErr = fmt.Errorf("unable to query for pending "+ - "channels: %v", err) - return false - } - - if len(pendingChanResp.PendingForceClosingChannels) == 0 { - predErr = fmt.Errorf("bob should have pending for " + - "close chan but doesn't") - return false - } - - for _, forceCloseChan := range pendingChanResp.PendingForceClosingChannels { - if forceCloseChan.Channel.LocalBalance != 0 { - continue + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + err = waitForNumChannelPendingForceClose( + ctxt, bob, 1, func(c *lnrpcForceCloseChannel) error { + if c.Channel.LocalBalance != 0 { + return nil } - if len(forceCloseChan.PendingHtlcs) != 1 { - predErr = fmt.Errorf("bob should have pending htlc " + - "but doesn't") - return false + if len(c.PendingHtlcs) != 1 { + return fmt.Errorf("bob should have pending " + + "htlc but doesn't") } - stage := forceCloseChan.PendingHtlcs[0].Stage - if stage != 1 { - predErr = fmt.Errorf("bob's htlc should have "+ + + if c.PendingHtlcs[0].Stage != 1 { + return fmt.Errorf("bob's htlc should have "+ "advanced to the first stage but was "+ - "stage: %v", stage) - return false + "stage: %v", c.PendingHtlcs[0].Stage) } - } - return true - }, time.Second*15) - if err != nil { - t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr) - } + + return nil + }, + ) + require.NoError(t.t, err) // We'll now mine a block which should confirm Bob's second layer // transaction. 
block = mineBlocks(t, net, 1, 1)[0] - if len(block.Transactions) != 2 { - t.Fatalf("expected 2 transactions in block, got %v", - len(block.Transactions)) - } + require.Len(t.t, block.Transactions, 2) assertTxInBlock(t, block, bobSecondLvlTx) // Keep track of Bob's second level maturity, and decrement our track @@ -302,21 +241,17 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest, // Now that the preimage from Bob has hit the chain, restart Alice to // ensure she'll pick it up. - if err := restartAlice(); err != nil { - t.Fatalf("unable to restart alice: %v", err) - } + err = restartAlice() + require.NoError(t.t, err) // If we then mine 3 additional blocks, Carol's second level tx should // mature, and she can pull the funds from it with a sweep tx. - if _, err := net.Miner.Node.Generate(carolSecondLevelCSV); err != nil { - t.Fatalf("unable to generate block: %v", err) - } + _, err = net.Miner.Node.Generate(carolSecondLevelCSV) + require.NoError(t.t, err) bobSecondLevelCSV -= carolSecondLevelCSV carolSweep, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - t.Fatalf("unable to find Carol's sweeping transaction: %v", err) - } + require.NoError(t.t, err) // Mining one additional block, Bob's second level tx is mature, and he // can sweep the output. @@ -324,18 +259,14 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest, assertTxInBlock(t, block, carolSweep) bobSweep, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - t.Fatalf("unable to find bob's sweeping transaction") - } + require.NoError(t.t, err) // Make sure it spends from the second level tx. tx, err = net.Miner.Node.GetRawTransaction(bobSweep) - if err != nil { - t.Fatalf("unable to get txn: %v", err) - } - if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash != *bobSecondLvlTx { - t.Fatalf("tx did not spend from bob's second level tx") - } + require.NoError(t.t, err) + require.Equal( + t.t, *bobSecondLvlTx, tx.MsgTx().TxIn[0].PreviousOutPoint.Hash, + ) // When we mine one additional block, that will confirm Bob's sweep. 
// Now Bob should have no pending channels anymore, as this just @@ -343,77 +274,15 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest, block = mineBlocks(t, net, 1, 1)[0] assertTxInBlock(t, block, bobSweep) - err = wait.Predicate(func() bool { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := bob.PendingChannels( - ctxt, pendingChansRequest, - ) - if err != nil { - predErr = fmt.Errorf("unable to query for pending "+ - "channels: %v", err) - return false - } - if len(pendingChanResp.PendingForceClosingChannels) != 0 { - predErr = fmt.Errorf("bob still has pending channels "+ - "but shouldn't: %v", spew.Sdump(pendingChanResp)) - return false - } - req := &lnrpc.ListChannelsRequest{} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - chanInfo, err := bob.ListChannels(ctxt, req) - if err != nil { - predErr = fmt.Errorf("unable to query for open "+ - "channels: %v", err) - return false - } - if len(chanInfo.Channels) != 0 { - predErr = fmt.Errorf("Bob should have no open "+ - "channels, instead he has %v", - len(chanInfo.Channels)) - return false - } - return true - }, time.Second*15) - if err != nil { - t.Fatalf(predErr.Error()) - } + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + err = waitForNumChannelPendingForceClose(ctxt, bob, 0, nil) + require.NoError(t.t, err) + assertNodeNumChannels(t, bob, 0) // Also Carol should have no channels left (open nor pending). - err = wait.Predicate(func() bool { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := carol.PendingChannels( - ctxt, pendingChansRequest, - ) - if err != nil { - predErr = fmt.Errorf("unable to query for pending "+ - "channels: %v", err) - return false - } - if len(pendingChanResp.PendingForceClosingChannels) != 0 { - predErr = fmt.Errorf("bob carol has pending channels "+ - "but shouldn't: %v", spew.Sdump(pendingChanResp)) - return false - } - - req := &lnrpc.ListChannelsRequest{} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - chanInfo, err := carol.ListChannels(ctxt, req) - if err != nil { - predErr = fmt.Errorf("unable to query for open "+ - "channels: %v", err) - return false - } - if len(chanInfo.Channels) != 0 { - predErr = fmt.Errorf("carol should have no open "+ - "channels, instead she has %v", - len(chanInfo.Channels)) - return false - } - return true - }, time.Second*15) - if err != nil { - t.Fatalf(predErr.Error()) - } + err = waitForNumChannelPendingForceClose(ctxt, carol, 0, nil) + require.NoError(t.t, err) + assertNodeNumChannels(t, carol, 0) // Finally, check that the Alice's payment is correctly marked // succeeded. 
@@ -421,7 +290,5 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest, err = checkPaymentStatus( ctxt, alice, preimage, lnrpc.Payment_SUCCEEDED, ) - if err != nil { - t.Fatalf(err.Error()) - } + require.NoError(t.t, err) } diff --git a/lntest/itest/lnd_multi-hop_htlc_local_timeout_test.go b/lntest/itest/lnd_multi-hop_htlc_local_timeout_test.go index 8e55a744..a86dcf0e 100644 --- a/lntest/itest/lnd_multi-hop_htlc_local_timeout_test.go +++ b/lntest/itest/lnd_multi-hop_htlc_local_timeout_test.go @@ -2,19 +2,18 @@ package itest import ( "context" - "fmt" "time" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcutil" - "github.com/davecgh/go-spew/spew" "github.com/lightningnetwork/lnd" "github.com/lightningnetwork/lnd/lncfg" "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lnrpc/routerrpc" "github.com/lightningnetwork/lnd/lntest" "github.com/lightningnetwork/lnd/lntest/wait" + "github.com/stretchr/testify/require" ) // testMultiHopHtlcLocalTimeout tests that in a multi-hop HTLC scenario, if the @@ -58,8 +57,7 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest, payHash := makeFakePayHash(t) _, err := alice.RouterClient.SendPaymentV2( - ctx, - &routerrpc.SendPaymentRequest{ + ctx, &routerrpc.SendPaymentRequest{ Dest: carolPubKey, Amt: int64(dustHtlcAmt), PaymentHash: dustPayHash, @@ -68,13 +66,10 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest, FeeLimitMsat: noFeeLimitMsat, }, ) - if err != nil { - t.Fatalf("unable to send alice htlc: %v", err) - } + require.NoError(t.t, err) _, err = alice.RouterClient.SendPaymentV2( - ctx, - &routerrpc.SendPaymentRequest{ + ctx, &routerrpc.SendPaymentRequest{ Dest: carolPubKey, Amt: int64(htlcAmt), PaymentHash: payHash, @@ -83,21 +78,15 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest, FeeLimitMsat: noFeeLimitMsat, }, ) - if err != nil { - t.Fatalf("unable to send alice htlc: %v", err) - } + require.NoError(t.t, err) // Verify that all nodes in the path now have two HTLC's with the // proper parameters. - var predErr error nodes := []*lntest.HarnessNode{alice, bob, carol} - err = wait.Predicate(func() bool { - predErr = assertActiveHtlcs(nodes, dustPayHash, payHash) - return predErr == nil - }, time.Second*15) - if err != nil { - t.Fatalf("htlc mismatch: %v", predErr) - } + err = wait.NoError(func() error { + return assertActiveHtlcs(nodes, dustPayHash, payHash) + }, defaultTimeout) + require.NoError(t.t, err) // Increase the fee estimate so that the following force close tx will // be cpfp'ed. @@ -110,9 +99,8 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest, numBlocks := padCLTV( uint32(finalCltvDelta - lncfg.DefaultOutgoingBroadcastDelta), ) - if _, err := net.Miner.Node.Generate(numBlocks); err != nil { - t.Fatalf("unable to generate blocks: %v", err) - } + _, err = net.Miner.Node.Generate(numBlocks) + require.NoError(t.t, err) // Bob's force close transaction should now be found in the mempool. If // there are anchors, we also expect Bob's anchor sweep. 
@@ -122,15 +110,11 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest, } bobFundingTxid, err := lnd.GetChanPointFundingTxid(bobChanPoint) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } + require.NoError(t.t, err) _, err = waitForNTxsInMempool( net.Miner.Node, expectedTxes, minerMempoolTimeout, ) - if err != nil { - t.Fatalf("unable to find closing txid: %v", err) - } + require.NoError(t.t, err) closeTx := getSpendingTxInMempool( t, net.Miner.Node, minerMempoolTimeout, wire.OutPoint{ Hash: *bobFundingTxid, @@ -146,30 +130,25 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest, // that we sent earlier. This means Alice should now only have a single // HTLC on her channel. nodes = []*lntest.HarnessNode{alice} - err = wait.Predicate(func() bool { - predErr = assertActiveHtlcs(nodes, payHash) - return predErr == nil - }, time.Second*15) - if err != nil { - t.Fatalf("htlc mismatch: %v", predErr) - } + err = wait.NoError(func() error { + return assertActiveHtlcs(nodes, payHash) + }, defaultTimeout) + require.NoError(t.t, err) // With the closing transaction confirmed, we should expect Bob's HTLC // timeout transaction to be broadcast due to the expiry being reached. // If there are anchors, we also expect Carol's anchor sweep now. - txes, err := getNTxsFromMempool(net.Miner.Node, expectedTxes, minerMempoolTimeout) - if err != nil { - t.Fatalf("unable to find bob's htlc timeout tx: %v", err) - } + txes, err := getNTxsFromMempool( + net.Miner.Node, expectedTxes, minerMempoolTimeout, + ) + require.NoError(t.t, err) // Lookup the timeout transaction that is expected to spend from the // closing tx. We distinguish it from a possibly anchor sweep by value. var htlcTimeout *chainhash.Hash for _, tx := range txes { prevOp := tx.TxIn[0].PreviousOutPoint - if prevOp.Hash != closeTxid { - t.Fatalf("tx not spending from closing tx") - } + require.Equal(t.t, closeTxid, prevOp.Hash) // Assume that the timeout tx doesn't spend an output of exactly // the size of the anchor. @@ -178,9 +157,7 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest, htlcTimeout = &hash } } - if htlcTimeout == nil { - t.Fatalf("htlc timeout tx not found in mempool") - } + require.NotNil(t.t, htlcTimeout) // We'll mine the remaining blocks in order to generate the sweep // transaction of Bob's commitment output. @@ -188,9 +165,7 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest, // Check that the sweep spends from the mined commitment. 
txes, err = getNTxsFromMempool(net.Miner.Node, 1, minerMempoolTimeout) - if err != nil { - t.Fatalf("sweep not found: %v", err) - } + require.NoError(t.t, err) assertAllTxesSpendFrom(t, txes, closeTxid) // Bob's pending channel report should show that he has a commitment @@ -199,21 +174,12 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest, pendingChansRequest := &lnrpc.PendingChannelsRequest{} ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) pendingChanResp, err := bob.PendingChannels(ctxt, pendingChansRequest) - if err != nil { - t.Fatalf("unable to query for pending channels: %v", err) - } + require.NoError(t.t, err) - if len(pendingChanResp.PendingForceClosingChannels) == 0 { - t.Fatalf("bob should have pending for close chan but doesn't") - } + require.NotZero(t.t, len(pendingChanResp.PendingForceClosingChannels)) forceCloseChan := pendingChanResp.PendingForceClosingChannels[0] - if forceCloseChan.LimboBalance == 0 { - t.Fatalf("bob should have nonzero limbo balance instead "+ - "has: %v", forceCloseChan.LimboBalance) - } - if len(forceCloseChan.PendingHtlcs) == 0 { - t.Fatalf("bob should have pending htlc but doesn't") - } + require.NotZero(t.t, forceCloseChan.LimboBalance) + require.NotZero(t.t, len(forceCloseChan.PendingHtlcs)) // Now we'll mine an additional block, which should confirm Bob's commit // sweep. This block should also prompt Bob to broadcast their second @@ -230,60 +196,33 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest, // Therefore, at this point, there should be no active HTLC's on the // commitment transaction from Alice -> Bob. nodes = []*lntest.HarnessNode{alice} - err = wait.Predicate(func() bool { - predErr = assertNumActiveHtlcs(nodes, 0) - return predErr == nil - }, time.Second*15) - if err != nil { - t.Fatalf("alice's channel still has active htlc's: %v", predErr) - } + err = wait.NoError(func() error { + return assertNumActiveHtlcs(nodes, 0) + }, defaultTimeout) + require.NoError(t.t, err) // At this point, Bob should show that the pending HTLC has advanced to // the second stage and is to be swept. ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) pendingChanResp, err = bob.PendingChannels(ctxt, pendingChansRequest) - if err != nil { - t.Fatalf("unable to query for pending channels: %v", err) - } + require.NoError(t.t, err) forceCloseChan = pendingChanResp.PendingForceClosingChannels[0] - if forceCloseChan.PendingHtlcs[0].Stage != 2 { - t.Fatalf("bob's htlc should have advanced to the second stage: %v", err) - } + require.Equal(t.t, uint32(2), forceCloseChan.PendingHtlcs[0].Stage) // Next, we'll mine a final block that should confirm the second-layer // sweeping transaction. - if _, err := net.Miner.Node.Generate(1); err != nil { - t.Fatalf("unable to generate blocks: %v", err) - } + _, err = net.Miner.Node.Generate(1) + require.NoError(t.t, err) // Once this transaction has been confirmed, Bob should detect that he // no longer has any pending channels. 
- err = wait.Predicate(func() bool { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err = bob.PendingChannels(ctxt, pendingChansRequest) - if err != nil { - predErr = fmt.Errorf("unable to query for pending "+ - "channels: %v", err) - return false - } - if len(pendingChanResp.PendingForceClosingChannels) != 0 { - predErr = fmt.Errorf("bob still has pending "+ - "channels but shouldn't: %v", - spew.Sdump(pendingChanResp)) - return false - } - - return true - - }, time.Second*15) - if err != nil { - t.Fatalf(predErr.Error()) - } + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + err = waitForNumChannelPendingForceClose(ctxt, bob, 0, nil) + require.NoError(t.t, err) // Coop close channel, expect no anchors. ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) closeChannelAndAssertType( - ctxt, t, net, alice, aliceChanPoint, false, - false, + ctxt, t, net, alice, aliceChanPoint, false, false, ) } diff --git a/lntest/itest/lnd_multi-hop_htlc_receiver_chain_claim_test.go b/lntest/itest/lnd_multi-hop_htlc_receiver_chain_claim_test.go index 1e022844..1aace16d 100644 --- a/lntest/itest/lnd_multi-hop_htlc_receiver_chain_claim_test.go +++ b/lntest/itest/lnd_multi-hop_htlc_receiver_chain_claim_test.go @@ -2,11 +2,9 @@ package itest import ( "context" - "fmt" "time" "github.com/btcsuite/btcd/wire" - "github.com/davecgh/go-spew/spew" "github.com/lightningnetwork/lnd" "github.com/lightningnetwork/lnd/lncfg" "github.com/lightningnetwork/lnd/lnrpc" @@ -15,6 +13,7 @@ import ( "github.com/lightningnetwork/lnd/lntest" "github.com/lightningnetwork/lnd/lntest/wait" "github.com/lightningnetwork/lnd/lntypes" + "github.com/stretchr/testify/require" ) // testMultiHopReceiverChainClaim tests that in the multi-hop setting, if the @@ -53,9 +52,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest, ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout) defer cancel() carolInvoice, err := carol.AddHoldInvoice(ctxt, invoiceReq) - if err != nil { - t.Fatalf("unable to add invoice: %v", err) - } + require.NoError(t.t, err) // Now that we've created the invoice, we'll send a single payment from // Alice to Carol. We won't wait for the response however, as Carol @@ -64,32 +61,21 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest, defer cancel() _, err = alice.RouterClient.SendPaymentV2( - ctx, - &routerrpc.SendPaymentRequest{ + ctx, &routerrpc.SendPaymentRequest{ PaymentRequest: carolInvoice.PaymentRequest, TimeoutSeconds: 60, FeeLimitMsat: noFeeLimitMsat, }, ) - if err != nil { - t.Fatalf("unable to send payment: %v", err) - } + require.NoError(t.t, err) // At this point, all 3 nodes should now have an active channel with // the created HTLC pending on all of them. - var predErr error nodes := []*lntest.HarnessNode{alice, bob, carol} - err = wait.Predicate(func() bool { - predErr = assertActiveHtlcs(nodes, payHash[:]) - if predErr != nil { - return false - } - - return true - }, time.Second*15) - if err != nil { - t.Fatalf("htlc mismatch: %v", predErr) - } + err = wait.NoError(func() error { + return assertActiveHtlcs(nodes, payHash[:]) + }, defaultTimeout) + require.NoError(t.t, err) // Wait for carol to mark invoice as accepted. 
There is a small gap to // bridge between adding the htlc to the channel and executing the exit @@ -97,9 +83,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest, waitForInvoiceAccepted(t, carol, payHash) restartBob, err := net.SuspendNode(bob) - if err != nil { - t.Fatalf("unable to suspend bob: %v", err) - } + require.NoError(t.t, err) // Settle invoice. This will just mark the invoice as settled, as there // is no link anymore to remove the htlc from the commitment tx. For @@ -111,9 +95,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest, _, err = carol.SettleInvoice(ctx, &invoicesrpc.SettleInvoiceMsg{ Preimage: preimage[:], }) - if err != nil { - t.Fatalf("settle invoice: %v", err) - } + require.NoError(t.t, err) // Increase the fee estimate so that the following force close tx will // be cpfp'ed. @@ -125,9 +107,8 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest, numBlocks := padCLTV(uint32( invoiceReq.CltvExpiry - lncfg.DefaultIncomingBroadcastDelta, )) - if _, err := net.Miner.Node.Generate(numBlocks); err != nil { - t.Fatalf("unable to generate blocks") - } + _, err = net.Miner.Node.Generate(numBlocks) + require.NoError(t.t, err) // At this point, Carol should broadcast her active commitment // transaction in order to go to the chain and sweep her HTLC. If there @@ -139,14 +120,10 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest, _, err = getNTxsFromMempool( net.Miner.Node, expectedTxes, minerMempoolTimeout, ) - if err != nil { - t.Fatalf("expected transaction not found in mempool: %v", err) - } + require.NoError(t.t, err) bobFundingTxid, err := lnd.GetChanPointFundingTxid(bobChanPoint) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } + require.NoError(t.t, err) carolFundingPoint := wire.OutPoint{ Hash: *bobFundingTxid, @@ -164,9 +141,8 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest, mineBlocks(t, net, 1, expectedTxes) // Restart bob again. - if err := restartBob(); err != nil { - t.Fatalf("unable to restart bob: %v", err) - } + err = restartBob() + require.NoError(t.t, err) // After the force close transaction is mined, Carol should broadcast // her second level HTLC transaction. Bob will broadcast a sweep tx to @@ -178,20 +154,18 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest, if c == commitTypeAnchors { expectedTxes = 3 } - txes, err := getNTxsFromMempool(net.Miner.Node, - expectedTxes, minerMempoolTimeout) - if err != nil { - t.Fatalf("transactions not found in mempool: %v", err) - } + txes, err := getNTxsFromMempool( + net.Miner.Node, expectedTxes, minerMempoolTimeout, + ) + require.NoError(t.t, err) // All transactions should be spending from the commitment transaction. assertAllTxesSpendFrom(t, txes, closingTxid) // We'll now mine an additional block which should confirm both the // second layer transactions. 
- if _, err := net.Miner.Node.Generate(1); err != nil { - t.Fatalf("unable to generate block: %v", err) - } + _, err = net.Miner.Node.Generate(1) + require.NoError(t.t, err) time.Sleep(time.Second * 4) @@ -203,98 +177,52 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest, pendingChansRequest := &lnrpc.PendingChannelsRequest{} ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) pendingChanResp, err := carol.PendingChannels(ctxt, pendingChansRequest) - if err != nil { - t.Fatalf("unable to query for pending channels: %v", err) - } + require.NoError(t.t, err) - if len(pendingChanResp.PendingForceClosingChannels) == 0 { - t.Fatalf("carol should have pending for close chan but doesn't") - } + require.NotZero(t.t, len(pendingChanResp.PendingForceClosingChannels)) forceCloseChan := pendingChanResp.PendingForceClosingChannels[0] - if forceCloseChan.LimboBalance == 0 { - t.Fatalf("carol should have nonzero limbo balance instead "+ - "has: %v", forceCloseChan.LimboBalance) - } + require.NotZero(t.t, forceCloseChan.LimboBalance) // The pending HTLC carol has should also now be in stage 2. - if len(forceCloseChan.PendingHtlcs) != 1 { - t.Fatalf("carol should have pending htlc but doesn't") - } - if forceCloseChan.PendingHtlcs[0].Stage != 2 { - t.Fatalf("carol's htlc should have advanced to the second "+ - "stage: %v", err) - } + require.Len(t.t, forceCloseChan.PendingHtlcs, 1) + require.Equal(t.t, uint32(2), forceCloseChan.PendingHtlcs[0].Stage) // Once the second-level transaction confirmed, Bob should have // extracted the preimage from the chain, and sent it back to Alice, // clearing the HTLC off-chain. nodes = []*lntest.HarnessNode{alice} - err = wait.Predicate(func() bool { - predErr = assertNumActiveHtlcs(nodes, 0) - if predErr != nil { - return false - } - return true - }, time.Second*15) - if err != nil { - t.Fatalf("htlc mismatch: %v", predErr) - } + err = wait.NoError(func() error { + return assertNumActiveHtlcs(nodes, 0) + }, defaultTimeout) + require.NoError(t.t, err) // If we mine 4 additional blocks, then both outputs should now be // mature. - if _, err := net.Miner.Node.Generate(defaultCSV); err != nil { - t.Fatalf("unable to generate blocks: %v", err) - } + _, err = net.Miner.Node.Generate(defaultCSV) + require.NoError(t.t, err) // We should have a new transaction in the mempool. _, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - t.Fatalf("unable to find bob's sweeping transaction: %v", err) - } + require.NoError(t.t, err) // Finally, if we mine an additional block to confirm these two sweep // transactions, Carol should not show a pending channel in her report // afterwards. 
- if _, err := net.Miner.Node.Generate(1); err != nil { - t.Fatalf("unable to mine block: %v", err) - } - err = wait.Predicate(func() bool { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err = carol.PendingChannels(ctxt, pendingChansRequest) - if err != nil { - predErr = fmt.Errorf("unable to query for pending channels: %v", err) - return false - } - if len(pendingChanResp.PendingForceClosingChannels) != 0 { - predErr = fmt.Errorf("carol still has pending channels: %v", - spew.Sdump(pendingChanResp)) - return false - } - - return true - }, time.Second*15) - if err != nil { - t.Fatalf(predErr.Error()) - } + _, err = net.Miner.Node.Generate(1) + require.NoError(t.t, err) + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + err = waitForNumChannelPendingForceClose(ctxt, carol, 0, nil) + require.NoError(t.t, err) // The invoice should show as settled for Carol, indicating that it was // swept on-chain. invoicesReq := &lnrpc.ListInvoiceRequest{} invoicesResp, err := carol.ListInvoices(ctxb, invoicesReq) - if err != nil { - t.Fatalf("unable to retrieve invoices: %v", err) - } - if len(invoicesResp.Invoices) != 1 { - t.Fatalf("expected 1 invoice, got %d", len(invoicesResp.Invoices)) - } + require.NoError(t.t, err) + require.Len(t.t, invoicesResp.Invoices, 1) invoice := invoicesResp.Invoices[0] - if invoice.State != lnrpc.Invoice_SETTLED { - t.Fatalf("expected invoice to be settled on chain") - } - if invoice.AmtPaidSat != invoiceAmt { - t.Fatalf("expected invoice to be settled with %d sat, got "+ - "%d sat", invoiceAmt, invoice.AmtPaidSat) - } + require.Equal(t.t, lnrpc.Invoice_SETTLED, invoice.State) + require.Equal(t.t, int64(invoiceAmt), invoice.AmtPaidSat) // Finally, check that the Alice's payment is correctly marked // succeeded. @@ -302,9 +230,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest, err = checkPaymentStatus( ctxt, alice, preimage, lnrpc.Payment_SUCCEEDED, ) - if err != nil { - t.Fatalf(err.Error()) - } + require.NoError(t.t, err) // We'll close out the channel between Alice and Bob, then shutdown // carol to conclude the test. diff --git a/lntest/itest/lnd_multi-hop_htlc_remote_chain_claim_test.go b/lntest/itest/lnd_multi-hop_htlc_remote_chain_claim_test.go index 574ecf71..29ffd1ce 100644 --- a/lntest/itest/lnd_multi-hop_htlc_remote_chain_claim_test.go +++ b/lntest/itest/lnd_multi-hop_htlc_remote_chain_claim_test.go @@ -2,11 +2,8 @@ package itest import ( "context" - "fmt" - "time" "github.com/btcsuite/btcd/wire" - "github.com/davecgh/go-spew/spew" "github.com/lightningnetwork/lnd" "github.com/lightningnetwork/lnd/lncfg" "github.com/lightningnetwork/lnd/lnrpc" @@ -15,6 +12,7 @@ import ( "github.com/lightningnetwork/lnd/lntest" "github.com/lightningnetwork/lnd/lntest/wait" "github.com/lightningnetwork/lnd/lntypes" + "github.com/stretchr/testify/require" ) // testMultiHopHtlcRemoteChainClaim tests that in the multi-hop HTLC scenario, @@ -51,9 +49,7 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout) defer cancel() carolInvoice, err := carol.AddHoldInvoice(ctxt, invoiceReq) - if err != nil { - t.Fatalf("unable to add invoice: %v", err) - } + require.NoError(t.t, err) // Now that we've created the invoice, we'll send a single payment from // Alice to Carol. 
We won't wait for the response however, as Carol @@ -62,32 +58,21 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest defer cancel() _, err = alice.RouterClient.SendPaymentV2( - ctx, - &routerrpc.SendPaymentRequest{ + ctx, &routerrpc.SendPaymentRequest{ PaymentRequest: carolInvoice.PaymentRequest, TimeoutSeconds: 60, FeeLimitMsat: noFeeLimitMsat, }, ) - if err != nil { - t.Fatalf("unable to send payment: %v", err) - } + require.NoError(t.t, err) // At this point, all 3 nodes should now have an active channel with // the created HTLC pending on all of them. - var predErr error nodes := []*lntest.HarnessNode{alice, bob, carol} - err = wait.Predicate(func() bool { - predErr = assertActiveHtlcs(nodes, payHash[:]) - if predErr != nil { - return false - } - - return true - }, time.Second*15) - if err != nil { - t.Fatalf("htlc mismatch: %v", predErr) - } + err = wait.NoError(func() error { + return assertActiveHtlcs(nodes, payHash[:]) + }, defaultTimeout) + require.NoError(t.t, err) // Wait for carol to mark invoice as accepted. There is a small gap to // bridge between adding the htlc to the channel and executing the exit @@ -102,22 +87,20 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest // immediately force close the channel by broadcast her commitment // transaction. ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - aliceForceClose := closeChannelAndAssertType(ctxt, t, net, alice, - aliceChanPoint, c == commitTypeAnchors, true) + aliceForceClose := closeChannelAndAssertType( + ctxt, t, net, alice, aliceChanPoint, c == commitTypeAnchors, + true, + ) // Wait for the channel to be marked pending force close. ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) err = waitForChannelPendingForceClose(ctxt, alice, aliceChanPoint) - if err != nil { - t.Fatalf("channel not pending force close: %v", err) - } + require.NoError(t.t, err) // Mine enough blocks for Alice to sweep her funds from the force // closed channel. _, err = net.Miner.Node.Generate(defaultCSV) - if err != nil { - t.Fatalf("unable to generate blocks: %v", err) - } + require.NoError(t.t, err) // Alice should now sweep her funds. If there are anchors, Alice should // also sweep hers. @@ -125,16 +108,14 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest if c == commitTypeAnchors { expectedTxes = 2 } - _, err = waitForNTxsInMempool(net.Miner.Node, expectedTxes, minerMempoolTimeout) - if err != nil { - t.Fatalf("unable to find sweeping tx in mempool: %v", err) - } + _, err = waitForNTxsInMempool( + net.Miner.Node, expectedTxes, minerMempoolTimeout, + ) + require.NoError(t.t, err) // Suspend bob, so Carol is forced to go on chain. restartBob, err := net.SuspendNode(bob) - if err != nil { - t.Fatalf("unable to suspend bob: %v", err) - } + require.NoError(t.t, err) // Settle invoice. This will just mark the invoice as settled, as there // is no link anymore to remove the htlc from the commitment tx. For @@ -146,31 +127,25 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest _, err = carol.SettleInvoice(ctx, &invoicesrpc.SettleInvoiceMsg{ Preimage: preimage[:], }) - if err != nil { - t.Fatalf("settle invoice: %v", err) - } + require.NoError(t.t, err) // We'll now mine enough blocks so Carol decides that she needs to go // on-chain to claim the HTLC as Bob has been inactive. 
- numBlocks := padCLTV(uint32(invoiceReq.CltvExpiry- - lncfg.DefaultIncomingBroadcastDelta) - defaultCSV) + numBlocks := padCLTV(uint32( + invoiceReq.CltvExpiry-lncfg.DefaultIncomingBroadcastDelta, + ) - defaultCSV) - if _, err := net.Miner.Node.Generate(numBlocks); err != nil { - t.Fatalf("unable to generate blocks") - } + _, err = net.Miner.Node.Generate(numBlocks) + require.NoError(t.t, err) // Carol's commitment transaction should now be in the mempool. If there // are anchors, Carol also sweeps her anchor. _, err = waitForNTxsInMempool( net.Miner.Node, expectedTxes, minerMempoolTimeout, ) - if err != nil { - t.Fatalf("unable to find carol's txes: %v", err) - } + require.NoError(t.t, err) bobFundingTxid, err := lnd.GetChanPointFundingTxid(bobChanPoint) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } + require.NoError(t.t, err) carolFundingPoint := wire.OutPoint{ Hash: *bobFundingTxid, Index: bobChanPoint.OutputIndex, @@ -186,16 +161,12 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest // Mine a block, which should contain: the commitment, possibly an // anchor sweep and the coinbase tx. block := mineBlocks(t, net, 1, expectedTxes)[0] - if len(block.Transactions) != expectedTxes+1 { - t.Fatalf("expected %v transactions in block, got %v", - expectedTxes, len(block.Transactions)) - } + require.Len(t.t, block.Transactions, expectedTxes+1) assertTxInBlock(t, block, &closingTxid) // Restart bob again. - if err := restartBob(); err != nil { - t.Fatalf("unable to restart bob: %v", err) - } + err = restartBob() + require.NoError(t.t, err) // After the force close transacion is mined, Carol should broadcast her // second level HTLC transacion. Bob will broadcast a sweep tx to sweep @@ -206,21 +177,17 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest if c == commitTypeAnchors { expectedTxes = 3 } - txes, err := getNTxsFromMempool(net.Miner.Node, expectedTxes, - minerMempoolTimeout) - if err != nil { - t.Fatalf("transactions not found in mempool: %v", err) - } + txes, err := getNTxsFromMempool( + net.Miner.Node, expectedTxes, minerMempoolTimeout, + ) + require.NoError(t.t, err) // All transactions should be pending from the commitment transaction. assertAllTxesSpendFrom(t, txes, closingTxid) // Mine a block to confirm the two transactions (+ coinbase). block = mineBlocks(t, net, 1, expectedTxes)[0] - if len(block.Transactions) != expectedTxes+1 { - t.Fatalf("expected 3 transactions in block, got %v", - len(block.Transactions)) - } + require.Len(t.t, block.Transactions, expectedTxes+1) // Keep track of the second level tx maturity. carolSecondLevelCSV := uint32(defaultCSV) @@ -228,114 +195,60 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest // When Bob notices Carol's second level transaction in the block, he // will extract the preimage and broadcast a sweep tx to directly claim // the HTLC in his (already closed) channel with Alice. - bobHtlcSweep, err := waitForTxInMempool(net.Miner.Node, - minerMempoolTimeout) - if err != nil { - t.Fatalf("transactions not found in mempool: %v", err) - } + bobHtlcSweep, err := waitForTxInMempool( + net.Miner.Node, minerMempoolTimeout, + ) + require.NoError(t.t, err) // It should spend from the commitment in the channel with Alice. 
tx, err := net.Miner.Node.GetRawTransaction(bobHtlcSweep) - if err != nil { - t.Fatalf("unable to get txn: %v", err) - } - if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash != *aliceForceClose { - t.Fatalf("tx did not spend from alice's force close tx") - } + require.NoError(t.t, err) + require.Equal( + t.t, *aliceForceClose, tx.MsgTx().TxIn[0].PreviousOutPoint.Hash, + ) // We'll now mine a block which should confirm Bob's HTLC sweep // transaction. block = mineBlocks(t, net, 1, 1)[0] - if len(block.Transactions) != 2 { - t.Fatalf("expected 2 transactions in block, got %v", - len(block.Transactions)) - } + require.Len(t.t, block.Transactions, 2) assertTxInBlock(t, block, bobHtlcSweep) carolSecondLevelCSV-- // Now that the sweeping transaction has been confirmed, Bob should now // recognize that all contracts have been fully resolved, and show no // pending close channels. - pendingChansRequest := &lnrpc.PendingChannelsRequest{} - err = wait.Predicate(func() bool { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := bob.PendingChannels( - ctxt, pendingChansRequest, - ) - if err != nil { - predErr = fmt.Errorf("unable to query for pending "+ - "channels: %v", err) - return false - } - if len(pendingChanResp.PendingForceClosingChannels) != 0 { - predErr = fmt.Errorf("bob still has pending channels "+ - "but shouldn't: %v", spew.Sdump(pendingChanResp)) - return false - } - - return true - }, time.Second*15) - if err != nil { - t.Fatalf(predErr.Error()) - } + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + err = waitForNumChannelPendingForceClose(ctxt, bob, 0, nil) + require.NoError(t.t, err) // If we then mine 3 additional blocks, Carol's second level tx will // mature, and she should pull the funds. - if _, err := net.Miner.Node.Generate(carolSecondLevelCSV); err != nil { - t.Fatalf("unable to generate block: %v", err) - } + _, err = net.Miner.Node.Generate(carolSecondLevelCSV) + require.NoError(t.t, err) - carolSweep, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - t.Fatalf("unable to find Carol's sweeping transaction: %v", err) - } + carolSweep, err := waitForTxInMempool( + net.Miner.Node, minerMempoolTimeout, + ) + require.NoError(t.t, err) // When Carol's sweep gets confirmed, she should have no more pending // channels. block = mineBlocks(t, net, 1, 1)[0] assertTxInBlock(t, block, carolSweep) - pendingChansRequest = &lnrpc.PendingChannelsRequest{} - err = wait.Predicate(func() bool { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := carol.PendingChannels( - ctxt, pendingChansRequest, - ) - if err != nil { - predErr = fmt.Errorf("unable to query for pending "+ - "channels: %v", err) - return false - } - if len(pendingChanResp.PendingForceClosingChannels) != 0 { - predErr = fmt.Errorf("carol still has pending channels "+ - "but shouldn't: %v", spew.Sdump(pendingChanResp)) - return false - } - - return true - }, time.Second*15) - if err != nil { - t.Fatalf(predErr.Error()) - } + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + err = waitForNumChannelPendingForceClose(ctxt, carol, 0, nil) + require.NoError(t.t, err) // The invoice should show as settled for Carol, indicating that it was // swept on-chain. 
 	invoicesReq := &lnrpc.ListInvoiceRequest{}
 	invoicesResp, err := carol.ListInvoices(ctxb, invoicesReq)
-	if err != nil {
-		t.Fatalf("unable to retrieve invoices: %v", err)
-	}
-	if len(invoicesResp.Invoices) != 1 {
-		t.Fatalf("expected 1 invoice, got %d", len(invoicesResp.Invoices))
-	}
+	require.NoError(t.t, err)
+	require.Len(t.t, invoicesResp.Invoices, 1)

 	invoice := invoicesResp.Invoices[0]
-	if invoice.State != lnrpc.Invoice_SETTLED {
-		t.Fatalf("expected invoice to be settled on chain")
-	}
-	if invoice.AmtPaidSat != invoiceAmt {
-		t.Fatalf("expected invoice to be settled with %d sat, got "+
-			"%d sat", invoiceAmt, invoice.AmtPaidSat)
-	}
+	require.Equal(t.t, lnrpc.Invoice_SETTLED, invoice.State)
+	require.Equal(t.t, int64(invoiceAmt), invoice.AmtPaidSat)

 	// Finally, check that Alice's payment is correctly marked
 	// succeeded.
@@ -343,7 +256,5 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
 	err = checkPaymentStatus(
 		ctxt, alice, preimage, lnrpc.Payment_SUCCEEDED,
 	)
-	if err != nil {
-		t.Fatalf(err.Error())
-	}
+	require.NoError(t.t, err)
 }
diff --git a/lntest/itest/lnd_multi-hop_local_force_close_on_chain_htlc_timeout_test.go b/lntest/itest/lnd_multi-hop_local_force_close_on_chain_htlc_timeout_test.go
index 8eb5b97f..75c49690 100644
--- a/lntest/itest/lnd_multi-hop_local_force_close_on_chain_htlc_timeout_test.go
+++ b/lntest/itest/lnd_multi-hop_local_force_close_on_chain_htlc_timeout_test.go
@@ -3,14 +3,12 @@ package itest
 import (
 	"context"
 	"fmt"
-	"time"

 	"github.com/btcsuite/btcutil"
-	"github.com/davecgh/go-spew/spew"
-	"github.com/lightningnetwork/lnd/lnrpc"
 	"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
 	"github.com/lightningnetwork/lnd/lntest"
 	"github.com/lightningnetwork/lnd/lntest/wait"
+	"github.com/stretchr/testify/require"
 )

 // testMultiHopLocalForceCloseOnChainHtlcTimeout tests that in a multi-hop HTLC
@@ -47,8 +45,7 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
 	carolPubKey := carol.PubKey[:]
 	payHash := makeFakePayHash(t)
 	_, err := alice.RouterClient.SendPaymentV2(
-		ctx,
-		&routerrpc.SendPaymentRequest{
+		ctx, &routerrpc.SendPaymentRequest{
 			Dest:           carolPubKey,
 			Amt:            int64(htlcAmt),
 			PaymentHash:    payHash,
@@ -57,21 +54,15 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
 			FeeLimitMsat:   noFeeLimitMsat,
 		},
 	)
-	if err != nil {
-		t.Fatalf("unable to send alice htlc: %v", err)
-	}
+	require.NoError(t.t, err)

 	// Once the HTLC has cleared, all channels in our mini network should
 	// have it locked in.
-	var predErr error
 	nodes := []*lntest.HarnessNode{alice, bob, carol}
-	err = wait.Predicate(func() bool {
-		predErr = assertActiveHtlcs(nodes, payHash)
-		return predErr == nil
-	}, time.Second*15)
-	if err != nil {
-		t.Fatalf("htlc mismatch: %v", err)
-	}
+	err = wait.NoError(func() error {
+		return assertActiveHtlcs(nodes, payHash)
+	}, defaultTimeout)
+	require.NoError(t.t, err)

 	// Increase the fee estimate so that the following force close tx will
 	// be cpfp'ed.
@@ -87,105 +78,64 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
 	// At this point, Bob should have a pending force close channel as he
 	// just went to chain.
-	pendingChansRequest := &lnrpc.PendingChannelsRequest{}
-	err = wait.Predicate(func() bool {
-		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-		pendingChanResp, err := bob.PendingChannels(ctxt,
-			pendingChansRequest)
-		if err != nil {
-			predErr = fmt.Errorf("unable to query for pending "+
-				"channels: %v", err)
-			return false
-		}
-		if len(pendingChanResp.PendingForceClosingChannels) == 0 {
-			predErr = fmt.Errorf("bob should have pending for " +
-				"close chan but doesn't")
-			return false
-		}
+	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+	err = waitForNumChannelPendingForceClose(
+		ctxt, bob, 1, func(c *lnrpcForceCloseChannel) error {
+			if c.LimboBalance == 0 {
+				return fmt.Errorf("bob should have a nonzero "+
+					"limbo balance, instead has: %v",
+					c.LimboBalance)
+			}

-		forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
-		if forceCloseChan.LimboBalance == 0 {
-			predErr = fmt.Errorf("bob should have nonzero limbo "+
-				"balance instead has: %v",
-				forceCloseChan.LimboBalance)
-			return false
-		}
-
-		return true
-	}, time.Second*15)
-	if err != nil {
-		t.Fatalf(predErr.Error())
-	}
+			return nil
+		},
+	)
+	require.NoError(t.t, err)

 	// We'll mine defaultCSV blocks in order to generate the sweep
 	// transaction of Bob's funding output. If there are anchors, mine
 	// Carol's anchor sweep too.
 	if c == commitTypeAnchors {
 		_, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
-		if err != nil {
-			t.Fatalf("unable to find carol's anchor sweep tx: %v", err)
-		}
+		require.NoError(t.t, err)
 	}

-	if _, err := net.Miner.Node.Generate(defaultCSV); err != nil {
-		t.Fatalf("unable to generate blocks: %v", err)
-	}
+	_, err = net.Miner.Node.Generate(defaultCSV)
+	require.NoError(t.t, err)

 	_, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
-	if err != nil {
-		t.Fatalf("unable to find bob's funding output sweep tx: %v", err)
-	}
+	require.NoError(t.t, err)

 	// We'll now mine enough blocks for the HTLC to expire. After this, Bob
 	// should hand off the now expired HTLC output to the utxo nursery.
 	numBlocks := padCLTV(uint32(finalCltvDelta - defaultCSV - 1))
-	if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
-		t.Fatalf("unable to generate blocks: %v", err)
-	}
+	_, err = net.Miner.Node.Generate(numBlocks)
+	require.NoError(t.t, err)

 	// Bob's pending channel report should show that he has a single HTLC
 	// that's now in stage one.
-	err = wait.Predicate(func() bool {
-		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-		pendingChanResp, err := bob.PendingChannels(
-			ctxt, pendingChansRequest,
-		)
-		if err != nil {
-			predErr = fmt.Errorf("unable to query for pending "+
-				"channels: %v", err)
-			return false
-		}
+	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+	err = waitForNumChannelPendingForceClose(
+		ctxt, bob, 1, func(c *lnrpcForceCloseChannel) error {
+			if len(c.PendingHtlcs) != 1 {
+				return fmt.Errorf("bob should have pending " +
+					"htlc but doesn't")
+			}

-		if len(pendingChanResp.PendingForceClosingChannels) == 0 {
-			predErr = fmt.Errorf("bob should have pending force " +
-				"close chan but doesn't")
-			return false
-		}
+			if c.PendingHtlcs[0].Stage != 1 {
+				return fmt.Errorf("bob's htlc should have "+
+					"advanced to the first stage, but is "+
+					"in stage: %v", c.PendingHtlcs[0].Stage)
+			}

-		forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
-		if len(forceCloseChan.PendingHtlcs) != 1 {
-			predErr = fmt.Errorf("bob should have pending htlc " +
-				"but doesn't")
-			return false
-		}
-		if forceCloseChan.PendingHtlcs[0].Stage != 1 {
-			predErr = fmt.Errorf("bob's htlc should have "+
-				"advanced to the first stage: %v", err)
-			return false
-		}
-
-		return true
-	}, time.Second*15)
-	if err != nil {
-		t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr)
-	}
+			return nil
+		},
+	)
+	require.NoError(t.t, err)

 	// We should also now find a transaction in the mempool, as Bob should
 	// have broadcast his second layer timeout transaction.
 	timeoutTx, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
-	if err != nil {
-		t.Fatalf("unable to find bob's htlc timeout tx: %v", err)
-	}
+	require.NoError(t.t, err)

 	// Next, we'll mine an additional block. This should serve to confirm
 	// the second layer timeout transaction.
@@ -195,62 +145,39 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
 	// With the second layer timeout transaction confirmed, Bob should have
 	// canceled backwards the HTLC that Carol sent.
 	nodes = []*lntest.HarnessNode{alice}
-	err = wait.Predicate(func() bool {
-		predErr = assertNumActiveHtlcs(nodes, 0)
-		return predErr == nil
-	}, time.Second*15)
-	if err != nil {
-		t.Fatalf("alice's channel still has active htlc's: %v", predErr)
-	}
+	err = wait.NoError(func() error {
+		return assertNumActiveHtlcs(nodes, 0)
+	}, defaultTimeout)
+	require.NoError(t.t, err)

 	// Additionally, Bob should now show that HTLC as being advanced to the
 	// second stage.
-	err = wait.Predicate(func() bool {
-		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-		pendingChanResp, err := bob.PendingChannels(
-			ctxt, pendingChansRequest,
-		)
-		if err != nil {
-			predErr = fmt.Errorf("unable to query for pending "+
-				"channels: %v", err)
-			return false
-		}
+	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+	err = waitForNumChannelPendingForceClose(
+		ctxt, bob, 1, func(c *lnrpcForceCloseChannel) error {
+			if len(c.PendingHtlcs) != 1 {
+				return fmt.Errorf("bob should have pending " +
+					"htlc but doesn't")
+			}

-		if len(pendingChanResp.PendingForceClosingChannels) == 0 {
-			predErr = fmt.Errorf("bob should have pending for " +
-				"close chan but doesn't")
-			return false
-		}
+			if c.PendingHtlcs[0].Stage != 2 {
+				return fmt.Errorf("bob's htlc should have "+
+					"advanced to the second stage, but is "+
+					"in stage: %v", c.PendingHtlcs[0].Stage)
+			}

-		forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
-		if len(forceCloseChan.PendingHtlcs) != 1 {
-			predErr = fmt.Errorf("bob should have pending htlc " +
-				"but doesn't")
-			return false
-		}
-		if forceCloseChan.PendingHtlcs[0].Stage != 2 {
-			predErr = fmt.Errorf("bob's htlc should have "+
-				"advanced to the second stage: %v", err)
-			return false
-		}
-
-		return true
-	}, time.Second*15)
-	if err != nil {
-		t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr)
-	}
+			return nil
+		},
+	)
+	require.NoError(t.t, err)

 	// We'll now mine 4 additional blocks. This should be enough for Bob's
 	// CSV timelock to expire and the sweeping transaction of the HTLC to be
 	// broadcast.
-	if _, err := net.Miner.Node.Generate(defaultCSV); err != nil {
-		t.Fatalf("unable to mine blocks: %v", err)
-	}
+	_, err = net.Miner.Node.Generate(defaultCSV)
+	require.NoError(t.t, err)

 	sweepTx, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
-	if err != nil {
-		t.Fatalf("unable to find bob's htlc sweep tx: %v", err)
-	}
+	require.NoError(t.t, err)

 	// We'll then mine a final block which should confirm this second layer
 	// sweep transaction.
@@ -259,27 +186,9 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
 	// At this point, Bob should no longer show any channels as pending
 	// close.
-	err = wait.Predicate(func() bool {
-		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-		pendingChanResp, err := bob.PendingChannels(
-			ctxt, pendingChansRequest,
-		)
-		if err != nil {
-			predErr = fmt.Errorf("unable to query for pending "+
-				"channels: %v", err)
-			return false
-		}
-		if len(pendingChanResp.PendingForceClosingChannels) != 0 {
-			predErr = fmt.Errorf("bob still has pending channels "+
-				"but shouldn't: %v", spew.Sdump(pendingChanResp))
-			return false
-		}
-
-		return true
-	}, time.Second*15)
-	if err != nil {
-		t.Fatalf(predErr.Error())
-	}
+	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+	err = waitForNumChannelPendingForceClose(ctxt, bob, 0, nil)
+	require.NoError(t.t, err)

 	// Coop close, no anchors.
ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) diff --git a/lntest/itest/lnd_multi-hop_remote_force_close_on_chain_htlc_timeout_test.go b/lntest/itest/lnd_multi-hop_remote_force_close_on_chain_htlc_timeout_test.go index 19d92165..fc0a813b 100644 --- a/lntest/itest/lnd_multi-hop_remote_force_close_on_chain_htlc_timeout_test.go +++ b/lntest/itest/lnd_multi-hop_remote_force_close_on_chain_htlc_timeout_test.go @@ -3,14 +3,12 @@ package itest import ( "context" "fmt" - "time" "github.com/btcsuite/btcutil" - "github.com/davecgh/go-spew/spew" - "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lnrpc/routerrpc" "github.com/lightningnetwork/lnd/lntest" "github.com/lightningnetwork/lnd/lntest/wait" + "github.com/stretchr/testify/require" ) // testMultiHopRemoteForceCloseOnChainHtlcTimeout tests that if we extend a @@ -48,8 +46,7 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, carolPubKey := carol.PubKey[:] payHash := makeFakePayHash(t) _, err := alice.RouterClient.SendPaymentV2( - ctx, - &routerrpc.SendPaymentRequest{ + ctx, &routerrpc.SendPaymentRequest{ Dest: carolPubKey, Amt: int64(htlcAmt), PaymentHash: payHash, @@ -58,21 +55,15 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, FeeLimitMsat: noFeeLimitMsat, }, ) - if err != nil { - t.Fatalf("unable to send alice htlc: %v", err) - } + require.NoError(t.t, err) // Once the HTLC has cleared, all the nodes in our mini network should // show that the HTLC has been locked in. - var predErr error nodes := []*lntest.HarnessNode{alice, bob, carol} - err = wait.Predicate(func() bool { - predErr = assertActiveHtlcs(nodes, payHash) - return predErr == nil - }, time.Second*15) - if err != nil { - t.Fatalf("htlc mismatch: %v", predErr) - } + err = wait.NoError(func() error { + return assertActiveHtlcs(nodes, payHash) + }, defaultTimeout) + require.NoError(t.t, err) // Increase the fee estimate so that the following force close tx will // be cpfp'ed. @@ -90,28 +81,9 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, // At this point, Bob should have a pending force close channel as // Carol has gone directly to chain. - pendingChansRequest := &lnrpc.PendingChannelsRequest{} - err = wait.Predicate(func() bool { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := bob.PendingChannels( - ctxt, pendingChansRequest, - ) - if err != nil { - predErr = fmt.Errorf("unable to query for "+ - "pending channels: %v", err) - return false - } - if len(pendingChanResp.PendingForceClosingChannels) == 0 { - predErr = fmt.Errorf("bob should have pending " + - "force close channels but doesn't") - return false - } - - return true - }, time.Second*15) - if err != nil { - t.Fatalf(predErr.Error()) - } + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + err = waitForNumChannelPendingForceClose(ctxt, bob, 1, nil) + require.NoError(t.t, err) // Bob can sweep his output immediately. If there is an anchor, Bob will // sweep that as well. @@ -123,55 +95,39 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, _, err = waitForNTxsInMempool( net.Miner.Node, expectedTxes, minerMempoolTimeout, ) - if err != nil { - t.Fatalf("failed to find txes in miner mempool: %v", err) - } + require.NoError(t.t, err) // Next, we'll mine enough blocks for the HTLC to expire. At this // point, Bob should hand off the output to his internal utxo nursery, // which will broadcast a sweep transaction. 
 	numBlocks := padCLTV(finalCltvDelta - 1)
-	if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
-		t.Fatalf("unable to generate blocks: %v", err)
-	}
+	_, err = net.Miner.Node.Generate(numBlocks)
+	require.NoError(t.t, err)

 	// If we check Bob's pending channel report, it should show that he has
 	// a single HTLC that's now in the second stage, as we skip the initial
 	// first stage since this is a direct HTLC.
-	err = wait.Predicate(func() bool {
-		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-		pendingChanResp, err := bob.PendingChannels(
-			ctxt, pendingChansRequest,
-		)
-		if err != nil {
-			predErr = fmt.Errorf("unable to query for pending "+
-				"channels: %v", err)
-			return false
-		}
+	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+	err = waitForNumChannelPendingForceClose(
+		ctxt, bob, 1, func(c *lnrpcForceCloseChannel) error {
+			if len(c.PendingHtlcs) != 1 {
+				return fmt.Errorf("bob should have pending " +
+					"htlc but doesn't")
+			}

-		if len(pendingChanResp.PendingForceClosingChannels) == 0 {
-			predErr = fmt.Errorf("bob should have pending for " +
-				"close chan but doesn't")
-			return false
-		}
+			if c.PendingHtlcs[0].Stage != 2 {
+				return fmt.Errorf("bob's htlc should have "+
+					"advanced to the second stage, but is "+
+					"in stage: %v", c.PendingHtlcs[0].Stage)
+			}

-		forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
-		if len(forceCloseChan.PendingHtlcs) != 1 {
-			predErr = fmt.Errorf("bob should have pending htlc " +
-				"but doesn't")
-			return false
-		}
-		if forceCloseChan.PendingHtlcs[0].Stage != 2 {
-			predErr = fmt.Errorf("bob's htlc should have "+
-				"advanced to the second stage: %v", err)
-			return false
-		}
+			return nil
+		},
+	)
+	require.NoError(t.t, err)

-		return true
-	}, time.Second*15)
-	if err != nil {
-		t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr)
-	}
+	// We need to generate an additional block to trigger the sweep.
+	_, err = net.Miner.Node.Generate(1)
+	require.NoError(t.t, err)

 	// Bob's sweeping transaction should now be found in the mempool at
 	// this point.
@@ -185,14 +141,10 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
 	// we'll fail.
 	// TODO(halseth): can we use waitForChannelPendingForceClose to
 	// avoid this hack?
-	if _, err := net.Miner.Node.Generate(1); err != nil {
-		t.Fatalf("unable to generate block: %v", err)
-	}
+	_, err = net.Miner.Node.Generate(1)
+	require.NoError(t.t, err)
 	sweepTx, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
-	if err != nil {
-		t.Fatalf("unable to find bob's sweeping transaction: "+
-			"%v", err)
-	}
+	require.NoError(t.t, err)
 }

 // If we mine an additional block, then this should confirm Bob's
@@ -204,45 +156,23 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
 	// cancel back that HTLC. As a result, Alice should not know of any
 	// active HTLCs.
 	nodes = []*lntest.HarnessNode{alice}
-	err = wait.Predicate(func() bool {
-		predErr = assertNumActiveHtlcs(nodes, 0)
-		return predErr == nil
-	}, time.Second*15)
-	if err != nil {
-		t.Fatalf("alice's channel still has active htlc's: %v", predErr)
-	}
+	err = wait.NoError(func() error {
+		return assertNumActiveHtlcs(nodes, 0)
+	}, defaultTimeout)
+	require.NoError(t.t, err)

 	// Now we'll check Bob's pending channel report. Since this was Carol's
 	// commitment, he doesn't have to wait for any CSV delays. As a result,
 	// he should show no additional pending transactions.
-	err = wait.Predicate(func() bool {
-		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-		pendingChanResp, err := bob.PendingChannels(
-			ctxt, pendingChansRequest,
-		)
-		if err != nil {
-			predErr = fmt.Errorf("unable to query for pending "+
-				"channels: %v", err)
-			return false
-		}
-		if len(pendingChanResp.PendingForceClosingChannels) != 0 {
-			predErr = fmt.Errorf("bob still has pending channels "+
-				"but shouldn't: %v", spew.Sdump(pendingChanResp))
-			return false
-		}
-
-		return true
-	}, time.Second*15)
-	if err != nil {
-		t.Fatalf(predErr.Error())
-	}
+	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+	err = waitForNumChannelPendingForceClose(ctxt, bob, 0, nil)
+	require.NoError(t.t, err)

 	// We'll close out the test by closing the channel from Alice to Bob,
 	// and then shutting down the new node we created as it's no longer
 	// needed. Coop close, no anchors.
 	ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
 	closeChannelAndAssertType(
-		ctxt, t, net, alice, aliceChanPoint, false,
-		false,
+		ctxt, t, net, alice, aliceChanPoint, false, false,
 	)
 }
diff --git a/lntest/itest/lnd_test.go b/lntest/itest/lnd_test.go
index 0e0fccf4..7f60c405 100644
--- a/lntest/itest/lnd_test.go
+++ b/lntest/itest/lnd_test.go
@@ -330,37 +330,68 @@ func waitForChannelPendingForceClose(ctx context.Context,
 		Index: fundingChanPoint.OutputIndex,
 	}

-	var predErr error
-	err = wait.Predicate(func() bool {
+	return wait.NoError(func() error {
 		pendingChansRequest := &lnrpc.PendingChannelsRequest{}
 		pendingChanResp, err := node.PendingChannels(
 			ctx, pendingChansRequest,
 		)
 		if err != nil {
-			predErr = fmt.Errorf("unable to get pending "+
-				"channels: %v", err)
-			return false
+			return fmt.Errorf("unable to get pending channels: %v",
+				err)
 		}

 		forceClose, err := findForceClosedChannel(pendingChanResp, &op)
 		if err != nil {
-			predErr = err
-			return false
+			return err
 		}

 		// We must wait until the UTXO nursery has received the channel
 		// and is aware of its maturity height.
 		if forceClose.MaturityHeight == 0 {
-			predErr = fmt.Errorf("channel had maturity height of 0")
-			return false
+			return fmt.Errorf("channel had maturity height of 0")
 		}

-		return true
-	}, time.Second*15)
-	if err != nil {
-		return predErr
-	}
-	return nil
+		return nil
+	}, defaultTimeout)
+}
+
+// lnrpcForceCloseChannel is a short type alias for a ridiculously long type
+// name in the lnrpc package.
+type lnrpcForceCloseChannel = lnrpc.PendingChannelsResponse_ForceClosedChannel
+
+// waitForNumChannelPendingForceClose waits for the node to report a certain
+// number of channels in state pending force close.
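+//
+// An optional perChanCheck callback can be passed in to run an additional
+// assertion against each channel that is reported. A minimal usage sketch
+// (assuming the usual itest scaffolding of ctxt, bob and t is in scope, as
+// in the call sites above):
+//
+//	err := waitForNumChannelPendingForceClose(
+//		ctxt, bob, 1, func(c *lnrpcForceCloseChannel) error {
+//			if len(c.PendingHtlcs) != 1 {
+//				return fmt.Errorf("expected 1 pending "+
+//					"htlc, got %d", len(c.PendingHtlcs))
+//			}
+//			return nil
+//		},
+//	)
+//	require.NoError(t.t, err)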
+func waitForNumChannelPendingForceClose(ctx context.Context,
+	node *lntest.HarnessNode, expectedNum int,
+	perChanCheck func(channel *lnrpcForceCloseChannel) error) error {
+
+	return wait.NoError(func() error {
+		resp, err := node.PendingChannels(
+			ctx, &lnrpc.PendingChannelsRequest{},
+		)
+		if err != nil {
+			return fmt.Errorf("unable to get pending channels: %v",
+				err)
+		}
+
+		forceCloseChans := resp.PendingForceClosingChannels
+		if len(forceCloseChans) != expectedNum {
+			return fmt.Errorf("node should have %d pending "+
+				"force close channels but has %d", expectedNum,
+				len(forceCloseChans))
+		}
+
+		if perChanCheck != nil {
+			for _, forceCloseChan := range forceCloseChans {
+				err := perChanCheck(forceCloseChan)
+				if err != nil {
+					return err
+				}
+			}
+		}
+
+		return nil
+	}, defaultTimeout)
 }

 // cleanupForceClose mines a force close commitment found in the mempool and
From fd684a8ffa5cae829e39360e7797572099db9f19 Mon Sep 17 00:00:00 2001
From: Oliver Gugger
Date: Mon, 26 Oct 2020 14:34:58 +0100
Subject: [PATCH 5/6] itest: add new errors to whitelist

---
 lntest/itest/log_error_whitelist.txt | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/lntest/itest/log_error_whitelist.txt b/lntest/itest/log_error_whitelist.txt
index de643945..6378ffef 100644
--- a/lntest/itest/log_error_whitelist.txt
+++ b/lntest/itest/log_error_whitelist.txt
@@ -54,6 +54,7 @@