diff --git a/build/version.go b/build/version.go index d29e2e4750..22f4a145be 100644 --- a/build/version.go +++ b/build/version.go @@ -43,11 +43,11 @@ const ( AppMinor uint = 16 // AppPatch defines the application patch for this binary. - AppPatch uint = 2 + AppPatch uint = 3 // AppPreRelease MUST only contain characters from semanticAlphabet // per the semantic versioning spec. - AppPreRelease = "beta" + AppPreRelease = "beta.rc1" ) func init() { diff --git a/contractcourt/htlc_timeout_resolver.go b/contractcourt/htlc_timeout_resolver.go index 095e538e47..0b3e8688d3 100644 --- a/contractcourt/htlc_timeout_resolver.go +++ b/contractcourt/htlc_timeout_resolver.go @@ -327,6 +327,7 @@ func (h *htlcTimeoutResolver) sweepSecondLevelTx() error { Fee: sweep.FeePreference{ ConfTarget: secondLevelConfTarget, }, + Force: true, }, ) diff --git a/contractcourt/utxonursery.go b/contractcourt/utxonursery.go index 08b4a32be2..3776a35460 100644 --- a/contractcourt/utxonursery.go +++ b/contractcourt/utxonursery.go @@ -781,9 +781,10 @@ func (u *UtxoNursery) sweepMatureOutputs(classHeight uint32, // passed in with disastrous consequences. local := output - resultChan, err := u.cfg.SweepInput( - &local, sweep.Params{Fee: feePref}, - ) + resultChan, err := u.cfg.SweepInput(&local, sweep.Params{ + Fee: feePref, + Force: true, + }) if err != nil { return err } diff --git a/docs/release-notes/release-notes-0.16.3.md b/docs/release-notes/release-notes-0.16.3.md new file mode 100644 index 0000000000..c32d52c155 --- /dev/null +++ b/docs/release-notes/release-notes-0.16.3.md @@ -0,0 +1,35 @@ +# Release Notes + +## Mempool Optimizations + +* Optimized [mempool + management](https://github.com/lightningnetwork/lnd/pull/7681) to lower the + CPU usage. + +## Bug Fixes + +* [Re-encrypt/regenerate](https://github.com/lightningnetwork/lnd/pull/7705) + all macaroon DB root keys on `ChangePassword`/`GenerateNewRootKey` + respectively. 
+ +## Channel Link Bug Fix + +* If we detect the remote link is inactive, [we'll now tear down the + connection](https://github.com/lightningnetwork/lnd/pull/7711) in addition to + stopping the link's statemachine. If we're persistently connected with the + peer, then this'll force a reconnect, which may restart things and help avoid + certain force close scenarios. + + +## Consistent Contract Resolution + +* If lnd decides to go to chain for an HTLC, it will now _always_ ensure the + HTLC is fully swept on the outgoing link. Prior logic would avoid sweeping + due to negative yield, but combined with other inputs, the HTLC will usually + be positive yield. + +# Contributors (Alphabetical Order) + +* Elle Mouton +* Olaoluwa Osuntokun +* Yong Yu diff --git a/go.mod b/go.mod index dc1139bb81..23d58a359e 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/btcsuite/btcd/btcutil/psbt v1.1.8 github.com/btcsuite/btcd/chaincfg/chainhash v1.0.2 github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f - github.com/btcsuite/btcwallet v0.16.9 + github.com/btcsuite/btcwallet v0.16.10-0.20230517173256-aa62c04afcdf github.com/btcsuite/btcwallet/wallet/txauthor v1.3.2 github.com/btcsuite/btcwallet/wallet/txrules v1.2.0 github.com/btcsuite/btcwallet/walletdb v1.4.0 diff --git a/go.sum b/go.sum index 1d9141fa52..b707f0f17f 100644 --- a/go.sum +++ b/go.sum @@ -89,8 +89,8 @@ github.com/btcsuite/btcd/chaincfg/chainhash v1.0.2/go.mod h1:7SFka0XMvUgj3hfZtyd github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= -github.com/btcsuite/btcwallet v0.16.9 h1:hLAzEJvsiSn+r6j374G7ThnrYD/toa+Lv7l1Rm6+0oM= -github.com/btcsuite/btcwallet v0.16.9/go.mod 
h1:T3DjEAMZYIqQ28l+ixlB6DX4mFJXCX8Pzz+yACQcLsc= +github.com/btcsuite/btcwallet v0.16.10-0.20230517173256-aa62c04afcdf h1:YRY292S4axfeDPJrZYkDq4+JaRhaPvjMueZ65evmUfc= +github.com/btcsuite/btcwallet v0.16.10-0.20230517173256-aa62c04afcdf/go.mod h1:T3DjEAMZYIqQ28l+ixlB6DX4mFJXCX8Pzz+yACQcLsc= github.com/btcsuite/btcwallet/wallet/txauthor v1.3.2 h1:etuLgGEojecsDOYTII8rYiGHjGyV5xTqsXi+ZQ715UU= github.com/btcsuite/btcwallet/wallet/txauthor v1.3.2/go.mod h1:Zpk/LOb2sKqwP2lmHjaZT9AdaKsHPSbNLm2Uql5IQ/0= github.com/btcsuite/btcwallet/wallet/txrules v1.2.0 h1:BtEN5Empw62/RVnZ0VcJaVtVlBijnLlJY+dwjAye2Bg= diff --git a/htlcswitch/link.go b/htlcswitch/link.go index d2dfc20abf..1979f98399 100644 --- a/htlcswitch/link.go +++ b/htlcswitch/link.go @@ -1036,8 +1036,8 @@ func (l *channelLink) htlcManager() { // storing the transaction in the db. l.fail( LinkFailureError{ - code: ErrSyncError, - ForceClose: true, + code: ErrSyncError, + FailureAction: LinkFailureForceClose, //nolint:lll }, "unable to synchronize channel "+ "states: %v", err, @@ -1077,8 +1077,8 @@ func (l *channelLink) htlcManager() { l.fail( LinkFailureError{ - code: ErrRecoveryError, - ForceClose: false, + code: ErrRecoveryError, + FailureAction: LinkFailureForceNone, }, "unable to synchronize channel "+ "states: %v", err, @@ -1239,8 +1239,13 @@ func (l *channelLink) htlcManager() { } case <-l.cfg.PendingCommitTicker.Ticks(): - l.fail(LinkFailureError{code: ErrRemoteUnresponsive}, - "unable to complete dance") + l.fail( + LinkFailureError{ + code: ErrRemoteUnresponsive, + FailureAction: LinkFailureDisconnect, + }, + "unable to complete dance", + ) return // A message from the switch was just received. 
This indicates @@ -1782,8 +1787,8 @@ func (l *channelLink) handleUpstreamMsg(msg lnwire.Message) { if err := l.channel.ReceiveHTLCSettle(pre, idx); err != nil { l.fail( LinkFailureError{ - code: ErrInvalidUpdate, - ForceClose: true, + code: ErrInvalidUpdate, + FailureAction: LinkFailureForceClose, }, "unable to handle upstream settle HTLC: %v", err, ) @@ -1947,9 +1952,9 @@ func (l *channelLink) handleUpstreamMsg(msg lnwire.Message) { } l.fail( LinkFailureError{ - code: ErrInvalidCommitment, - ForceClose: true, - SendData: sendData, + code: ErrInvalidCommitment, + FailureAction: LinkFailureForceClose, + SendData: sendData, }, "ChannelPoint(%v): unable to accept new "+ "commitment: %v", diff --git a/htlcswitch/link_test.go b/htlcswitch/link_test.go index 72afa2ebb0..e744c75fc3 100644 --- a/htlcswitch/link_test.go +++ b/htlcswitch/link_test.go @@ -5457,8 +5457,10 @@ func TestChannelLinkFail(t *testing.T) { // If we expect the link to force close the channel in this // case, check that it happens. If not, make sure it does not // happen. - require.Equal( - t, test.shouldForceClose, linkErr.ForceClose, test.name, + isForceCloseErr := (linkErr.FailureAction == + LinkFailureForceClose) + require.True( + t, test.shouldForceClose == isForceCloseErr, test.name, ) require.Equal( t, test.permanentFailure, linkErr.PermanentFailure, @@ -6342,11 +6344,12 @@ func TestPendingCommitTicker(t *testing.T) { // Assert that we get the expected link failure from Alice. 
select { case linkErr := <-linkErrs: - if linkErr.code != ErrRemoteUnresponsive { - t.Fatalf("error code mismatch, "+ - "want: ErrRemoteUnresponsive, got: %v", - linkErr.code) - } + require.Equal( + t, linkErr.code, ErrRemoteUnresponsive, + fmt.Sprintf("error code mismatch, want: "+ + "ErrRemoteUnresponsive, got: %v", linkErr.code), + ) + require.Equal(t, linkErr.FailureAction, LinkFailureDisconnect) case <-time.After(time.Second): t.Fatalf("did not receive failure") @@ -6523,7 +6526,7 @@ func TestPipelineSettle(t *testing.T) { // ForceClose should be false. select { case linkErr := <-linkErrors: - require.False(t, linkErr.ForceClose) + require.False(t, linkErr.FailureAction == LinkFailureForceClose) case <-forwardChan: t.Fatal("packet was erroneously forwarded") } @@ -6559,7 +6562,7 @@ func TestPipelineSettle(t *testing.T) { // ForceClose should be false. select { case linkErr := <-linkErrors: - require.False(t, linkErr.ForceClose) + require.False(t, linkErr.FailureAction == LinkFailureForceClose) case <-forwardChan: t.Fatal("packet was erroneously forwarded") } diff --git a/htlcswitch/linkfailure.go b/htlcswitch/linkfailure.go index 1f454a7bb4..f04a41603b 100644 --- a/htlcswitch/linkfailure.go +++ b/htlcswitch/linkfailure.go @@ -53,6 +53,24 @@ const ( ErrCircuitError ) +// LinkFailureAction is an enum-like type that describes the action that should +// be taken in response to a link failure. +type LinkFailureAction uint8 + +const ( + // LinkFailureForceNone indicates no action is to be taken. + LinkFailureForceNone LinkFailureAction = iota + + // LinkFailureForceClose indicates that the channel should be force + // closed. + LinkFailureForceClose + + // LinkFailureDisconnect indicates that we should disconnect in an + // attempt to recycle the connection. This can be useful if we think a + // TCP connection or state machine is stalled. + LinkFailureDisconnect +) + // LinkFailureError encapsulates an error that will make us fail the current // link. 
It contains the necessary information needed to determine if we should // force close the channel in the process, and if any error data should be sent @@ -61,9 +79,8 @@ type LinkFailureError struct { // code is the type of error this LinkFailureError encapsulates. code errorCode - // ForceClose indicates whether we should force close the channel - // because of this error. - ForceClose bool + // FailureAction describes what we should do to fail the channel. + FailureAction LinkFailureAction // PermanentFailure indicates whether this failure is permanent, and // the channel should not be attempted loaded again. diff --git a/itest/lnd_mpp_test.go b/itest/lnd_mpp_test.go index e54da0aa49..6906582083 100644 --- a/itest/lnd_mpp_test.go +++ b/itest/lnd_mpp_test.go @@ -296,6 +296,9 @@ func (m *mppTestScenario) openChannels(r *mppOpenChannelRequest) { for _, cp := range m.channelPoints { m.ht.AssertTopologyChannelOpen(hn, cp) } + + // Each node should have exactly 6 edges. + m.ht.AssertNumEdges(hn, len(m.channelPoints), false) } } diff --git a/itest/lnd_multi-hop_test.go b/itest/lnd_multi-hop_test.go index 61b5cf9faa..2985cc23b7 100644 --- a/itest/lnd_multi-hop_test.go +++ b/itest/lnd_multi-hop_test.go @@ -1940,23 +1940,52 @@ func runExtraPreimageFromRemoteCommit(ht *lntest.HarnessTest, numBlocks = htlc.ExpirationHeight - uint32(height) - lncfg.DefaultOutgoingBroadcastDelta - // Mine empty blocks so Carol's htlc success tx stays in mempool. Once - // the height is reached, Bob's timeout resolver will resolve the htlc - // by extracing the preimage from the mempool. - ht.MineEmptyBlocks(int(numBlocks)) + // We should now have Carol's htlc success tx in the mempool. + numTxesMempool := 1 // For neutrino backend, the timeout resolver needs to extract the // preimage from the blocks. if ht.IsNeutrinoBackend() { // Mine a block to confirm Carol's 2nd level success tx. 
ht.MineBlocksAndAssertNumTxes(1, 1) + numTxesMempool-- } + // Mine empty blocks so Carol's htlc success tx stays in mempool. Once + // the height is reached, Bob's timeout resolver will resolve the htlc + // by extracting the preimage from the mempool. + ht.MineEmptyBlocks(int(numBlocks)) // Finally, check that the Alice's payment is marked as succeeded as // Bob has settled the htlc using the preimage extracted from Carol's // 2nd level success tx. ht.AssertPaymentStatus(alice, preimage, lnrpc.Payment_SUCCEEDED) + switch c { + // For non-anchor channel type, we should expect to see Bob's commit + // sweep in the mempool. + case lnrpc.CommitmentType_LEGACY: + numTxesMempool++ + + // For anchor channel type, we should expect to see Bob's commit + // sweep and his anchor sweep tx in the mempool. + case lnrpc.CommitmentType_ANCHORS: + numTxesMempool += 2 + + // For script-enforced leased channel, we should expect to see Bob's + // anchor sweep tx in the mempool. + case lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE: + numTxesMempool++ + + // For neutrino backend, because of the additional block mined, + // Bob's output is now mature. + if ht.IsNeutrinoBackend() { + numTxesMempool++ + } + } + + // Mine a block to clean the mempool. + ht.MineBlocksAndAssertNumTxes(1, numTxesMempool) + // NOTE: for non-standby nodes there's no need to clean up the force // close as long as the mempool is cleaned. ht.CleanShutDown() diff --git a/itest/lnd_switch_test.go b/itest/lnd_switch_test.go index fc3a6cd5be..be1992ed4c 100644 --- a/itest/lnd_switch_test.go +++ b/itest/lnd_switch_test.go @@ -264,6 +264,7 @@ func testSwitchOfflineDeliveryOutgoingOffline(ht *lntest.HarnessTest) { // three channels. Note that we won't call the cleanUp function here as // we will manually stop the node Carol and her channel. s := setupScenarioFourNodes(ht) + defer s.cleanUp() // Disconnect the two intermediaries, Alice and Dave, so that when carol // restarts, the response will be held by Dave. 
@@ -296,7 +297,7 @@ func testSwitchOfflineDeliveryOutgoingOffline(ht *lntest.HarnessTest) { // Shutdown carol and leave her offline for the rest of the test. This // is critical, as we wish to see if Dave can propragate settles even if // the outgoing link is never revived. - ht.Shutdown(s.carol) + restartCarol := ht.SuspendNode(s.carol) // Now restart Dave, ensuring he is both persisting the settles, and is // able to reforward them to Alice after recovering from a restart. @@ -339,8 +340,8 @@ func testSwitchOfflineDeliveryOutgoingOffline(ht *lntest.HarnessTest) { amountPaid+(baseFee*numPayments)*2, int64(0), ) - ht.CloseChannel(s.alice, s.chanPointAliceBob) - ht.CloseChannel(s.dave, s.chanPointDaveAlice) + // Finally, restart Carol so the cleanup process can be finished. + require.NoError(ht, restartCarol()) } // scenarioFourNodes specifies a scenario which we have a topology that has diff --git a/lntest/harness.go b/lntest/harness.go index b574711939..1c836881d0 100644 --- a/lntest/harness.go +++ b/lntest/harness.go @@ -1632,18 +1632,12 @@ func (h *HarnessTest) cleanMempool() { blocks := h.Miner.MineBlocksSlow(1) bestBlock = blocks[len(blocks)-1] + // Make sure all the active nodes are synced. + h.AssertActiveNodesSyncedTo(bestBlock) + return fmt.Errorf("still have %d txes in mempool", len(mem)) }, wait.MinerMempoolTimeout) require.NoError(h, err, "timeout cleaning up mempool") - - // Exit early if the best block is nil, which means we haven't mined - // any blocks during the cleanup. - if bestBlock == nil { - return - } - - // Make sure all the active nodes are synced. 
- h.AssertActiveNodesSyncedTo(bestBlock) } // CleanShutDown is used to quickly end a test by shutting down all non-standby diff --git a/macaroons/store.go b/macaroons/store.go index 09da988858..cca548d98c 100644 --- a/macaroons/store.go +++ b/macaroons/store.go @@ -54,6 +54,10 @@ var ( // ErrEncKeyNotFound specifies that there was no encryption key found // even if one was expected to be generated. ErrEncKeyNotFound = fmt.Errorf("macaroon encryption key not found") + + // ErrDefaultRootKeyNotFound is returned when the default root key is + // not found in the DB when it is expected to be. + ErrDefaultRootKeyNotFound = fmt.Errorf("default root key not found") ) // RootKeyStorage implements the bakery.RootKeyStorage interface. @@ -140,8 +144,8 @@ func (r *RootKeyStorage) CreateUnlock(password *[]byte) error { }, func() {}) } -// ChangePassword decrypts the macaroon root key with the old password and then -// encrypts it again with the new password. +// ChangePassword decrypts all the macaroon root keys with the old password and +// then encrypts them again with the new password. func (r *RootKeyStorage) ChangePassword(oldPw, newPw []byte) error { // We need the store to already be unlocked. With this we can make sure // that there already is a key in the DB. @@ -159,19 +163,18 @@ func (r *RootKeyStorage) ChangePassword(oldPw, newPw []byte) error { if bucket == nil { return ErrRootKeyBucketNotFound } - encKeyDb := bucket.Get(encryptionKeyID) - rootKeyDb := bucket.Get(DefaultRootKeyID) - // Both the encryption key and the root key must be present - // otherwise we are in the wrong state to change the password. - if len(encKeyDb) == 0 || len(rootKeyDb) == 0 { + // The encryption key must be present, otherwise we are in the + // wrong state to change the password. + encKeyDB := bucket.Get(encryptionKeyID) + if len(encKeyDB) == 0 { return ErrEncKeyNotFound } // Unmarshal parameters for old encryption key and derive the // old key with them. 
encKeyOld := &snacl.SecretKey{} - err := encKeyOld.Unmarshal(encKeyDb) + err := encKeyOld.Unmarshal(encKeyDB) if err != nil { return err } @@ -188,21 +191,42 @@ func (r *RootKeyStorage) ChangePassword(oldPw, newPw []byte) error { return err } - // Now try to decrypt the root key with the old encryption key, - // encrypt it with the new one and then store it in the DB. - decryptedKey, err := encKeyOld.Decrypt(rootKeyDb) - if err != nil { - return err - } - rootKey := make([]byte, len(decryptedKey)) - copy(rootKey, decryptedKey) - encryptedKey, err := encKeyNew.Encrypt(rootKey) + // foundDefaultRootKey is used to keep track of if we have + // found and re-encrypted the default root key so that we can + // return an error if it is not found. + var foundDefaultRootKey bool + err = bucket.ForEach(func(k, v []byte) error { + // Skip the key if it is the encryption key ID since + // we do not want to re-encrypt this. + if bytes.Equal(k, encryptionKeyID) { + return nil + } + + if bytes.Equal(k, DefaultRootKeyID) { + foundDefaultRootKey = true + } + + // Now try to decrypt the root key with the old + // encryption key, encrypt it with the new one and then + // store it in the DB. + decryptedKey, err := encKeyOld.Decrypt(v) + if err != nil { + return err + } + + encryptedKey, err := encKeyNew.Encrypt(decryptedKey) + if err != nil { + return err + } + + return bucket.Put(k, encryptedKey) + }) if err != nil { return err } - err = bucket.Put(DefaultRootKeyID, encryptedKey) - if err != nil { - return err + + if !foundDefaultRootKey { + return ErrDefaultRootKeyNotFound } // Finally, store the new encryption key parameters in the DB @@ -325,10 +349,34 @@ func (r *RootKeyStorage) GenerateNewRootKey() error { if bucket == nil { return ErrRootKeyBucketNotFound } + + // The default root key should be created even if it does not + // yet exist, so we do this separately from the rest of the + // root keys. 
_, err := generateAndStoreNewRootKey( bucket, DefaultRootKeyID, r.encKey, ) - return err + if err != nil { + return err + } + + // Now iterate over all the other root keys that may exist + // and re-generate each of them. + return bucket.ForEach(func(k, v []byte) error { + if bytes.Equal(k, encryptionKeyID) { + return nil + } + + if bytes.Equal(k, DefaultRootKeyID) { + return nil + } + + _, err := generateAndStoreNewRootKey( + bucket, k, r.encKey, + ) + + return err + }) }, func() {}) } diff --git a/macaroons/store_test.go b/macaroons/store_test.go index a9a49edda9..ace1e764a1 100644 --- a/macaroons/store_test.go +++ b/macaroons/store_test.go @@ -16,6 +16,10 @@ var ( defaultRootKeyIDContext = macaroons.ContextWithRootKeyID( context.Background(), macaroons.DefaultRootKeyID, ) + + nonDefaultRootKeyIDContext = macaroons.ContextWithRootKeyID( + context.Background(), []byte{1}, + ) ) // newTestStore creates a new bolt DB in a temporary directory and then @@ -131,8 +135,8 @@ func TestStore(t *testing.T) { require.Equal(t, rootID, id) } -// TestStoreGenerateNewRootKey tests that a root key can be replaced with a new -// one in the store without changing the password. +// TestStoreGenerateNewRootKey tests that root keys can be replaced with new +// ones in the store without changing the password. func TestStoreGenerateNewRootKey(t *testing.T) { _, store := newTestStore(t) @@ -140,23 +144,33 @@ func TestStoreGenerateNewRootKey(t *testing.T) { err := store.GenerateNewRootKey() require.Equal(t, macaroons.ErrStoreLocked, err) - // Unlock the store and read the current key. + // Unlock the store. pw := []byte("weks") err = store.CreateUnlock(&pw) require.NoError(t, err) - oldRootKey, _, err := store.RootKey(defaultRootKeyIDContext) + + // Read the default root key. + oldRootKey1, _, err := store.RootKey(defaultRootKeyIDContext) require.NoError(t, err) - // Replace the root key with a new random key. + // Read the non-default root-key. 
+ oldRootKey2, _, err := store.RootKey(nonDefaultRootKeyIDContext) + require.NoError(t, err) + + // Replace the root keys with new random keys. err = store.GenerateNewRootKey() require.NoError(t, err) - // Finally, read the root key from the DB and compare it to the one + // Finally, read both root keys from the DB and compare them to the ones // we got returned earlier. This makes sure that the encryption/ // decryption of the key in the DB worked as expected too. - newRootKey, _, err := store.RootKey(defaultRootKeyIDContext) + newRootKey1, _, err := store.RootKey(defaultRootKeyIDContext) require.NoError(t, err) - require.NotEqual(t, oldRootKey, newRootKey) + require.NotEqual(t, oldRootKey1, newRootKey1) + + newRootKey2, _, err := store.RootKey(nonDefaultRootKeyIDContext) + require.NoError(t, err) + require.NotEqual(t, oldRootKey2, newRootKey2) } // TestStoreSetRootKey tests that a root key can be set to a specified value. @@ -195,20 +209,25 @@ func TestStoreSetRootKey(t *testing.T) { } // TestStoreChangePassword tests that the password for the store can be changed -// without changing the root key. +// without changing the root keys. func TestStoreChangePassword(t *testing.T) { tempDir, store := newTestStore(t) - // The store must be unlocked to replace the root key. + // The store must be unlocked to replace the root keys. err := store.ChangePassword(nil, nil) require.Equal(t, macaroons.ErrStoreLocked, err) - // Unlock the DB and read the current root key. This will need to stay - // the same after changing the password for the test to succeed. + // Unlock the DB and read the current default root key and one other + // non-default root key. Both of these should stay the same after + // changing the password for the test to succeed. 
pw := []byte("weks") err = store.CreateUnlock(&pw) require.NoError(t, err) - rootKey, _, err := store.RootKey(defaultRootKeyIDContext) + + rootKey1, _, err := store.RootKey(defaultRootKeyIDContext) + require.NoError(t, err) + + rootKey2, _, err := store.RootKey(nonDefaultRootKeyIDContext) require.NoError(t, err) // Both passwords must be set. @@ -242,9 +261,13 @@ func TestStoreChangePassword(t *testing.T) { err = store.CreateUnlock(&newPw) require.NoError(t, err) - // Finally read the root key from the DB using the new password and - // make sure the root key stayed the same. - rootKeyDb, _, err := store.RootKey(defaultRootKeyIDContext) + // Finally, read the root keys from the DB using the new password and + // make sure that both root keys stayed the same. + rootKeyDB1, _, err := store.RootKey(defaultRootKeyIDContext) + require.NoError(t, err) + require.Equal(t, rootKey1, rootKeyDB1) + + rootKeyDB2, _, err := store.RootKey(nonDefaultRootKeyIDContext) require.NoError(t, err) - require.Equal(t, rootKey, rootKeyDb) + require.Equal(t, rootKey2, rootKeyDB2) } diff --git a/peer/brontide.go b/peer/brontide.go index 82b11e310c..79ec43d9e4 100644 --- a/peer/brontide.go +++ b/peer/brontide.go @@ -3094,11 +3094,10 @@ func (p *Brontide) handleLinkFailure(failure linkFailureReport) { // being applied. p.WipeChannel(&failure.chanPoint) - // If the error encountered was severe enough, we'll now force close the - // channel to prevent reading it to the switch in the future. - if failure.linkErr.ForceClose { - p.log.Warnf("Force closing link(%v)", - failure.shortChanID) + // If the error encountered was severe enough, we'll now force close + // the channel to prevent reading it to the switch in the future. 
+ if failure.linkErr.FailureAction == htlcswitch.LinkFailureForceClose { + p.log.Warnf("Force closing link(%v)", failure.shortChanID) closeTx, err := p.cfg.ChainArb.ForceCloseContract( failure.chanPoint, @@ -3143,6 +3142,13 @@ func (p *Brontide) handleLinkFailure(failure linkFailureReport) { "remote peer: %v", err) } } + + // If the failure action is disconnect, then we'll execute that now. If + // we had to send an error above, it was a sync call, so we expect the + // message to be flushed on the wire by now. + if failure.linkErr.FailureAction == htlcswitch.LinkFailureDisconnect { + p.Disconnect(fmt.Errorf("link requested disconnect")) + } } // tryLinkShutdown attempts to fetch a target link from the switch, calls