Mirror of https://github.com/aljazceru/ark.git (synced 2026-01-25 06:14:19 +01:00)
Support macaroons and TLS && Add arkd wallet cmds (#232)
* Update protos
* Update handlers
* Support macaroons and TLS
* Add arkd cli
* Minor fixes
* Update deps
* Fixes
* Update makefile
* Fixes
* Fix
* Fix
* Fix
* Remove trusted onboarding from client
* Completely remove trusted onboarding
* Fix compose files and add --no-macaroon flag to arkd cli
* Lint
* Remove e2e for trusted onboarding
* Add sleep time
committed via GitHub
parent 059e837794
commit 57ce08f239
server/pkg/kvdb/LICENSE (new file, 19 lines)
@@ -0,0 +1,19 @@
Copyright (C) 2015-2022 Lightning Labs and The Lightning Network Developers

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
server/pkg/kvdb/backend.go (new file, 274 lines)
@@ -0,0 +1,274 @@
//go:build !js
// +build !js

package kvdb

import (
    "context"
    "encoding/binary"
    "fmt"
    "os"
    "path/filepath"
    "time"

    _ "github.com/btcsuite/btcwallet/walletdb/bdb" // Import to register backend.
    log "github.com/sirupsen/logrus"
)

const (
    // DefaultTempDBFileName is the default name of the temporary bolt DB
    // file that we'll use to atomically compact the primary DB file on
    // startup.
    DefaultTempDBFileName = "temp-dont-use.db"

    // LastCompactionFileNameSuffix is the suffix we append to the file name
    // of a database file to record the timestamp when the last compaction
    // occurred.
    LastCompactionFileNameSuffix = ".last-compacted"
)

var (
    byteOrder = binary.BigEndian
)

// fileExists returns true if the file exists, and false otherwise.
func fileExists(path string) bool {
    if _, err := os.Stat(path); err != nil {
        if os.IsNotExist(err) {
            return false
        }
    }

    return true
}

// BoltBackendConfig is a struct that holds settings specific to the bolt
// database backend.
type BoltBackendConfig struct {
    // DBPath is the directory path in which the database file should be
    // stored.
    DBPath string

    // DBFileName is the name of the database file.
    DBFileName string

    // NoFreelistSync, if true, prevents the database from syncing its
    // freelist to disk, resulting in improved performance at the expense of
    // increased startup time.
    NoFreelistSync bool

    // AutoCompact specifies if a Bolt based database backend should be
    // automatically compacted on startup (if the minimum age of the
    // database file is reached). This will require additional disk space
    // for the compacted copy of the database but will result in an overall
    // lower database size after the compaction.
    AutoCompact bool

    // AutoCompactMinAge specifies the minimum time that must have passed
    // since a bolt database file was last compacted for the compaction to
    // be considered again.
    AutoCompactMinAge time.Duration

    // DBTimeout specifies the timeout value to use when opening the wallet
    // database.
    DBTimeout time.Duration
}

// GetBoltBackend opens (or creates if it doesn't exist) a bbolt backed
// database and returns a kvdb.Backend wrapping it.
func GetBoltBackend(cfg *BoltBackendConfig) (Backend, error) {
    dbFilePath := filepath.Join(cfg.DBPath, cfg.DBFileName)

    // Is this a new database?
    if !fileExists(dbFilePath) {
        if !fileExists(cfg.DBPath) {
            if err := os.MkdirAll(cfg.DBPath, 0700); err != nil {
                return nil, err
            }
        }

        return Create(
            BoltBackendName, dbFilePath,
            cfg.NoFreelistSync, cfg.DBTimeout,
        )
    }

    // This is an existing database. We might want to compact it on startup
    // to free up some space.
    if cfg.AutoCompact {
        if err := compactAndSwap(cfg); err != nil {
            return nil, err
        }
    }

    return Open(
        BoltBackendName, dbFilePath,
        cfg.NoFreelistSync, cfg.DBTimeout,
    )
}

// compactAndSwap will attempt to write a new temporary DB file to disk with
// the compacted database content, then atomically swap (via rename) the old
// file for the new file by updating the name of the new file to the old.
func compactAndSwap(cfg *BoltBackendConfig) error {
    sourceName := cfg.DBFileName

    // If the main DB file isn't set, then we can't proceed.
    if sourceName == "" {
        return fmt.Errorf("cannot compact DB with empty name")
    }
    sourceFilePath := filepath.Join(cfg.DBPath, sourceName)
    tempDestFilePath := filepath.Join(cfg.DBPath, DefaultTempDBFileName)

    // Let's find out how long ago the last compaction of the source file
    // occurred and possibly skip compacting it again now.
    lastCompactionDate, err := lastCompactionDate(sourceFilePath)
    if err != nil {
        return fmt.Errorf("cannot determine last compaction date of "+
            "source DB file: %v", err)
    }
    compactAge := time.Since(lastCompactionDate)
    if cfg.AutoCompactMinAge != 0 && compactAge <= cfg.AutoCompactMinAge {
        log.Infof("Not compacting database file at %v, it was last "+
            "compacted at %v (%v ago), min age is set to %v",
            sourceFilePath, lastCompactionDate,
            compactAge.Truncate(time.Second), cfg.AutoCompactMinAge)
        return nil
    }

    log.Infof("Compacting database file at %v", sourceFilePath)

    // If the old temporary DB file still exists, then we'll delete it
    // before proceeding.
    if _, err := os.Stat(tempDestFilePath); err == nil {
        log.Infof("Found old temp DB @ %v, removing before swap",
            tempDestFilePath)

        err = os.Remove(tempDestFilePath)
        if err != nil {
            return fmt.Errorf("unable to remove old temp DB file: "+
                "%v", err)
        }
    }

    // Now that we know the staging area is clear, we'll create the new
    // temporary DB file and close it before we write the new DB to it.
    tempFile, err := os.Create(tempDestFilePath)
    if err != nil {
        return fmt.Errorf("unable to create temp DB file: %w", err)
    }
    if err := tempFile.Close(); err != nil {
        return fmt.Errorf("unable to close file: %w", err)
    }

    // With the file created, we'll start the compaction and remove the
    // temporary file all together once this method exits.
    defer func() {
        // This will only succeed if the rename below fails. If the
        // compaction is successful, the file won't exist on exit
        // anymore so no need to log an error here.
        _ = os.Remove(tempDestFilePath)
    }()
    c := &compacter{
        srcPath: sourceFilePath,
        dstPath: tempDestFilePath,
        dbTimeout: cfg.DBTimeout,
    }
    initialSize, newSize, err := c.execute()
    if err != nil {
        return fmt.Errorf("error during compact: %w", err)
    }

    log.Infof("DB compaction of %v successful, %d -> %d bytes (gain=%.2fx)",
        sourceFilePath, initialSize, newSize,
        float64(initialSize)/float64(newSize))

    // We try to store the current timestamp in a file with the suffix
    // .last-compacted so we can figure out how long ago the last compaction
    // was. But since this shouldn't fail the compaction process itself, we
    // only log the error. Worst case if this file cannot be written is that
    // we compact on every startup.
    err = updateLastCompactionDate(sourceFilePath)
    if err != nil {
        log.Warnf("Could not update last compaction timestamp in "+
            "%s%s: %v", sourceFilePath,
            LastCompactionFileNameSuffix, err)
    }

    log.Infof("Swapping old DB file from %v to %v", tempDestFilePath,
        sourceFilePath)

    // Finally, we'll attempt to atomically rename the temporary file to
    // the main back up file. If this succeeds, then we'll only have a
    // single file on disk once this method exits.
    return os.Rename(tempDestFilePath, sourceFilePath)
}

// lastCompactionDate returns the date the given database file was last
// compacted or a zero time.Time if no compaction was recorded before. The
// compaction date is read from a file in the same directory and with the same
// name as the DB file, but with the suffix ".last-compacted".
func lastCompactionDate(dbFile string) (time.Time, error) {
    zeroTime := time.Unix(0, 0)

    tsFile := fmt.Sprintf("%s%s", dbFile, LastCompactionFileNameSuffix)
    if !fileExists(tsFile) {
        return zeroTime, nil
    }

    tsBytes, err := os.ReadFile(tsFile)
    if err != nil {
        return zeroTime, err
    }

    tsNano := byteOrder.Uint64(tsBytes)
    return time.Unix(0, int64(tsNano)), nil
}

// updateLastCompactionDate stores the current time as a timestamp in a file
// in the same directory and with the same name as the DB file, but with the
// suffix ".last-compacted".
func updateLastCompactionDate(dbFile string) error {
    var tsBytes [8]byte
    byteOrder.PutUint64(tsBytes[:], uint64(time.Now().UnixNano()))

    tsFile := fmt.Sprintf("%s%s", dbFile, LastCompactionFileNameSuffix)
    return os.WriteFile(tsFile, tsBytes[:], 0600)
}

// GetTestBackend opens (or creates if it doesn't exist) a bbolt or etcd
// backed database (for testing), and returns a kvdb.Backend and a cleanup
// func. Whether to create/open bbolt or embedded etcd database is based
// on the TestBackend constant which is conditionally compiled with build tag.
// The passed path is used to hold all db files, while the name is only used
// for bbolt.
func GetTestBackend(path, name string) (Backend, func(), error) {
    empty := func() {}

    // Note that for tests, we expect only one db backend build flag
    // (or none) to be set at a time and thus only one of the following
    // switch cases should ever be true.
    switch {
    case EtcdBackend:
        etcdConfig, cancel, err := StartEtcdTestBackend(path, 0, 0, "")
        if err != nil {
            return nil, empty, err
        }
        backend, err := Open(
            EtcdBackendName, context.TODO(), etcdConfig,
        )
        return backend, cancel, err

    default:
        db, err := GetBoltBackend(&BoltBackendConfig{
            DBPath: path,
            DBFileName: name,
            NoFreelistSync: true,
            DBTimeout: DefaultDBTimeout,
        })
        if err != nil {
            return nil, nil, err
        }
        return db, empty, nil
    }
}
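Editor's note: as a rough illustration of how the new backend helpers fit together, the following is a minimal sketch (not part of the diff) that opens a bolt-backed database with auto-compaction enabled, using only the BoltBackendConfig fields and defaults introduced above. The import path and the "./data"/"wallet.db" values are assumptions for illustration, and it is assumed that Backend exposes the usual walletdb Close method.

package main

import (
    "log"

    "github.com/ark-network/ark/server/pkg/kvdb" // assumed import path for this package
)

func main() {
    // Open (or create) wallet.db under ./data, compacting it on startup if
    // the last compaction is older than the default minimum age (one week).
    db, err := kvdb.GetBoltBackend(&kvdb.BoltBackendConfig{
        DBPath: "./data",
        DBFileName: "wallet.db",
        NoFreelistSync: true,
        AutoCompact: true,
        AutoCompactMinAge: kvdb.DefaultBoltAutoCompactMinAge,
        DBTimeout: kvdb.DefaultDBTimeout,
    })
    if err != nil {
        log.Fatalf("unable to open kvdb backend: %v", err)
    }
    // Backend is assumed to embed walletdb.DB, so it can be closed when done.
    defer db.Close()
}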
server/pkg/kvdb/backend_js.go (new file, 48 lines)
@@ -0,0 +1,48 @@
package kvdb

import (
    "fmt"
    "time"
)

// BoltBackendConfig is a struct that holds settings specific to the bolt
// database backend.
type BoltBackendConfig struct {
    // DBPath is the directory path in which the database file should be
    // stored.
    DBPath string

    // DBFileName is the name of the database file.
    DBFileName string

    // NoFreelistSync, if true, prevents the database from syncing its
    // freelist to disk, resulting in improved performance at the expense of
    // increased startup time.
    NoFreelistSync bool

    // AutoCompact specifies if a Bolt based database backend should be
    // automatically compacted on startup (if the minimum age of the
    // database file is reached). This will require additional disk space
    // for the compacted copy of the database but will result in an overall
    // lower database size after the compaction.
    AutoCompact bool

    // AutoCompactMinAge specifies the minimum time that must have passed
    // since a bolt database file was last compacted for the compaction to
    // be considered again.
    AutoCompactMinAge time.Duration

    // DBTimeout specifies the timeout value to use when opening the wallet
    // database.
    DBTimeout time.Duration
}

// GetBoltBackend opens (or creates if it doesn't exist) a bbolt backed
// database and returns a kvdb.Backend wrapping it.
func GetBoltBackend(cfg *BoltBackendConfig) (Backend, error) {
    return nil, fmt.Errorf("bolt backend not supported in WebAssembly")
}

func GetTestBackend(path, name string) (Backend, func(), error) {
    return nil, nil, fmt.Errorf("bolt backend not supported in WebAssembly")
}
server/pkg/kvdb/bolt_compact.go (new file, 283 lines)
@@ -0,0 +1,283 @@
// The code in this file is an adapted version of the bbolt compact command
// implemented in this file:
// https://github.com/etcd-io/bbolt/blob/master/cmd/bbolt/main.go

//go:build !js
// +build !js

package kvdb

import (
    "encoding/hex"
    "fmt"
    "os"
    "path"
    "time"

    "github.com/lightningnetwork/lnd/healthcheck"
    log "github.com/sirupsen/logrus"
    "go.etcd.io/bbolt"
)

const (
    // defaultResultFileSizeMultiplier is the default multiplier we apply to
    // the current database size to calculate how big it could possibly get
    // after compacting, in case the database is already at its optimal size
    // and compaction causes it to grow. This should normally not be the
    // case but we really want to avoid not having enough disk space for the
    // compaction, so we apply a safety margin of 10%.
    defaultResultFileSizeMultiplier = float64(1.1)

    // defaultTxMaxSize is the default maximum number of operations that
    // are allowed to be executed in a single transaction.
    defaultTxMaxSize = 65536

    // bucketFillSize is the fill size setting that is used for each new
    // bucket that is created in the compacted database. This setting is not
    // persisted and is therefore only effective for the compaction itself.
    // Because during the compaction we only append data, a fill percent of
    // 100% is optimal for performance.
    bucketFillSize = 1.0
)

type compacter struct {
    srcPath string
    dstPath string
    txMaxSize int64

    // dbTimeout specifies the timeout value used when opening the db.
    dbTimeout time.Duration
}

// execute opens the source and destination databases and then compacts the
// source into destination and returns the size of both files as a result.
func (cmd *compacter) execute() (int64, int64, error) {
    if cmd.txMaxSize == 0 {
        cmd.txMaxSize = defaultTxMaxSize
    }

    // Ensure source file exists.
    fi, err := os.Stat(cmd.srcPath)
    if err != nil {
        return 0, 0, fmt.Errorf("error determining source database "+
            "size: %v", err)
    }
    initialSize := fi.Size()
    marginSize := float64(initialSize) * defaultResultFileSizeMultiplier

    // Before opening any of the databases, let's first make sure we have
    // enough free space on the destination file system to create a full
    // copy of the source DB (worst-case scenario if the compaction doesn't
    // actually shrink the file size).
    destFolder := path.Dir(cmd.dstPath)
    freeSpace, err := healthcheck.AvailableDiskSpace(destFolder)
    if err != nil {
        return 0, 0, fmt.Errorf("error determining free disk space on "+
            "%s: %v", destFolder, err)
    }
    log.Debugf("Free disk space on compaction destination file system: "+
        "%d bytes", freeSpace)
    if freeSpace < uint64(marginSize) {
        return 0, 0, fmt.Errorf("could not start compaction, "+
            "destination folder %s only has %d bytes of free disk "+
            "space available while we need at least %d for worst-"+
            "case compaction", destFolder, freeSpace, uint64(marginSize))
    }

    // Open source database. We open it in read only mode to avoid (and fix)
    // possible freelist sync problems.
    src, err := bbolt.Open(cmd.srcPath, 0444, &bbolt.Options{
        ReadOnly: true,
        Timeout: cmd.dbTimeout,
    })
    if err != nil {
        return 0, 0, fmt.Errorf("error opening source database: %w",
            err)
    }
    defer func() {
        if err := src.Close(); err != nil {
            log.Errorf("Compact error: closing source DB: %v", err)
        }
    }()

    // Open destination database.
    dst, err := bbolt.Open(cmd.dstPath, fi.Mode(), &bbolt.Options{
        Timeout: cmd.dbTimeout,
    })
    if err != nil {
        return 0, 0, fmt.Errorf("error opening destination database: "+
            "%w", err)
    }
    defer func() {
        if err := dst.Close(); err != nil {
            log.Errorf("Compact error: closing dest DB: %v", err)
        }
    }()

    // Run compaction.
    if err := cmd.compact(dst, src); err != nil {
        return 0, 0, fmt.Errorf("error running compaction: %w", err)
    }

    // Report stats on new size.
    fi, err = os.Stat(cmd.dstPath)
    if err != nil {
        return 0, 0, fmt.Errorf("error determining destination "+
            "database size: %w", err)
    } else if fi.Size() == 0 {
        return 0, 0, fmt.Errorf("zero db size")
    }

    return initialSize, fi.Size(), nil
}

// compact tries to create a compacted copy of the source database in a new
// destination database.
func (cmd *compacter) compact(dst, src *bbolt.DB) error {
    // Commit regularly, or we'll run out of memory for large datasets if
    // using one transaction.
    var size int64
    tx, err := dst.Begin(true)
    if err != nil {
        return err
    }
    defer func() {
        _ = tx.Rollback()
    }()

    if err := cmd.walk(src, func(keys [][]byte, k, v []byte, seq uint64) error {
        // On each key/value, check if we have exceeded tx size.
        sz := int64(len(k) + len(v))
        if size+sz > cmd.txMaxSize && cmd.txMaxSize != 0 {
            // Commit previous transaction.
            if err := tx.Commit(); err != nil {
                return err
            }

            // Start new transaction.
            tx, err = dst.Begin(true)
            if err != nil {
                return err
            }
            size = 0
        }
        size += sz

        // Create bucket on the root transaction if this is the first
        // level.
        nk := len(keys)
        if nk == 0 {
            bkt, err := tx.CreateBucket(k)
            if err != nil {
                return err
            }
            if err := bkt.SetSequence(seq); err != nil {
                return err
            }
            return nil
        }

        // Create buckets on subsequent levels, if necessary.
        b := tx.Bucket(keys[0])
        if nk > 1 {
            for _, k := range keys[1:] {
                b = b.Bucket(k)
            }
        }

        // Fill the entire page for best compaction.
        b.FillPercent = bucketFillSize

        // If there is no value then this is a bucket call.
        if v == nil {
            bkt, err := b.CreateBucket(k)
            if err != nil {
                return err
            }
            if err := bkt.SetSequence(seq); err != nil {
                return err
            }
            return nil
        }

        // Otherwise treat it as a key/value pair.
        return b.Put(k, v)
    }); err != nil {
        return err
    }

    return tx.Commit()
}

// walkFunc is the type of the function called for keys (buckets and "normal"
// values) discovered by Walk. keys is the list of keys to descend to the bucket
// owning the discovered key/value pair k/v.
type walkFunc func(keys [][]byte, k, v []byte, seq uint64) error

// walk walks recursively the bolt database db, calling walkFn for each key it
// finds.
func (cmd *compacter) walk(db *bbolt.DB, walkFn walkFunc) error {
    return db.View(func(tx *bbolt.Tx) error {
        return tx.ForEach(func(name []byte, b *bbolt.Bucket) error {
            // This will log the top level buckets only to give the
            // user some sense of progress.
            log.Debugf("Compacting top level bucket '%s'",
                LoggableKeyName(name))

            return cmd.walkBucket(
                b, nil, name, nil, b.Sequence(), walkFn,
            )
        })
    })
}

// LoggableKeyName returns a printable name of the given key.
func LoggableKeyName(key []byte) string {
    strKey := string(key)
    if hasSpecialChars(strKey) {
        return hex.EncodeToString(key)
    }

    return strKey
}

// hasSpecialChars returns true if any of the characters in the given string
// cannot be printed.
func hasSpecialChars(s string) bool {
    for _, b := range s {
        if !(b >= 'a' && b <= 'z') && !(b >= 'A' && b <= 'Z') &&
            !(b >= '0' && b <= '9') && b != '-' && b != '_' {

            return true
        }
    }

    return false
}

// walkBucket recursively walks through a bucket.
func (cmd *compacter) walkBucket(b *bbolt.Bucket, keyPath [][]byte, k, v []byte,
    seq uint64, fn walkFunc) error {

    // Execute callback.
    if err := fn(keyPath, k, v, seq); err != nil {
        return err
    }

    // If this is not a bucket then stop.
    if v != nil {
        return nil
    }

    // Iterate over each child key/value.
    keyPath = append(keyPath, k)
    return b.ForEach(func(k, v []byte) error {
        if v == nil {
            bkt := b.Bucket(k)
            return cmd.walkBucket(
                bkt, keyPath, k, nil, bkt.Sequence(), fn,
            )
        }
        return cmd.walkBucket(b, keyPath, k, v, b.Sequence(), fn)
    })
}
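Editor's note: to make the 10% safety margin above concrete, here is a small, self-contained sketch (not part of the diff) of the same free-space check with made-up numbers: for a 100 MiB source database, compaction refuses to start unless at least 110 MiB are free on the destination file system.

package main

import "fmt"

func main() {
    const resultFileSizeMultiplier = 1.1 // mirrors defaultResultFileSizeMultiplier

    initialSize := int64(100 * 1024 * 1024) // 100 MiB source DB
    marginSize := float64(initialSize) * resultFileSizeMultiplier

    freeSpace := uint64(105 * 1024 * 1024) // only 105 MiB free
    if freeSpace < uint64(marginSize) {
        fmt.Printf("need %d bytes free, have %d: compaction refused\n",
            uint64(marginSize), freeSpace)
    }
}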
server/pkg/kvdb/bolt_fixture.go (new file, 35 lines)
@@ -0,0 +1,35 @@
package kvdb

import (
    "path/filepath"
    "testing"

    "github.com/btcsuite/btcwallet/walletdb"
    "github.com/stretchr/testify/require"
)

type boltFixture struct {
    t *testing.T
    tempDir string
}

func NewBoltFixture(t *testing.T) *boltFixture {
    return &boltFixture{
        t: t,
        tempDir: t.TempDir(),
    }
}

func (b *boltFixture) NewBackend() walletdb.DB {
    dbPath := filepath.Join(b.tempDir)

    db, err := GetBoltBackend(&BoltBackendConfig{
        DBPath: dbPath,
        DBFileName: "test.db",
        NoFreelistSync: true,
        DBTimeout: DefaultDBTimeout,
    })
    require.NoError(b.t, err)

    return db
}
server/pkg/kvdb/bolt_test.go (new file, 83 lines)
@@ -0,0 +1,83 @@
package kvdb

import (
    "testing"

    "github.com/btcsuite/btcwallet/walletdb"
)

func TestBolt(t *testing.T) {
    tests := []struct {
        name string
        test func(*testing.T, walletdb.DB)
    }{
        {
            name: "read cursor empty interval",
            test: testReadCursorEmptyInterval,
        },
        {
            name: "read cursor non empty interval",
            test: testReadCursorNonEmptyInterval,
        },
        {
            name: "read write cursor",
            test: testReadWriteCursor,
        },
        {
            name: "read write cursor with bucket and value",
            test: testReadWriteCursorWithBucketAndValue,
        },
        {
            name: "bucket creation",
            test: testBucketCreation,
        },
        {
            name: "bucket deletion",
            test: testBucketDeletion,
        },
        {
            name: "bucket for each",
            test: testBucketForEach,
        },
        {
            name: "bucket for each with error",
            test: testBucketForEachWithError,
        },
        {
            name: "bucket sequence",
            test: testBucketSequence,
        },
        {
            name: "key clash",
            test: testKeyClash,
        },
        {
            name: "bucket create delete",
            test: testBucketCreateDelete,
        },
        {
            name: "tx manual commit",
            test: testTxManualCommit,
        },
        {
            name: "tx rollback",
            test: testTxRollback,
        },
        {
            name: "prefetch",
            test: testPrefetch,
        },
    }

    for _, test := range tests {
        test := test

        t.Run(test.name, func(t *testing.T) {
            t.Parallel()

            f := NewBoltFixture(t)

            test.test(t, f.NewBackend())
        })
    }
}
server/pkg/kvdb/config.go (new file, 45 lines)
@@ -0,0 +1,45 @@
package kvdb

import "time"

const (
    // BoltBackendName is the name of the backend that should be passed into
    // kvdb.Create to initialize a new instance of kvdb.Backend backed by a
    // live instance of bbolt.
    BoltBackendName = "bdb"

    // EtcdBackendName is the name of the backend that should be passed into
    // kvdb.Create to initialize a new instance of kvdb.Backend backed by a
    // live instance of etcd.
    EtcdBackendName = "etcd"

    // PostgresBackendName is the name of the backend that should be passed
    // into kvdb.Create to initialize a new instance of kvdb.Backend backed
    // by a live instance of postgres.
    PostgresBackendName = "postgres"

    // SqliteBackendName is the name of the backend that should be passed
    // into kvdb.Create to initialize a new instance of kvdb.Backend backed
    // by a live instance of sqlite.
    SqliteBackendName = "sqlite"

    // DefaultBoltAutoCompactMinAge is the default minimum time that must
    // have passed since a bolt database file was last compacted for the
    // compaction to be considered again.
    DefaultBoltAutoCompactMinAge = time.Hour * 24 * 7

    // DefaultDBTimeout specifies the default timeout value when opening
    // the bbolt database.
    DefaultDBTimeout = time.Second * 60
)

// BoltConfig holds bolt configuration.
type BoltConfig struct {
    NoFreelistSync bool `long:"nofreelistsync" description:"Whether the databases used within lnd should sync their freelist to disk. This is set to true by default, meaning we don't sync the free-list resulting in improved memory performance during operation, but with an increase in startup time."`

    AutoCompact bool `long:"auto-compact" description:"Whether the databases used within lnd should automatically be compacted on every startup (and if the database has the configured minimum age). This is disabled by default because it requires additional disk space to be available during the compaction that is freed afterwards. In general compaction leads to smaller database files."`

    AutoCompactMinAge time.Duration `long:"auto-compact-min-age" description:"How long ago the last compaction of a database file must be for it to be considered for auto compaction again. Can be set to 0 to compact on every startup."`

    DBTimeout time.Duration `long:"dbtimeout" description:"Specify the timeout value used when opening the database."`
}
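Editor's note: BoltConfig above only carries the user-facing options. One plausible way to wire it into the opener from backend.go is sketched below; this is not part of the diff, the helper name boltBackendFromConfig is hypothetical, and it is written as a package-level helper inside package kvdb.

// boltBackendFromConfig is a hypothetical helper showing how the user-facing
// BoltConfig options could map onto BoltBackendConfig. The dbPath and fileName
// arguments would come from the caller's own configuration.
func boltBackendFromConfig(cfg *BoltConfig, dbPath, fileName string) (Backend, error) {
    return GetBoltBackend(&BoltBackendConfig{
        DBPath: dbPath,
        DBFileName: fileName,
        NoFreelistSync: cfg.NoFreelistSync,
        AutoCompact: cfg.AutoCompact,
        AutoCompactMinAge: cfg.AutoCompactMinAge,
        DBTimeout: cfg.DBTimeout,
    })
}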
server/pkg/kvdb/debug.go (new file, 9 lines)
@@ -0,0 +1,9 @@
//go:build dev
// +build dev

package kvdb

const (
    // Switch on extra debug code.
    etcdDebug = true
)
server/pkg/kvdb/etcd/bucket.go (new file, 139 lines)
@@ -0,0 +1,139 @@
//go:build kvdb_etcd
// +build kvdb_etcd

package etcd

import (
    "crypto/sha256"
)

const (
    bucketIDLength = 32
)

var (
    valuePostfix = []byte{0x00}
    bucketPostfix = []byte{0xFF}
    sequencePrefix = []byte("$seq$")
)

// makeBucketID returns a deterministic key for the passed byte slice.
// Currently it returns the sha256 hash of the slice.
func makeBucketID(key []byte) [bucketIDLength]byte {
    return sha256.Sum256(key)
}

// isValidBucketID checks if the passed slice is the required length to be a
// valid bucket id.
func isValidBucketID(s []byte) bool {
    return len(s) == bucketIDLength
}

// makeKey concatenates parent, key and postfix into one byte slice.
// The postfix indicates the use of this key (whether bucket or value), while
// parent refers to the parent bucket.
func makeKey(parent, key, postfix []byte) []byte {
    keyBuf := make([]byte, len(parent)+len(key)+len(postfix))
    copy(keyBuf, parent)
    copy(keyBuf[len(parent):], key)
    copy(keyBuf[len(parent)+len(key):], postfix)

    return keyBuf
}

// makeBucketKey returns a bucket key from the passed parent bucket id and
// the key.
func makeBucketKey(parent []byte, key []byte) []byte {
    return makeKey(parent, key, bucketPostfix)
}

// makeValueKey returns a value key from the passed parent bucket id and
// the key.
func makeValueKey(parent []byte, key []byte) []byte {
    return makeKey(parent, key, valuePostfix)
}

// makeSequenceKey returns a sequence key of the passed parent bucket id.
func makeSequenceKey(parent []byte) []byte {
    keyBuf := make([]byte, len(sequencePrefix)+len(parent))
    copy(keyBuf, sequencePrefix)
    copy(keyBuf[len(sequencePrefix):], parent)
    return keyBuf
}

// isBucketKey returns true if the passed key is a bucket key, meaning it
// keys a bucket name.
func isBucketKey(key string) bool {
    if len(key) < bucketIDLength+1 {
        return false
    }

    return key[len(key)-1] == bucketPostfix[0]
}

// getKey chops out the key from the raw key (by removing the bucket id
// prefixing the key and the postfix indicating whether it is a bucket or
// a value key).
func getKey(rawKey string) []byte {
    return []byte(rawKey[bucketIDLength : len(rawKey)-1])
}

// getKeyVal chops out the key from the raw key (by removing the bucket id
// prefixing the key and the postfix indicating whether it is a bucket or
// a value key) and also returns the appropriate value for the key, which is
// nil in case of buckets (or the set value otherwise).
func getKeyVal(kv *KV) ([]byte, []byte) {
    var val []byte

    if !isBucketKey(kv.key) {
        val = []byte(kv.val)
    }

    return getKey(kv.key), val
}

// BucketKey is a helper function used in tests to create a bucket key from
// passed bucket list.
func BucketKey(buckets ...string) string {
    var bucketKey []byte

    rootID := makeBucketID([]byte(etcdDefaultRootBucketId))
    parent := rootID[:]

    for _, bucketName := range buckets {
        bucketKey = makeBucketKey(parent, []byte(bucketName))
        id := makeBucketID(bucketKey)
        parent = id[:]
    }

    return string(bucketKey)
}

// BucketVal is a helper function used in tests to create a bucket value (the
// value for a bucket key) from the passed bucket list.
func BucketVal(buckets ...string) string {
    id := makeBucketID([]byte(BucketKey(buckets...)))
    return string(id[:])
}

// ValueKey is a helper function used in tests to create a value key from the
// passed key and bucket list.
func ValueKey(key string, buckets ...string) string {
    rootID := makeBucketID([]byte(etcdDefaultRootBucketId))
    bucket := rootID[:]

    for _, bucketName := range buckets {
        bucketKey := makeBucketKey(bucket, []byte(bucketName))
        id := makeBucketID(bucketKey)
        bucket = id[:]
    }

    return string(makeValueKey(bucket, []byte(key)))
}

// SequenceKey is a helper function used in tests or external tools to create a
// sequence key from the passed bucket list.
func SequenceKey(buckets ...string) string {
    id := makeBucketID([]byte(BucketKey(buckets...)))
    return string(makeSequenceKey(id[:]))
}
server/pkg/kvdb/etcd/bucket_test.go (new file, 34 lines)
@@ -0,0 +1,34 @@
//go:build kvdb_etcd
// +build kvdb_etcd

package etcd

import (
    "crypto/sha256"
    "testing"

    "github.com/stretchr/testify/require"
)

// TestBucketKey tests that a key for a bucket can be created correctly.
func TestBucketKey(t *testing.T) {
    rootID := sha256.Sum256([]byte("@"))
    key := append(rootID[:], []byte("foo")...)
    key = append(key, 0xff)
    require.Equal(t, string(key), BucketKey("foo"))
}

// TestBucketVal tests that a key for a bucket value can be created correctly.
func TestBucketVal(t *testing.T) {
    rootID := sha256.Sum256([]byte("@"))
    key := append(rootID[:], []byte("foo")...)
    key = append(key, 0xff)

    keyID := sha256.Sum256(key)
    require.Equal(t, string(keyID[:]), BucketVal("foo"))
}

// TestSequenceKey tests that a key for a sequence can be created correctly.
func TestSequenceKey(t *testing.T) {
    require.Contains(t, SequenceKey("foo", "bar", "baz"), "$seq$")
}
server/pkg/kvdb/etcd/commit_queue.go (new file, 213 lines)
@@ -0,0 +1,213 @@
//go:build kvdb_etcd
// +build kvdb_etcd

package etcd

import (
    "container/list"
    "context"
    "sync"
)

// commitQueue is a simple execution queue to manage conflicts for transactions
// and thereby reduce the number of times conflicting transactions need to be
// retried. When a new transaction is added to the queue, we first upgrade the
// read/write counts in the queue's own accounting to decide whether the new
// transaction has any conflicting dependencies. If the transaction does not
// conflict with any other, then it is committed immediately, otherwise it'll be
// queued up for later execution.
// The algorithm is described in: http://www.cs.umd.edu/~abadi/papers/vll-vldb13.pdf
type commitQueue struct {
    ctx context.Context
    mx sync.Mutex
    readerMap map[string]int
    writerMap map[string]int

    queue *list.List
    queueMx sync.Mutex
    queueCond *sync.Cond

    shutdown chan struct{}
}

type commitQueueTxn struct {
    commitLoop func()
    blocked bool
    rset []string
    wset []string
}

// NewCommitQueue creates a new commit queue, with the passed abort context.
func NewCommitQueue(ctx context.Context) *commitQueue {
    q := &commitQueue{
        ctx: ctx,
        readerMap: make(map[string]int),
        writerMap: make(map[string]int),
        queue: list.New(),
        shutdown: make(chan struct{}),
    }
    q.queueCond = sync.NewCond(&q.queueMx)

    // Start the queue consumer loop.
    go q.mainLoop()

    return q
}

// Stop signals the queue to stop after the queue context has been canceled and
// waits until it has stopped.
func (c *commitQueue) Stop() {
    // Signal the queue's condition variable to ensure the mainLoop reliably
    // unblocks to check for the exit condition.
    c.queueCond.Signal()
    <-c.shutdown
}

// Add increases lock counts and queues up tx commit closure for execution.
// Transactions that don't have any conflicts are executed immediately by
// "downgrading" the count mutex to allow concurrency.
func (c *commitQueue) Add(commitLoop func(), rset []string, wset []string) {
    c.mx.Lock()
    blocked := false

    // Mark as blocked if there's any writer changing any of the keys in
    // the read set. Do not increment the reader counts yet as we'll need to
    // use the original reader counts when scanning through the write set.
    for _, key := range rset {
        if c.writerMap[key] > 0 {
            blocked = true
            break
        }
    }

    // Mark as blocked if there's any writer or reader for any of the keys
    // in the write set.
    for _, key := range wset {
        blocked = blocked || c.readerMap[key] > 0 || c.writerMap[key] > 0

        // Increment the writer count.
        c.writerMap[key] += 1
    }

    // Finally we can increment the reader counts for keys in the read set.
    for _, key := range rset {
        c.readerMap[key] += 1
    }

    c.queueCond.L.Lock()
    c.queue.PushBack(&commitQueueTxn{
        commitLoop: commitLoop,
        blocked: blocked,
        rset: rset,
        wset: wset,
    })
    c.queueCond.L.Unlock()

    c.mx.Unlock()

    c.queueCond.Signal()
}

// done decreases lock counts of the keys in the read/write sets.
func (c *commitQueue) done(rset []string, wset []string) {
    c.mx.Lock()
    defer c.mx.Unlock()

    for _, key := range rset {
        c.readerMap[key] -= 1
        if c.readerMap[key] == 0 {
            delete(c.readerMap, key)
        }
    }

    for _, key := range wset {
        c.writerMap[key] -= 1
        if c.writerMap[key] == 0 {
            delete(c.writerMap, key)
        }
    }
}

// mainLoop executes queued transaction commits for transactions that have
// dependencies. The queue ensures that the top element doesn't conflict with
// any other transactions and therefore can be executed freely.
func (c *commitQueue) mainLoop() {
    defer close(c.shutdown)

    for {
        // Wait until there are no unblocked transactions being
        // executed, and for there to be at least one blocked
        // transaction in our queue.
        c.queueCond.L.Lock()
        for c.queue.Front() == nil {
            c.queueCond.Wait()

            // Check the exit condition before looping again.
            select {
            case <-c.ctx.Done():
                c.queueCond.L.Unlock()
                return
            default:
            }
        }

        // Now collect all txns until we find the next blocking one.
        // These shouldn't conflict (if the precollected read/write
        // keys sets don't grow), meaning we can safely commit them
        // in parallel.
        work := make([]*commitQueueTxn, 1)
        e := c.queue.Front()
        work[0] = c.queue.Remove(e).(*commitQueueTxn)

        for {
            e := c.queue.Front()
            if e == nil {
                break
            }

            next := e.Value.(*commitQueueTxn)
            if !next.blocked {
                work = append(work, next)
                c.queue.Remove(e)
            } else {
                // We found the next blocking txn which means
                // the block of work needs to be cut here.
                break
            }
        }

        c.queueCond.L.Unlock()

        // Check if we need to exit before continuing.
        select {
        case <-c.ctx.Done():
            return
        default:
        }

        var wg sync.WaitGroup
        wg.Add(len(work))

        // Fire up N goroutines where each will run its commit loop
        // and then clean up the reader/writer maps.
        for _, txn := range work {
            go func(txn *commitQueueTxn) {
                defer wg.Done()
                txn.commitLoop()

                // We can safely cleanup here as done only
                // holds the main mutex.
                c.done(txn.rset, txn.wset)
            }(txn)
        }

        wg.Wait()

        // Check if we need to exit before continuing.
        select {
        case <-c.ctx.Done():
            return
        default:
        }
    }
}
server/pkg/kvdb/etcd/commit_queue_test.go (new file, 100 lines)
@@ -0,0 +1,100 @@
//go:build kvdb_etcd
// +build kvdb_etcd

package etcd

import (
    "context"
    "sync"
    "sync/atomic"
    "testing"
    "time"

    "github.com/stretchr/testify/require"
)

// TestCommitQueue tests that non-conflicting transactions commit concurrently,
// while conflicting transactions are queued up.
func TestCommitQueue(t *testing.T) {
    // The duration of each commit.
    const commitDuration = time.Millisecond * 500
    const numCommits = 5

    var wg sync.WaitGroup
    commits := make([]string, numCommits)
    idx := int32(-1)

    commit := func(tag string, sleep bool) func() {
        return func() {
            defer wg.Done()

            // Update our log of commit order. Avoid blocking
            // by preallocating the commit log and increasing
            // the log index atomically.
            if sleep {
                time.Sleep(commitDuration)
            }

            i := atomic.AddInt32(&idx, 1)
            commits[i] = tag
        }
    }

    ctx := context.Background()
    ctx, cancel := context.WithCancel(ctx)
    q := NewCommitQueue(ctx)
    defer q.Stop()
    defer cancel()

    wg.Add(numCommits)
    t1 := time.Now()

    // Tx1 (long): reads: key1, key2, writes: key3, conflict: none
    q.Add(
        commit("free", true),
        []string{"key1", "key2"},
        []string{"key3"},
    )
    // Tx2: reads: key1, key2, writes: key3, conflict: Tx1
    q.Add(
        commit("blocked1", false),
        []string{"key1", "key2"},
        []string{"key3"},
    )
    // Tx3 (long): reads: key1, key2, writes: key4, conflict: none
    q.Add(
        commit("free", true),
        []string{"key1", "key2"},
        []string{"key4"},
    )
    // Tx4 (long): reads: key1, key2, writes: none, conflict: none
    q.Add(
        commit("free", true),
        []string{"key1", "key2"},
        []string{},
    )
    // Tx5: reads: key2, writes: key4, conflict: Tx3
    q.Add(
        commit("blocked2", false),
        []string{"key2"},
        []string{"key4"},
    )

    // Wait for all commits.
    wg.Wait()
    t2 := time.Now()

    // Expected total execution time: delta.
    // 2 * commitDuration <= delta < 3 * commitDuration
    delta := t2.Sub(t1)
    require.LessOrEqual(t, int64(commitDuration*2), int64(delta))
    require.Greater(t, int64(commitDuration*3), int64(delta))

    // Expect that the non-conflicting "free" transactions are executed
    // before the blocking ones, and the blocking ones are executed in
    // the order of addition.
    require.Equal(t,
        []string{"free", "blocked1", "free", "free", "blocked2"},
        commits,
    )
}
server/pkg/kvdb/etcd/config.go (new file, 90 lines)
@@ -0,0 +1,90 @@
package etcd

import "fmt"

// Config holds etcd configuration alongside with configuration related to our higher level interface.
//
//nolint:lll
type Config struct {
    Embedded bool `long:"embedded" description:"Use embedded etcd instance instead of the external one. Note: use for testing only."`

    EmbeddedClientPort uint16 `long:"embedded_client_port" description:"Client port to use for the embedded instance. Note: use for testing only."`

    EmbeddedPeerPort uint16 `long:"embedded_peer_port" description:"Peer port to use for the embedded instance. Note: use for testing only."`

    EmbeddedLogFile string `long:"embedded_log_file" description:"Optional log file to use for embedded instance logs. note: use for testing only."`

    Host string `long:"host" description:"Etcd database host. Supports multiple hosts separated by a comma."`

    User string `long:"user" description:"Etcd database user."`

    Pass string `long:"pass" description:"Password for the database user."`

    Namespace string `long:"namespace" description:"The etcd namespace to use."`

    DisableTLS bool `long:"disabletls" description:"Disable TLS for etcd connection. Caution: use for development only."`

    CertFile string `long:"cert_file" description:"Path to the TLS certificate for etcd RPC."`

    KeyFile string `long:"key_file" description:"Path to the TLS private key for etcd RPC."`

    InsecureSkipVerify bool `long:"insecure_skip_verify" description:"Whether we intend to skip TLS verification"`

    CollectStats bool `long:"collect_stats" description:"Whether to collect etcd commit stats."`

    MaxMsgSize int `long:"max_msg_size" description:"The maximum message size in bytes that we may send to etcd."`

    // SingleWriter should be set to true if we intend to only allow a
    // single writer to the database at a time.
    SingleWriter bool
}

// CloneWithSubNamespace clones the current configuration and returns a new
// instance with the given sub namespace applied by appending it to the main
// namespace.
func (c *Config) CloneWithSubNamespace(subNamespace string) *Config {
    ns := c.Namespace
    if len(ns) == 0 {
        ns = subNamespace
    } else {
        ns = fmt.Sprintf("%s/%s", ns, subNamespace)
    }

    return &Config{
        Embedded: c.Embedded,
        EmbeddedClientPort: c.EmbeddedClientPort,
        EmbeddedPeerPort: c.EmbeddedPeerPort,
        Host: c.Host,
        User: c.User,
        Pass: c.Pass,
        Namespace: ns,
        DisableTLS: c.DisableTLS,
        CertFile: c.CertFile,
        KeyFile: c.KeyFile,
        InsecureSkipVerify: c.InsecureSkipVerify,
        CollectStats: c.CollectStats,
        MaxMsgSize: c.MaxMsgSize,
        SingleWriter: c.SingleWriter,
    }
}

// CloneWithSingleWriter clones the current configuration and returns a new
// instance with the single writer property set to true.
func (c *Config) CloneWithSingleWriter() *Config {
    return &Config{
        Embedded: c.Embedded,
        EmbeddedClientPort: c.EmbeddedClientPort,
        EmbeddedPeerPort: c.EmbeddedPeerPort,
        Host: c.Host,
        User: c.User,
        Pass: c.Pass,
        Namespace: c.Namespace,
        DisableTLS: c.DisableTLS,
        CertFile: c.CertFile,
        KeyFile: c.KeyFile,
        InsecureSkipVerify: c.InsecureSkipVerify,
        CollectStats: c.CollectStats,
        MaxMsgSize: c.MaxMsgSize,
        SingleWriter: true,
    }
}
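Editor's note: as a quick illustration of the namespace cloning helpers above, the following sketch (not part of the diff) shows how a sub namespace is appended to an existing one. The import path, host, and namespace values are made-up examples.

package main

import (
    "fmt"

    "github.com/ark-network/ark/server/pkg/kvdb/etcd" // assumed import path
)

func main() {
    base := &etcd.Config{
        Host: "127.0.0.1:2379",
        Namespace: "ark",
    }

    // walletCfg.Namespace becomes "ark/wallet"; the single-writer clone keeps
    // the namespace but forces SingleWriter to true.
    walletCfg := base.CloneWithSubNamespace("wallet")
    singleWriter := walletCfg.CloneWithSingleWriter()

    fmt.Println(walletCfg.Namespace, singleWriter.SingleWriter)
}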
323
server/pkg/kvdb/etcd/db.go
Normal file
323
server/pkg/kvdb/etcd/db.go
Normal file
@@ -0,0 +1,323 @@
|
||||
//go:build kvdb_etcd
|
||||
// +build kvdb_etcd
|
||||
|
||||
package etcd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/btcsuite/btcwallet/walletdb"
|
||||
"go.etcd.io/etcd/client/pkg/v3/transport"
|
||||
clientv3 "go.etcd.io/etcd/client/v3"
|
||||
"go.etcd.io/etcd/client/v3/namespace"
|
||||
)
|
||||
|
||||
const (
|
||||
// etcdConnectionTimeout is the timeout until successful connection to
|
||||
// the etcd instance.
|
||||
etcdConnectionTimeout = 10 * time.Second
|
||||
|
||||
// etcdLongTimeout is a timeout for longer taking etcd operations.
|
||||
etcdLongTimeout = 30 * time.Second
|
||||
|
||||
// etcdDefaultRootBucketId is used as the root bucket key. Note that
|
||||
// the actual key is not visible, since all bucket keys are hashed.
|
||||
etcdDefaultRootBucketId = "@"
|
||||
)
|
||||
|
||||
// callerStats holds commit stats for a specific caller. Currently it only
|
||||
// holds the max stat, meaning that for a particular caller the largest
|
||||
// commit set is recorded.
|
||||
type callerStats struct {
|
||||
count int
|
||||
commitStats CommitStats
|
||||
}
|
||||
|
||||
func (s callerStats) String() string {
|
||||
return fmt.Sprintf("count: %d, retries: %d, rset: %d, wset: %d",
|
||||
s.count, s.commitStats.Retries, s.commitStats.Rset,
|
||||
s.commitStats.Wset)
|
||||
}
|
||||
|
||||
// commitStatsCollector collects commit stats for commits succeeding
|
||||
// and also for commits failing.
|
||||
type commitStatsCollector struct {
|
||||
sync.RWMutex
|
||||
succ map[string]*callerStats
|
||||
fail map[string]*callerStats
|
||||
}
|
||||
|
||||
// newCommitStatsCollector creates a new commitStatsCollector instance.
|
||||
func newCommitStatsCollector() *commitStatsCollector {
|
||||
return &commitStatsCollector{
|
||||
succ: make(map[string]*callerStats),
|
||||
fail: make(map[string]*callerStats),
|
||||
}
|
||||
}
|
||||
|
||||
// PrintStats returns collected stats pretty printed into a string.
|
||||
func (c *commitStatsCollector) PrintStats() string {
|
||||
c.RLock()
|
||||
defer c.RUnlock()
|
||||
|
||||
s := "\nFailure:\n"
|
||||
for k, v := range c.fail {
|
||||
s += fmt.Sprintf("%s\t%s\n", k, v)
|
||||
}
|
||||
|
||||
s += "\nSuccess:\n"
|
||||
for k, v := range c.succ {
|
||||
s += fmt.Sprintf("%s\t%s\n", k, v)
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
// updateStatsMap updatess commit stats map for a caller.
|
||||
func updateStatMap(
|
||||
caller string, stats CommitStats, m map[string]*callerStats) {
|
||||
|
||||
if _, ok := m[caller]; !ok {
|
||||
m[caller] = &callerStats{}
|
||||
}
|
||||
|
||||
curr := m[caller]
|
||||
curr.count++
|
||||
|
||||
// Update only if the total commit set is greater or equal.
|
||||
currTotal := curr.commitStats.Rset + curr.commitStats.Wset
|
||||
if currTotal <= (stats.Rset + stats.Wset) {
|
||||
curr.commitStats = stats
|
||||
}
|
||||
}
|
||||
|
||||
// callback is an STM commit stats callback passed which can be passed
|
||||
// using a WithCommitStatsCallback to the STM upon construction.
|
||||
func (c *commitStatsCollector) callback(succ bool, stats CommitStats) {
|
||||
caller := "unknown"
|
||||
|
||||
// Get the caller. As this callback is called from
|
||||
// the backend interface that means we need to ascend
|
||||
// 4 frames in the callstack.
|
||||
_, file, no, ok := runtime.Caller(4)
|
||||
if ok {
|
||||
caller = fmt.Sprintf("%s#%d", file, no)
|
||||
}
|
||||
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
if succ {
|
||||
updateStatMap(caller, stats, c.succ)
|
||||
} else {
|
||||
updateStatMap(caller, stats, c.fail)
|
||||
}
|
||||
}
|
||||
|
||||
// db holds a reference to the etcd client connection.
|
||||
type db struct {
|
||||
cfg Config
|
||||
ctx context.Context
|
||||
cancel func()
|
||||
cli *clientv3.Client
|
||||
commitStatsCollector *commitStatsCollector
|
||||
txQueue *commitQueue
|
||||
txMutex sync.RWMutex
|
||||
}
|
||||
|
||||
// Enforce db implements the walletdb.DB interface.
|
||||
var _ walletdb.DB = (*db)(nil)
|
||||
|
||||
// NewEtcdClient creates a new etcd v3 API client.
|
||||
func NewEtcdClient(ctx context.Context, cfg Config) (*clientv3.Client,
|
||||
context.Context, func(), error) {
|
||||
|
||||
clientCfg := clientv3.Config{
|
||||
Endpoints: strings.Split(cfg.Host, ","),
|
||||
DialTimeout: etcdConnectionTimeout,
|
||||
Username: cfg.User,
|
||||
Password: cfg.Pass,
|
||||
MaxCallSendMsgSize: cfg.MaxMsgSize,
|
||||
}
|
||||
|
||||
if !cfg.DisableTLS {
|
||||
tlsInfo := transport.TLSInfo{
|
||||
CertFile: cfg.CertFile,
|
||||
KeyFile: cfg.KeyFile,
|
||||
InsecureSkipVerify: cfg.InsecureSkipVerify,
|
||||
}
|
||||
|
||||
tlsConfig, err := tlsInfo.ClientConfig()
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
|
||||
clientCfg.TLS = tlsConfig
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
clientCfg.Context = ctx
|
||||
cli, err := clientv3.New(clientCfg)
|
||||
if err != nil {
|
||||
cancel()
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
|
||||
// Apply the namespace.
|
||||
cli.KV = namespace.NewKV(cli.KV, cfg.Namespace)
|
||||
cli.Watcher = namespace.NewWatcher(cli.Watcher, cfg.Namespace)
|
||||
cli.Lease = namespace.NewLease(cli.Lease, cfg.Namespace)
|
||||
|
||||
return cli, ctx, cancel, nil
|
||||
}
|
||||
|
||||
// newEtcdBackend returns a db object initialized with the passed backend
|
||||
// config. If etcd connection cannot be established, then returns error.
|
||||
func newEtcdBackend(ctx context.Context, cfg Config) (*db, error) {
|
||||
cli, ctx, cancel, err := NewEtcdClient(ctx, cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
backend := &db{
|
||||
cfg: cfg,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
cli: cli,
|
||||
txQueue: NewCommitQueue(ctx),
|
||||
}
|
||||
|
||||
if cfg.CollectStats {
|
||||
backend.commitStatsCollector = newCommitStatsCollector()
|
||||
}
|
||||
|
||||
return backend, nil
|
||||
}
|
||||
|
||||
// getSTMOptions creates all STM options based on the backend config.
|
||||
func (db *db) getSTMOptions() []STMOptionFunc {
|
||||
opts := []STMOptionFunc{
|
||||
WithAbortContext(db.ctx),
|
||||
}
|
||||
|
||||
if db.cfg.CollectStats {
|
||||
opts = append(opts,
|
||||
WithCommitStatsCallback(db.commitStatsCollector.callback),
|
||||
)
|
||||
}
|
||||
|
||||
return opts
|
||||
}
|
||||
|
||||
// View opens a database read transaction and executes the function f with the
|
||||
// transaction passed as a parameter. After f exits, the transaction is rolled
|
||||
// back. If f errors, its error is returned, not a rollback error (if any
|
||||
// occur). The passed reset function is called before the start of the
|
||||
// transaction and can be used to reset intermediate state. As callers may
|
||||
// expect retries of the f closure (depending on the database backend used), the
|
||||
// reset function will be called before each retry respectively.
|
||||
func (db *db) View(f func(tx walletdb.ReadTx) error, reset func()) error {
|
||||
if db.cfg.SingleWriter {
|
||||
db.txMutex.RLock()
|
||||
defer db.txMutex.RUnlock()
|
||||
}
|
||||
|
||||
apply := func(stm STM) error {
|
||||
reset()
|
||||
return f(newReadWriteTx(stm, etcdDefaultRootBucketId, nil))
|
||||
}
|
||||
|
||||
_, err := RunSTM(db.cli, apply, db.txQueue, db.getSTMOptions()...)
|
||||
return err
|
||||
}
|
||||
|
||||
// Update opens a database read/write transaction and executes the function f
|
||||
// with the transaction passed as a parameter. After f exits, if f did not
|
||||
// error, the transaction is committed. Otherwise, if f did error, the
|
||||
// transaction is rolled back. If the rollback fails, the original error
|
||||
// returned by f is still returned. If the commit fails, the commit error is
|
||||
// returned. As callers may expect retries of the f closure, the reset function
|
||||
// will be called before each retry respectively.
|
||||
func (db *db) Update(f func(tx walletdb.ReadWriteTx) error, reset func()) error {
|
||||
if db.cfg.SingleWriter {
|
||||
db.txMutex.Lock()
|
||||
defer db.txMutex.Unlock()
|
||||
}
|
||||
|
||||
apply := func(stm STM) error {
|
||||
reset()
|
||||
return f(newReadWriteTx(stm, etcdDefaultRootBucketId, nil))
|
||||
}
|
||||
|
||||
_, err := RunSTM(db.cli, apply, db.txQueue, db.getSTMOptions()...)
|
||||
return err
|
||||
}
|
||||
|
||||
// PrintStats returns all collected stats pretty printed into a string.
|
||||
func (db *db) PrintStats() string {
|
||||
if db.commitStatsCollector != nil {
|
||||
return db.commitStatsCollector.PrintStats()
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// BeginReadWriteTx opens a database read+write transaction.
|
||||
func (db *db) BeginReadWriteTx() (walletdb.ReadWriteTx, error) {
|
||||
var locker sync.Locker
|
||||
if db.cfg.SingleWriter {
|
||||
db.txMutex.Lock()
|
||||
locker = &db.txMutex
|
||||
}
|
||||
|
||||
return newReadWriteTx(
|
||||
NewSTM(db.cli, db.txQueue, db.getSTMOptions()...),
|
||||
etcdDefaultRootBucketId, locker,
|
||||
), nil
|
||||
}
|
||||
|
||||
// BeginReadTx opens a database read transaction.
|
||||
func (db *db) BeginReadTx() (walletdb.ReadTx, error) {
|
||||
var locker sync.Locker
|
||||
if db.cfg.SingleWriter {
|
||||
db.txMutex.RLock()
|
||||
locker = db.txMutex.RLocker()
|
||||
}
|
||||
|
||||
return newReadWriteTx(
|
||||
NewSTM(db.cli, db.txQueue, db.getSTMOptions()...),
|
||||
etcdDefaultRootBucketId, locker,
|
||||
), nil
|
||||
}
|
||||
|
||||
// Copy writes a copy of the database to the provided writer. This call will
|
||||
// start a read-only transaction to perform all operations.
|
||||
// This function is part of the walletdb.Db interface implementation.
|
||||
func (db *db) Copy(w io.Writer) error {
|
||||
ctx, cancel := context.WithTimeout(db.ctx, etcdLongTimeout)
|
||||
defer cancel()
|
||||
|
||||
readCloser, err := db.cli.Snapshot(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = io.Copy(w, readCloser)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Close cleanly shuts down the database and syncs all data.
|
||||
// This function is part of the walletdb.Db interface implementation.
|
||||
func (db *db) Close() error {
|
||||
err := db.cli.Close()
|
||||
db.cancel()
|
||||
db.txQueue.Stop()
|
||||
return err
|
||||
}
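
For orientation, a minimal usage sketch of this backend (not part of the change itself; it assumes the kvdb_etcd build tag and a reachable etcd endpoint, and the helper name exampleUpdate is illustrative):

//go:build kvdb_etcd
// +build kvdb_etcd

package etcd

import (
	"context"

	"github.com/btcsuite/btcwallet/walletdb"
)

// exampleUpdate opens the etcd backend and writes a single key inside a top
// level bucket via the Update closure defined above. The empty reset callback
// is required because the closure may be retried on commit conflicts.
func exampleUpdate(cfg Config) error {
	db, err := newEtcdBackend(context.Background(), cfg)
	if err != nil {
		return err
	}
	defer db.Close()

	return db.Update(func(tx walletdb.ReadWriteTx) error {
		apple, err := tx.CreateTopLevelBucket([]byte("apple"))
		if err != nil {
			return err
		}

		return apple.Put([]byte("key"), []byte("val"))
	}, func() {})
}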
|
||||
99
server/pkg/kvdb/etcd/db_test.go
Normal file
@@ -0,0 +1,99 @@
|
||||
//go:build kvdb_etcd
|
||||
// +build kvdb_etcd
|
||||
|
||||
package etcd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/btcsuite/btcwallet/walletdb"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestDump tests that the Dump() method creates a one-to-one copy of the
|
||||
// database content.
|
||||
func TestDump(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
f := NewEtcdTestFixture(t)
|
||||
|
||||
db, err := newEtcdBackend(context.TODO(), f.BackendConfig())
|
||||
require.NoError(t, err)
|
||||
|
||||
err = db.Update(func(tx walletdb.ReadWriteTx) error {
|
||||
// "apple"
|
||||
apple, err := tx.CreateTopLevelBucket([]byte("apple"))
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, apple)
|
||||
|
||||
require.NoError(t, apple.Put([]byte("key"), []byte("val")))
|
||||
return nil
|
||||
}, func() {})
|
||||
|
||||
// Expect non-zero copy.
|
||||
var buf bytes.Buffer
|
||||
|
||||
require.NoError(t, db.Copy(&buf))
|
||||
require.Greater(t, buf.Len(), 0)
|
||||
require.Nil(t, err)
|
||||
|
||||
expected := map[string]string{
|
||||
BucketKey("apple"): BucketVal("apple"),
|
||||
ValueKey("key", "apple"): "val",
|
||||
}
|
||||
require.Equal(t, expected, f.Dump())
|
||||
}
|
||||
|
||||
// TestAbortContext tests that an update on the database is aborted if the
|
||||
// database's main context is cancelled.
|
||||
func TestAbortContext(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
f := NewEtcdTestFixture(t)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
config := f.BackendConfig()
|
||||
|
||||
// Pass abort context and abort right away.
|
||||
db, err := newEtcdBackend(ctx, config)
|
||||
require.NoError(t, err)
|
||||
cancel()
|
||||
|
||||
// Expect that the update will fail.
|
||||
err = db.Update(func(tx walletdb.ReadWriteTx) error {
|
||||
_, err := tx.CreateTopLevelBucket([]byte("bucket"))
|
||||
require.Error(t, err, "context canceled")
|
||||
|
||||
return nil
|
||||
}, func() {})
|
||||
|
||||
require.Error(t, err, "context canceled")
|
||||
|
||||
// No changes in the DB.
|
||||
require.Equal(t, map[string]string{}, f.Dump())
|
||||
}
|
||||
|
||||
// TestNewEtcdClient tests that an etcd v3 client can be created correctly.
|
||||
func TestNewEtcdClient(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
f := NewEtcdTestFixture(t)
|
||||
|
||||
client, ctx, cancel, err := NewEtcdClient(
|
||||
context.Background(), f.BackendConfig(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(cancel)
|
||||
|
||||
_, err = client.Put(ctx, "foo/bar", "baz")
|
||||
require.NoError(t, err)
|
||||
|
||||
resp, err := client.Get(ctx, "foo/bar")
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Len(t, resp.Kvs, 1)
|
||||
require.Equal(t, "baz", string(resp.Kvs[0].Value))
|
||||
}
|
||||
9
server/pkg/kvdb/etcd/debug.go
Normal file
@@ -0,0 +1,9 @@
//go:build dev
// +build dev

package etcd

const (
	// Switch on extra debug code.
	etcdDebug = true
)
80
server/pkg/kvdb/etcd/driver.go
Normal file
@@ -0,0 +1,80 @@
|
||||
//go:build kvdb_etcd
|
||||
// +build kvdb_etcd
|
||||
|
||||
package etcd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/btcsuite/btcwallet/walletdb"
|
||||
)
|
||||
|
||||
const (
|
||||
dbType = "etcd"
|
||||
)
|
||||
|
||||
// parseArgs parses the arguments from the walletdb Open/Create methods.
|
||||
func parseArgs(funcName string, args ...interface{}) (context.Context,
|
||||
*Config, error) {
|
||||
|
||||
if len(args) != 2 {
|
||||
return nil, nil, fmt.Errorf("invalid number of arguments to "+
|
||||
"%s.%s -- expected: context.Context, etcd.Config",
|
||||
dbType, funcName,
|
||||
)
|
||||
}
|
||||
|
||||
ctx, ok := args[0].(context.Context)
|
||||
if !ok {
|
||||
return nil, nil, fmt.Errorf("argument 0 to %s.%s is invalid "+
|
||||
"-- expected: context.Context",
|
||||
dbType, funcName,
|
||||
)
|
||||
}
|
||||
|
||||
config, ok := args[1].(*Config)
|
||||
if !ok {
|
||||
return nil, nil, fmt.Errorf("argument 1 to %s.%s is invalid -- "+
|
||||
"expected: etcd.Config",
|
||||
dbType, funcName,
|
||||
)
|
||||
}
|
||||
|
||||
return ctx, config, nil
|
||||
}
|
||||
|
||||
// createDBDriver is the callback provided during driver registration that
|
||||
// creates, initializes, and opens a database for use.
|
||||
func createDBDriver(args ...interface{}) (walletdb.DB, error) {
|
||||
ctx, config, err := parseArgs("Create", args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return newEtcdBackend(ctx, *config)
|
||||
}
|
||||
|
||||
// openDBDriver is the callback provided during driver registration that opens
|
||||
// an existing database for use.
|
||||
func openDBDriver(args ...interface{}) (walletdb.DB, error) {
|
||||
ctx, config, err := parseArgs("Open", args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return newEtcdBackend(ctx, *config)
|
||||
}
|
||||
|
||||
func init() {
|
||||
// Register the driver.
|
||||
driver := walletdb.Driver{
|
||||
DbType: dbType,
|
||||
Create: createDBDriver,
|
||||
Open: openDBDriver,
|
||||
}
|
||||
if err := walletdb.RegisterDriver(driver); err != nil {
|
||||
panic(fmt.Sprintf("Failed to regiser database driver '%s': %v",
|
||||
dbType, err))
|
||||
}
|
||||
}
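
A hedged sketch of opening the backend through the driver registered above (the endpoint value and helper name are placeholders): walletdb.Create forwards its variadic arguments to createDBDriver, which expects a context.Context followed by an *etcd.Config.

//go:build kvdb_etcd
// +build kvdb_etcd

package etcd

import (
	"context"

	"github.com/btcsuite/btcwallet/walletdb"
)

// exampleOpenViaDriver creates an etcd-backed walletdb.DB through the
// registered "etcd" driver rather than by calling newEtcdBackend directly.
func exampleOpenViaDriver() (walletdb.DB, error) {
	cfg := &Config{Host: "http://127.0.0.1:2379"}

	return walletdb.Create(dbType, context.Background(), cfg)
}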
|
||||
31
server/pkg/kvdb/etcd/driver_test.go
Normal file
@@ -0,0 +1,31 @@
|
||||
//go:build kvdb_etcd
|
||||
// +build kvdb_etcd
|
||||
|
||||
package etcd
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/btcsuite/btcwallet/walletdb"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestOpenCreateFailure(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
db, err := walletdb.Open(dbType)
|
||||
require.Error(t, err)
|
||||
require.Nil(t, db)
|
||||
|
||||
db, err = walletdb.Open(dbType, "wrong")
|
||||
require.Error(t, err)
|
||||
require.Nil(t, db)
|
||||
|
||||
db, err = walletdb.Create(dbType)
|
||||
require.Error(t, err)
|
||||
require.Nil(t, db)
|
||||
|
||||
db, err = walletdb.Create(dbType, "wrong")
|
||||
require.Error(t, err)
|
||||
require.Nil(t, db)
|
||||
}
|
||||
118
server/pkg/kvdb/etcd/embed.go
Normal file
@@ -0,0 +1,118 @@
|
||||
//go:build kvdb_etcd
|
||||
// +build kvdb_etcd
|
||||
|
||||
package etcd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"go.etcd.io/etcd/server/v3/embed"
|
||||
)
|
||||
|
||||
const (
|
||||
// readyTimeout is the maximum time we wait for the embedded etcd instance to start.
|
||||
readyTimeout = 10 * time.Second
|
||||
|
||||
// defaultEtcdPort is the start of the range for listening ports of
|
||||
// embedded etcd servers. Ports are monotonically increasing starting
|
||||
// from this number and are determined by the results of getFreePort().
|
||||
defaultEtcdPort = 2379
|
||||
|
||||
// defaultNamespace is the namespace we'll use in our embedded etcd
|
||||
// instance. Since it is only used for testing, we'll use the namespace
|
||||
// name "test/" for this. Note that the namespace can be any string,
|
||||
// the trailing / is not required.
|
||||
defaultNamespace = "test/"
|
||||
)
|
||||
|
||||
var (
|
||||
// lastPort is the last port determined to be free for use by a new
|
||||
// embedded etcd server. It should be used atomically.
|
||||
lastPort uint32 = defaultEtcdPort
|
||||
)
|
||||
|
||||
// getFreePort returns the first port that is available for listening by a new
|
||||
// embedded etcd server. It panics if no free port can be found before the maximum
|
||||
// TCP port is reached.
|
||||
func getFreePort() int {
|
||||
port := atomic.AddUint32(&lastPort, 1)
|
||||
for port < 65535 {
|
||||
// If there are no errors while attempting to listen on this
|
||||
// port, close the socket and return it as available.
|
||||
addr := fmt.Sprintf("127.0.0.1:%d", port)
|
||||
l, err := net.Listen("tcp4", addr)
|
||||
if err == nil {
|
||||
err := l.Close()
|
||||
if err == nil {
|
||||
return int(port)
|
||||
}
|
||||
}
|
||||
port = atomic.AddUint32(&lastPort, 1)
|
||||
}
|
||||
|
||||
// No ports available? Must be a mistake.
|
||||
panic("no ports available for listening")
|
||||
}
|
||||
|
||||
// NewEmbeddedEtcdInstance creates an embedded etcd instance for testing,
|
||||
// listening on random open ports. Returns the backend config and a cleanup
|
||||
// func that will stop the etcd instance.
|
||||
func NewEmbeddedEtcdInstance(path string, clientPort, peerPort uint16,
|
||||
logFile string) (*Config, func(), error) {
|
||||
|
||||
cfg := embed.NewConfig()
|
||||
cfg.Dir = path
|
||||
|
||||
// To ensure that we can submit large transactions.
|
||||
cfg.MaxTxnOps = 16384
|
||||
cfg.MaxRequestBytes = 16384 * 1024
|
||||
cfg.Logger = "zap"
|
||||
if logFile != "" {
|
||||
cfg.LogLevel = "info"
|
||||
cfg.LogOutputs = []string{logFile}
|
||||
} else {
|
||||
cfg.LogLevel = "error"
|
||||
}
|
||||
|
||||
// Listen on random free ports if no ports were specified.
|
||||
if clientPort == 0 {
|
||||
clientPort = uint16(getFreePort())
|
||||
}
|
||||
|
||||
if peerPort == 0 {
|
||||
peerPort = uint16(getFreePort())
|
||||
}
|
||||
|
||||
clientURL := fmt.Sprintf("127.0.0.1:%d", clientPort)
|
||||
peerURL := fmt.Sprintf("127.0.0.1:%d", peerPort)
|
||||
cfg.LCUrls = []url.URL{{Host: clientURL}}
|
||||
cfg.LPUrls = []url.URL{{Host: peerURL}}
|
||||
|
||||
etcd, err := embed.StartEtcd(cfg)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
select {
|
||||
case <-etcd.Server.ReadyNotify():
|
||||
case <-time.After(readyTimeout):
|
||||
etcd.Close()
|
||||
return nil, nil,
|
||||
fmt.Errorf("etcd failed to start after: %v", readyTimeout)
|
||||
}
|
||||
|
||||
connConfig := &Config{
|
||||
Host: "http://" + clientURL,
|
||||
InsecureSkipVerify: true,
|
||||
Namespace: defaultNamespace,
|
||||
MaxMsgSize: int(cfg.MaxRequestBytes),
|
||||
}
|
||||
|
||||
return connConfig, func() {
|
||||
etcd.Close()
|
||||
}, nil
|
||||
}
|
||||
135
server/pkg/kvdb/etcd/fixture.go
Normal file
@@ -0,0 +1,135 @@
|
||||
//go:build kvdb_etcd
|
||||
// +build kvdb_etcd
|
||||
|
||||
package etcd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/btcsuite/btcwallet/walletdb"
|
||||
"github.com/stretchr/testify/require"
|
||||
clientv3 "go.etcd.io/etcd/client/v3"
|
||||
"go.etcd.io/etcd/client/v3/namespace"
|
||||
)
|
||||
|
||||
const (
|
||||
// testEtcdTimeout is used for all RPC calls initiated by the test fixture.
|
||||
testEtcdTimeout = 5 * time.Second
|
||||
)
|
||||
|
||||
// EtcdTestFixture holds internal state of the etcd test fixture.
|
||||
type EtcdTestFixture struct {
|
||||
t *testing.T
|
||||
cli *clientv3.Client
|
||||
config *Config
|
||||
}
|
||||
|
||||
// NewTestEtcdInstance creates an embedded etcd instance for testing, listening
|
||||
// on random open ports. Returns the connection config and a cleanup func that
|
||||
// will stop the etcd instance.
|
||||
func NewTestEtcdInstance(t *testing.T, path string) (*Config, func()) {
|
||||
t.Helper()
|
||||
|
||||
config, cleanup, err := NewEmbeddedEtcdInstance(path, 0, 0, "")
|
||||
if err != nil {
|
||||
t.Fatalf("error while staring embedded etcd instance: %v", err)
|
||||
}
|
||||
|
||||
return config, cleanup
|
||||
}
|
||||
|
||||
// NewEtcdTestFixture creates a new etcd test fixture. This is a helper
|
||||
// object to facilitate etcd tests and ensure pre- and post-conditions.
|
||||
func NewEtcdTestFixture(t *testing.T) *EtcdTestFixture {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
config, etcdCleanup := NewTestEtcdInstance(t, tmpDir)
|
||||
t.Cleanup(etcdCleanup)
|
||||
|
||||
cli, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: strings.Split(config.Host, ","),
|
||||
Username: config.User,
|
||||
Password: config.Pass,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("unable to create etcd test fixture: %v", err)
|
||||
}
|
||||
|
||||
// Apply the default namespace (since that's what we use in tests).
|
||||
cli.KV = namespace.NewKV(cli.KV, defaultNamespace)
|
||||
cli.Watcher = namespace.NewWatcher(cli.Watcher, defaultNamespace)
|
||||
cli.Lease = namespace.NewLease(cli.Lease, defaultNamespace)
|
||||
|
||||
return &EtcdTestFixture{
|
||||
t: t,
|
||||
cli: cli,
|
||||
config: config,
|
||||
}
|
||||
}
|
||||
|
||||
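// NewBackend creates a walletdb.DB backed by the fixture's embedded etcd
// instance, optionally enabling the single-writer (RW-lock) mode.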
func (f *EtcdTestFixture) NewBackend(singleWriter bool) walletdb.DB {
|
||||
cfg := f.BackendConfig()
|
||||
if singleWriter {
|
||||
cfg.SingleWriter = true
|
||||
}
|
||||
|
||||
db, err := newEtcdBackend(context.TODO(), cfg)
|
||||
require.NoError(f.t, err)
|
||||
|
||||
return db
|
||||
}
|
||||
|
||||
// Put puts a string key/value into the test etcd database.
|
||||
func (f *EtcdTestFixture) Put(key, value string) {
|
||||
ctx, cancel := context.WithTimeout(context.TODO(), testEtcdTimeout)
|
||||
defer cancel()
|
||||
|
||||
_, err := f.cli.Put(ctx, key, value)
|
||||
if err != nil {
|
||||
f.t.Fatalf("etcd test fixture failed to put: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Get queries a key and returns the stored value from the test etcd database.
|
||||
func (f *EtcdTestFixture) Get(key string) string {
|
||||
ctx, cancel := context.WithTimeout(context.TODO(), testEtcdTimeout)
|
||||
defer cancel()
|
||||
|
||||
resp, err := f.cli.Get(ctx, key)
|
||||
if err != nil {
|
||||
f.t.Fatalf("etcd test fixture failed to get: %v", err)
|
||||
}
|
||||
|
||||
if len(resp.Kvs) > 0 {
|
||||
return string(resp.Kvs[0].Value)
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// Dump scans and returns all key/values from the test etcd database.
|
||||
func (f *EtcdTestFixture) Dump() map[string]string {
|
||||
ctx, cancel := context.WithTimeout(context.TODO(), testEtcdTimeout)
|
||||
defer cancel()
|
||||
|
||||
resp, err := f.cli.Get(ctx, "\x00", clientv3.WithFromKey())
|
||||
if err != nil {
|
||||
f.t.Fatalf("etcd test fixture failed to get: %v", err)
|
||||
}
|
||||
|
||||
result := make(map[string]string)
|
||||
for _, kv := range resp.Kvs {
|
||||
result[string(kv.Key)] = string(kv.Value)
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// BackendConfig returns the backend config for connecting to the embedded
|
||||
// etcd instance.
|
||||
func (f *EtcdTestFixture) BackendConfig() Config {
|
||||
return *f.config
|
||||
}
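
As a brief illustration of how the fixture above is typically driven in tests (bucket and key names are arbitrary):

//go:build kvdb_etcd
// +build kvdb_etcd

package etcd

import "testing"

// testFixtureUsageSketch shows the common Put/Get/Dump flow against the
// fixture's embedded etcd instance.
func testFixtureUsageSketch(t *testing.T) {
	f := NewEtcdTestFixture(t)

	// Seed state directly through the embedded etcd client...
	f.Put("key", "value")

	// ...and assert on single keys or on the full database content.
	if f.Get("key") != "value" {
		t.Fatalf("unexpected db content: %v", f.Dump())
	}
}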
|
||||
9
server/pkg/kvdb/etcd/nodebug.go
Normal file
@@ -0,0 +1,9 @@
//go:build !dev
// +build !dev

package etcd

const (
	// Switch off extra debug code.
	etcdDebug = false
)
437
server/pkg/kvdb/etcd/readwrite_bucket.go
Normal file
@@ -0,0 +1,437 @@
|
||||
//go:build kvdb_etcd
|
||||
// +build kvdb_etcd
|
||||
|
||||
package etcd
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
"github.com/btcsuite/btcwallet/walletdb"
|
||||
)
|
||||
|
||||
// readWriteBucket stores the bucket id and the buckets transaction.
|
||||
type readWriteBucket struct {
|
||||
// id is used to identify the bucket and is created by
|
||||
// hashing the parent id with the bucket key. For each key/value,
|
||||
// sub-bucket or the bucket sequence the bucket id is used with the
|
||||
// appropriate prefix to prefix the key.
|
||||
id []byte
|
||||
|
||||
// key is the bucket key.
|
||||
key []byte
|
||||
|
||||
// tx holds the parent transaction.
|
||||
tx *readWriteTx
|
||||
}
|
||||
|
||||
// newReadWriteBucket creates a new rw bucket with the passed transaction
|
||||
// and bucket id.
|
||||
func newReadWriteBucket(tx *readWriteTx, key, id []byte) *readWriteBucket {
|
||||
return &readWriteBucket{
|
||||
id: id,
|
||||
key: key,
|
||||
tx: tx,
|
||||
}
|
||||
}
|
||||
|
||||
// NestedReadBucket retrieves a nested read bucket with the given key.
|
||||
// Returns nil if the bucket does not exist.
|
||||
func (b *readWriteBucket) NestedReadBucket(key []byte) walletdb.ReadBucket {
|
||||
return b.NestedReadWriteBucket(key)
|
||||
}
|
||||
|
||||
// ForEach invokes the passed function with every key/value pair in
|
||||
// the bucket. This includes nested buckets, in which case the value
|
||||
// is nil, but it does not include the key/value pairs within those
|
||||
// nested buckets.
|
||||
func (b *readWriteBucket) ForEach(cb func(k, v []byte) error) error {
|
||||
prefix := string(b.id)
|
||||
|
||||
// Get the first matching key that is in the bucket.
|
||||
kv, err := b.tx.stm.First(prefix)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for kv != nil {
|
||||
key, val := getKeyVal(kv)
|
||||
|
||||
if err := cb(key, val); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Step to the next key.
|
||||
kv, err = b.tx.stm.Next(prefix, kv.key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ForAll is an optimized version of ForEach for the case when we know we will
|
||||
// fetch all (or almost all) items.
|
||||
//
|
||||
// NOTE: ForAll differs from ForEach in that no additional queries can
|
||||
// be executed within the callback.
|
||||
func (b *readWriteBucket) ForAll(cb func(k, v []byte) error) error {
|
||||
// When we opened this bucket, we fetched the bucket key using the STM
|
||||
// which put a revision "lock" in the read set. We can leverage this
|
||||
// by incrementing the revision on the bucket, which forces a retry of any
// transaction that touches this same bucket. This way we can safely read all
|
||||
// keys from the bucket and not cache them in the STM.
|
||||
// To increment the bucket's revision, we simply put in the bucket key
|
||||
// value again (which is idempotent if the bucket has just been created).
|
||||
b.tx.stm.Put(string(b.key), string(b.id))
|
||||
|
||||
// TODO(bhandras): page size should be configurable in ForAll.
|
||||
return b.tx.stm.FetchRangePaginatedRaw(
|
||||
string(b.id), 1000,
|
||||
func(kv KV) error {
|
||||
key, val := getKeyVal(&kv)
|
||||
return cb(key, val)
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// Get returns the value for the given key. Returns nil if the key does
|
||||
// not exist in this bucket.
|
||||
func (b *readWriteBucket) Get(key []byte) []byte {
|
||||
// Return nil if the key is empty.
|
||||
if len(key) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Fetch the associated value.
|
||||
val, err := b.tx.stm.Get(string(makeValueKey(b.id, key)))
|
||||
if err != nil {
|
||||
// TODO: we should return the error once the
|
||||
// kvdb interface is extended.
|
||||
return nil
|
||||
}
|
||||
|
||||
if val == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return val
|
||||
}
|
||||
|
||||
func (b *readWriteBucket) ReadCursor() walletdb.ReadCursor {
|
||||
return newReadWriteCursor(b)
|
||||
}
|
||||
|
||||
// NestedReadWriteBucket retrieves a nested bucket with the given key.
|
||||
// Returns nil if the bucket does not exist.
|
||||
func (b *readWriteBucket) NestedReadWriteBucket(key []byte) walletdb.ReadWriteBucket {
|
||||
if len(key) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get the bucket id (and return nil if bucket doesn't exist).
|
||||
bucketKey := makeBucketKey(b.id, key)
|
||||
bucketVal, err := b.tx.stm.Get(string(bucketKey))
|
||||
if err != nil {
|
||||
// TODO: we should return the error once the
|
||||
// kvdb interface is extended.
|
||||
return nil
|
||||
}
|
||||
|
||||
if !isValidBucketID(bucketVal) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Return the bucket with the fetched bucket id.
|
||||
return newReadWriteBucket(b.tx, bucketKey, bucketVal)
|
||||
}
|
||||
|
||||
// assertNoValue checks if the value for the passed key exists.
|
||||
func (b *readWriteBucket) assertNoValue(key []byte) error {
|
||||
if !etcdDebug {
|
||||
return nil
|
||||
}
|
||||
|
||||
val, err := b.tx.stm.Get(string(makeValueKey(b.id, key)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if val != nil {
|
||||
return walletdb.ErrIncompatibleValue
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// assertNoBucket checks if the bucket for the passed key exists.
|
||||
func (b *readWriteBucket) assertNoBucket(key []byte) error {
|
||||
if !etcdDebug {
|
||||
return nil
|
||||
}
|
||||
|
||||
val, err := b.tx.stm.Get(string(makeBucketKey(b.id, key)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if val != nil {
|
||||
return walletdb.ErrIncompatibleValue
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateBucket creates and returns a new nested bucket with the given
|
||||
// key. Returns ErrBucketExists if the bucket already exists,
|
||||
// ErrBucketNameRequired if the key is empty, or ErrIncompatibleValue
|
||||
// if the key value is otherwise invalid for the particular database
|
||||
// implementation. Other errors are possible depending on the
|
||||
// implementation.
|
||||
func (b *readWriteBucket) CreateBucket(key []byte) (
|
||||
walletdb.ReadWriteBucket, error) {
|
||||
|
||||
if len(key) == 0 {
|
||||
return nil, walletdb.ErrBucketNameRequired
|
||||
}
|
||||
|
||||
// Check if the bucket already exists.
|
||||
bucketKey := makeBucketKey(b.id, key)
|
||||
|
||||
bucketVal, err := b.tx.stm.Get(string(bucketKey))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if isValidBucketID(bucketVal) {
|
||||
return nil, walletdb.ErrBucketExists
|
||||
}
|
||||
|
||||
if err := b.assertNoValue(key); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Create a deterministic bucket id from the bucket key.
|
||||
newID := makeBucketID(bucketKey)
|
||||
|
||||
// Create the bucket.
|
||||
b.tx.stm.Put(string(bucketKey), string(newID[:]))
|
||||
|
||||
return newReadWriteBucket(b.tx, bucketKey, newID[:]), nil
|
||||
}
|
||||
|
||||
// CreateBucketIfNotExists creates and returns a new nested bucket with
|
||||
// the given key if it does not already exist. Returns
|
||||
// ErrBucketNameRequired if the key is empty or ErrIncompatibleValue
|
||||
// if the key value is otherwise invalid for the particular database
|
||||
// backend. Other errors are possible depending on the implementation.
|
||||
func (b *readWriteBucket) CreateBucketIfNotExists(key []byte) (
|
||||
walletdb.ReadWriteBucket, error) {
|
||||
|
||||
if len(key) == 0 {
|
||||
return nil, walletdb.ErrBucketNameRequired
|
||||
}
|
||||
|
||||
// Check for the bucket and create if it doesn't exist.
|
||||
bucketKey := makeBucketKey(b.id, key)
|
||||
|
||||
bucketVal, err := b.tx.stm.Get(string(bucketKey))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !isValidBucketID(bucketVal) {
|
||||
if err := b.assertNoValue(key); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
newID := makeBucketID(bucketKey)
|
||||
b.tx.stm.Put(string(bucketKey), string(newID[:]))
|
||||
|
||||
return newReadWriteBucket(b.tx, bucketKey, newID[:]), nil
|
||||
}
|
||||
|
||||
// Otherwise return the bucket with the fetched bucket id.
|
||||
return newReadWriteBucket(b.tx, bucketKey, bucketVal), nil
|
||||
}
|
||||
|
||||
// DeleteNestedBucket deletes the nested bucket and its sub-buckets
|
||||
// pointed to by the passed key. All values in the bucket and sub-buckets
|
||||
// will be deleted as well.
|
||||
func (b *readWriteBucket) DeleteNestedBucket(key []byte) error {
|
||||
// TODO: shouldn't an empty key return ErrBucketNameRequired?
|
||||
if len(key) == 0 {
|
||||
return walletdb.ErrIncompatibleValue
|
||||
}
|
||||
|
||||
// Get the bucket first.
|
||||
bucketKey := string(makeBucketKey(b.id, key))
|
||||
|
||||
bucketVal, err := b.tx.stm.Get(bucketKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !isValidBucketID(bucketVal) {
|
||||
return walletdb.ErrBucketNotFound
|
||||
}
|
||||
|
||||
// Enqueue the top level bucket id.
|
||||
queue := [][]byte{bucketVal}
|
||||
|
||||
// Traverse the buckets breadth first.
|
||||
for len(queue) != 0 {
|
||||
if !isValidBucketID(queue[0]) {
|
||||
return walletdb.ErrBucketNotFound
|
||||
}
|
||||
|
||||
id := queue[0]
|
||||
queue = queue[1:]
|
||||
|
||||
kv, err := b.tx.stm.First(string(id))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for kv != nil {
|
||||
b.tx.stm.Del(kv.key)
|
||||
|
||||
if isBucketKey(kv.key) {
|
||||
queue = append(queue, []byte(kv.val))
|
||||
}
|
||||
|
||||
kv, err = b.tx.stm.Next(string(id), kv.key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Finally delete the sequence key for the bucket.
|
||||
b.tx.stm.Del(string(makeSequenceKey(id)))
|
||||
}
|
||||
|
||||
// Delete the top level bucket and sequence key.
|
||||
b.tx.stm.Del(bucketKey)
|
||||
b.tx.stm.Del(string(makeSequenceKey(bucketVal)))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Put updates the value for the passed key.
|
||||
// Returns ErrKeyRequired if the passed key is empty.
|
||||
func (b *readWriteBucket) Put(key, value []byte) error {
|
||||
if len(key) == 0 {
|
||||
return walletdb.ErrKeyRequired
|
||||
}
|
||||
|
||||
if err := b.assertNoBucket(key); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Update the transaction with the new value.
|
||||
b.tx.stm.Put(string(makeValueKey(b.id, key)), string(value))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete deletes the key/value pointed to by the passed key.
|
||||
// Returns ErrKeyRequired if the passed key is empty.
|
||||
func (b *readWriteBucket) Delete(key []byte) error {
|
||||
if key == nil {
|
||||
return nil
|
||||
}
|
||||
if len(key) == 0 {
|
||||
return walletdb.ErrKeyRequired
|
||||
}
|
||||
|
||||
// Update the transaction to delete the key/value.
|
||||
b.tx.stm.Del(string(makeValueKey(b.id, key)))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReadWriteCursor returns a new read-write cursor for this bucket.
|
||||
func (b *readWriteBucket) ReadWriteCursor() walletdb.ReadWriteCursor {
|
||||
return newReadWriteCursor(b)
|
||||
}
|
||||
|
||||
// Tx returns the buckets transaction.
|
||||
func (b *readWriteBucket) Tx() walletdb.ReadWriteTx {
|
||||
return b.tx
|
||||
}
|
||||
|
||||
// NextSequence returns an auto-incrementing sequence number for this bucket.
|
||||
// Note that this is not a thread safe function and as such it must not be used
|
||||
// for synchronization.
|
||||
func (b *readWriteBucket) NextSequence() (uint64, error) {
|
||||
seq := b.Sequence() + 1
|
||||
|
||||
return seq, b.SetSequence(seq)
|
||||
}
|
||||
|
||||
// SetSequence updates the sequence number for the bucket.
|
||||
func (b *readWriteBucket) SetSequence(v uint64) error {
|
||||
// Convert the number to string.
|
||||
val := strconv.FormatUint(v, 10)
|
||||
|
||||
// Update the transaction with the new value for the sequence key.
|
||||
b.tx.stm.Put(string(makeSequenceKey(b.id)), val)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Sequence returns the current sequence number for this bucket without
|
||||
// incrementing it.
|
||||
func (b *readWriteBucket) Sequence() uint64 {
|
||||
val, err := b.tx.stm.Get(string(makeSequenceKey(b.id)))
|
||||
if err != nil {
|
||||
// TODO: Update the kvdb interface such that an error
|
||||
// may be returned here.
|
||||
return 0
|
||||
}
|
||||
|
||||
if val == nil {
|
||||
// If the sequence number is not yet
|
||||
// stored, then take the default value.
|
||||
return 0
|
||||
}
|
||||
|
||||
// Otherwise try to parse a 64-bit unsigned integer from the value.
|
||||
num, _ := strconv.ParseUint(string(val), 10, 64)
|
||||
|
||||
return num
|
||||
}
|
||||
|
||||
func flattenMap(m map[string]struct{}) []string {
|
||||
result := make([]string, len(m))
|
||||
i := 0
|
||||
|
||||
for key := range m {
|
||||
result[i] = key
|
||||
i++
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// Prefetch will prefetch all keys in the passed-in paths as well as all bucket
|
||||
// keys along the paths.
|
||||
func (b *readWriteBucket) Prefetch(paths ...[]string) {
|
||||
keys := make(map[string]struct{})
|
||||
ranges := make(map[string]struct{})
|
||||
|
||||
for _, path := range paths {
|
||||
parent := b.id
|
||||
for _, bucket := range path {
|
||||
bucketKey := makeBucketKey(parent, []byte(bucket))
|
||||
keys[string(bucketKey[:])] = struct{}{}
|
||||
|
||||
id := makeBucketID(bucketKey)
|
||||
parent = id[:]
|
||||
}
|
||||
|
||||
ranges[string(parent)] = struct{}{}
|
||||
}
|
||||
|
||||
b.tx.stm.Prefetch(flattenMap(keys), flattenMap(ranges))
|
||||
}
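
A sketch of how Prefetch can be used to cut etcd round trips when a transaction is known to touch a whole subtree (bucket names are illustrative; the type assertion relies on the concrete bucket type of this package):

//go:build kvdb_etcd
// +build kvdb_etcd

package etcd

import "github.com/btcsuite/btcwallet/walletdb"

// examplePrefetch prefetches the apple/banana subtree before reading and
// writing individual keys inside it.
func examplePrefetch(db *db) error {
	return db.Update(func(tx walletdb.ReadWriteTx) error {
		apple := tx.ReadWriteBucket([]byte("apple"))
		if apple == nil {
			return walletdb.ErrBucketNotFound
		}

		// Prefetch the banana bucket key and all keys below it in a
		// single paginated range read.
		if b, ok := apple.(*readWriteBucket); ok {
			b.Prefetch([]string{"banana"})
		}

		banana := apple.NestedReadWriteBucket([]byte("banana"))
		if banana == nil {
			return walletdb.ErrBucketNotFound
		}

		return banana.Put([]byte("key"), []byte("val"))
	}, func() {})
}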
|
||||
127
server/pkg/kvdb/etcd/readwrite_cursor.go
Normal file
@@ -0,0 +1,127 @@
|
||||
//go:build kvdb_etcd
|
||||
// +build kvdb_etcd
|
||||
|
||||
package etcd
|
||||
|
||||
// readWriteCursor holds a reference to the cursors bucket, the value
|
||||
// prefix and the current key used while iterating.
|
||||
type readWriteCursor struct {
|
||||
// bucket holds the reference to the parent bucket.
|
||||
bucket *readWriteBucket
|
||||
|
||||
// prefix holds the value prefix which is in front of each
|
||||
// value key in the bucket.
|
||||
prefix string
|
||||
|
||||
// currKey holds the current key of the cursor.
|
||||
currKey string
|
||||
}
|
||||
|
||||
func newReadWriteCursor(bucket *readWriteBucket) *readWriteCursor {
|
||||
return &readWriteCursor{
|
||||
bucket: bucket,
|
||||
prefix: string(bucket.id),
|
||||
}
|
||||
}
|
||||
|
||||
// First positions the cursor at the first key/value pair and returns
|
||||
// the pair.
|
||||
func (c *readWriteCursor) First() (key, value []byte) {
|
||||
// Get the first key with the value prefix.
|
||||
kv, err := c.bucket.tx.stm.First(c.prefix)
|
||||
if err != nil {
|
||||
// TODO: revise this once kvdb interface supports errors
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if kv != nil {
|
||||
c.currKey = kv.key
|
||||
return getKeyVal(kv)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Last positions the cursor at the last key/value pair and returns the
|
||||
// pair.
|
||||
func (c *readWriteCursor) Last() (key, value []byte) {
|
||||
kv, err := c.bucket.tx.stm.Last(c.prefix)
|
||||
if err != nil {
|
||||
// TODO: revise this once kvdb interface supports errors
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if kv != nil {
|
||||
c.currKey = kv.key
|
||||
return getKeyVal(kv)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Next moves the cursor one key/value pair forward and returns the new
|
||||
// pair.
|
||||
func (c *readWriteCursor) Next() (key, value []byte) {
|
||||
kv, err := c.bucket.tx.stm.Next(c.prefix, c.currKey)
|
||||
if err != nil {
|
||||
// TODO: revise this once kvdb interface supports errors
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if kv != nil {
|
||||
c.currKey = kv.key
|
||||
return getKeyVal(kv)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Prev moves the cursor one key/value pair backward and returns the new
|
||||
// pair.
|
||||
func (c *readWriteCursor) Prev() (key, value []byte) {
|
||||
kv, err := c.bucket.tx.stm.Prev(c.prefix, c.currKey)
|
||||
if err != nil {
|
||||
// TODO: revise this once kvdb interface supports errors
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if kv != nil {
|
||||
c.currKey = kv.key
|
||||
return getKeyVal(kv)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Seek positions the cursor at the passed seek key. If the key does
|
||||
// not exist, the cursor is moved to the next key after seek. Returns
|
||||
// the new pair.
|
||||
func (c *readWriteCursor) Seek(seek []byte) (key, value []byte) {
|
||||
// Seek to the first key with prefix + seek. If that key is not present
|
||||
// STM will seek to the next matching key with prefix.
|
||||
kv, err := c.bucket.tx.stm.Seek(c.prefix, c.prefix+string(seek))
|
||||
if err != nil {
|
||||
// TODO: revise this once kvdb interface supports errors
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if kv != nil {
|
||||
c.currKey = kv.key
|
||||
return getKeyVal(kv)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Delete removes the current key/value pair the cursor is at without
|
||||
// invalidating the cursor. Returns ErrIncompatibleValue if attempted
|
||||
// when the cursor points to a nested bucket.
|
||||
func (c *readWriteCursor) Delete() error {
|
||||
if isBucketKey(c.currKey) {
|
||||
c.bucket.DeleteNestedBucket(getKey(c.currKey))
|
||||
} else {
|
||||
c.bucket.Delete(getKey(c.currKey))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
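
A small sketch of iterating a bucket with the cursor defined above, following the usual walletdb cursor contract (names are illustrative):

//go:build kvdb_etcd
// +build kvdb_etcd

package etcd

import "github.com/btcsuite/btcwallet/walletdb"

// exampleScan walks every key/value pair of the apple bucket in key order
// inside a read-only transaction.
func exampleScan(db *db) error {
	return db.View(func(tx walletdb.ReadTx) error {
		apple := tx.ReadBucket([]byte("apple"))
		if apple == nil {
			return walletdb.ErrBucketNotFound
		}

		cursor := apple.ReadCursor()
		for k, v := cursor.First(); k != nil; k, v = cursor.Next() {
			// Process the key/value pair here.
			_ = v
		}

		return nil
	}, func() {})
}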
|
||||
139
server/pkg/kvdb/etcd/readwrite_tx.go
Normal file
@@ -0,0 +1,139 @@
|
||||
//go:build kvdb_etcd
|
||||
// +build kvdb_etcd
|
||||
|
||||
package etcd
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/btcsuite/btcwallet/walletdb"
|
||||
)
|
||||
|
||||
// readWriteTx holds a reference to the STM transaction.
|
||||
type readWriteTx struct {
|
||||
// stm is the reference to the parent STM.
|
||||
stm STM
|
||||
|
||||
// rootBucketID holds the sha256 hash of the root bucket id, which is used
|
||||
// for key space spearation.
|
||||
rootBucketID [bucketIDLength]byte
|
||||
|
||||
// active is true if the transaction hasn't been committed yet.
|
||||
active bool
|
||||
|
||||
// lock is passed on for manual transactions when the backend is instantiated
// in single writer mode, such that transactions are read/write locked to
// ensure a single writer.
|
||||
lock sync.Locker
|
||||
}
|
||||
|
||||
// newReadWriteTx creates an rw transaction with the passed STM.
|
||||
func newReadWriteTx(stm STM, prefix string, lock sync.Locker) *readWriteTx {
|
||||
return &readWriteTx{
|
||||
stm: stm,
|
||||
active: true,
|
||||
lock: lock,
|
||||
rootBucketID: makeBucketID([]byte(prefix)),
|
||||
}
|
||||
}
|
||||
|
||||
// rootBucket is a helper function to return the always present
|
||||
// pseudo root bucket.
|
||||
func rootBucket(tx *readWriteTx) *readWriteBucket {
|
||||
return newReadWriteBucket(tx, tx.rootBucketID[:], tx.rootBucketID[:])
|
||||
}
|
||||
|
||||
// RootBucket will return a handle to the root bucket. This is not a real handle
|
||||
// but just a wrapper around the root bucket ID to allow derivation of child
|
||||
// keys.
|
||||
func (tx *readWriteTx) RootBucket() walletdb.ReadBucket {
|
||||
return rootBucket(tx)
|
||||
}
|
||||
|
||||
// ReadBucket opens the root bucket for read only access. If the bucket
|
||||
// described by the key does not exist, nil is returned.
|
||||
func (tx *readWriteTx) ReadBucket(key []byte) walletdb.ReadBucket {
|
||||
return rootBucket(tx).NestedReadWriteBucket(key)
|
||||
}
|
||||
|
||||
// ForEachBucket iterates through all top level buckets.
|
||||
func (tx *readWriteTx) ForEachBucket(fn func(key []byte) error) error {
|
||||
root := rootBucket(tx)
|
||||
// We can safely use ForEach here since on the top level there are
|
||||
// no values, only buckets.
|
||||
return root.ForEach(func(key []byte, val []byte) error {
|
||||
if val != nil {
|
||||
// A non-nil value would mean that we have a non
|
||||
// walletdb/kvdb compatible database containing
|
||||
// arbitrary key/values.
|
||||
return walletdb.ErrInvalid
|
||||
}
|
||||
|
||||
return fn(key)
|
||||
})
|
||||
}
|
||||
|
||||
// Rollback closes the transaction, discarding changes (if any) if the
|
||||
// database was modified by a write transaction.
|
||||
func (tx *readWriteTx) Rollback() error {
|
||||
// If the transaction has been closed, rollback will fail.
|
||||
if !tx.active {
|
||||
return walletdb.ErrTxClosed
|
||||
}
|
||||
|
||||
if tx.lock != nil {
|
||||
defer tx.lock.Unlock()
|
||||
}
|
||||
|
||||
// Rollback the STM and set the tx to inactive.
|
||||
tx.stm.Rollback()
|
||||
tx.active = false
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReadWriteBucket opens the root bucket for read/write access. If the
|
||||
// bucket described by the key does not exist, nil is returned.
|
||||
func (tx *readWriteTx) ReadWriteBucket(key []byte) walletdb.ReadWriteBucket {
|
||||
return rootBucket(tx).NestedReadWriteBucket(key)
|
||||
}
|
||||
|
||||
// CreateTopLevelBucket creates the top level bucket for a key if it
|
||||
// does not exist. The newly-created bucket is returned.
|
||||
func (tx *readWriteTx) CreateTopLevelBucket(key []byte) (walletdb.ReadWriteBucket, error) {
|
||||
return rootBucket(tx).CreateBucketIfNotExists(key)
|
||||
}
|
||||
|
||||
// DeleteTopLevelBucket deletes the top level bucket for a key. This
|
||||
// errors if the bucket cannot be found or the key references a single value
|
||||
// instead of a bucket.
|
||||
func (tx *readWriteTx) DeleteTopLevelBucket(key []byte) error {
|
||||
return rootBucket(tx).DeleteNestedBucket(key)
|
||||
}
|
||||
|
||||
// Commit commits the transaction if not already committed. Will return
|
||||
// error if the underlying STM fails.
|
||||
func (tx *readWriteTx) Commit() error {
|
||||
// Commit will fail if the transaction is already committed.
|
||||
if !tx.active {
|
||||
return walletdb.ErrTxClosed
|
||||
}
|
||||
|
||||
if tx.lock != nil {
|
||||
defer tx.lock.Unlock()
|
||||
}
|
||||
|
||||
// Try committing the transaction.
|
||||
if err := tx.stm.Commit(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Mark the transaction as not active after commit.
|
||||
tx.active = false
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// OnCommit sets the commit callback (overriding if already set).
|
||||
func (tx *readWriteTx) OnCommit(cb func()) {
|
||||
tx.stm.OnCommit(cb)
|
||||
}
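
And a sketch of driving a manual transaction with the Commit/Rollback methods above (mirroring the flow of the manual-transaction test that follows):

//go:build kvdb_etcd
// +build kvdb_etcd

package etcd

// exampleManualTx writes one key through an explicitly managed transaction.
func exampleManualTx(db *db) error {
	tx, err := db.BeginReadWriteTx()
	if err != nil {
		return err
	}

	apple, err := tx.CreateTopLevelBucket([]byte("apple"))
	if err != nil {
		_ = tx.Rollback()
		return err
	}

	if err := apple.Put([]byte("key"), []byte("val")); err != nil {
		_ = tx.Rollback()
		return err
	}

	// Commit may fail with a CommitError if a conflicting write reached
	// etcd while this transaction was open.
	return tx.Commit()
}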
|
||||
92
server/pkg/kvdb/etcd/readwrite_tx_test.go
Normal file
@@ -0,0 +1,92 @@
|
||||
//go:build kvdb_etcd
|
||||
// +build kvdb_etcd
|
||||
|
||||
package etcd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/btcsuite/btcwallet/walletdb"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestChangeDuringManualTx(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
f := NewEtcdTestFixture(t)
|
||||
|
||||
db, err := newEtcdBackend(context.TODO(), f.BackendConfig())
|
||||
require.NoError(t, err)
|
||||
|
||||
tx, err := db.BeginReadWriteTx()
|
||||
require.Nil(t, err)
|
||||
require.NotNil(t, tx)
|
||||
|
||||
apple, err := tx.CreateTopLevelBucket([]byte("apple"))
|
||||
require.Nil(t, err)
|
||||
require.NotNil(t, apple)
|
||||
|
||||
require.NoError(t, apple.Put([]byte("testKey"), []byte("testVal")))
|
||||
|
||||
// Try overwriting the bucket key.
|
||||
f.Put(BucketKey("apple"), "banana")
|
||||
|
||||
// TODO: translate error
|
||||
require.NotNil(t, tx.Commit())
|
||||
require.Equal(t, map[string]string{
|
||||
BucketKey("apple"): "banana",
|
||||
}, f.Dump())
|
||||
}
|
||||
|
||||
func TestChangeDuringUpdate(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
f := NewEtcdTestFixture(t)
|
||||
|
||||
db, err := newEtcdBackend(context.TODO(), f.BackendConfig())
|
||||
require.NoError(t, err)
|
||||
|
||||
count := 0
|
||||
|
||||
err = db.Update(func(tx walletdb.ReadWriteTx) error {
|
||||
apple, err := tx.CreateTopLevelBucket([]byte("apple"))
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, apple)
|
||||
|
||||
require.NoError(t, apple.Put([]byte("key"), []byte("value")))
|
||||
|
||||
if count == 0 {
|
||||
f.Put(ValueKey("key", "apple"), "new_value")
|
||||
f.Put(ValueKey("key2", "apple"), "value2")
|
||||
}
|
||||
|
||||
cursor := apple.ReadCursor()
|
||||
k, v := cursor.First()
|
||||
require.Equal(t, []byte("key"), k)
|
||||
require.Equal(t, []byte("value"), v)
|
||||
require.Equal(t, v, apple.Get([]byte("key")))
|
||||
|
||||
k, v = cursor.Next()
|
||||
if count == 0 {
|
||||
require.Nil(t, k)
|
||||
require.Nil(t, v)
|
||||
} else {
|
||||
require.Equal(t, []byte("key2"), k)
|
||||
require.Equal(t, []byte("value2"), v)
|
||||
}
|
||||
|
||||
count++
|
||||
return nil
|
||||
}, func() {})
|
||||
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, count, 2)
|
||||
|
||||
expected := map[string]string{
|
||||
BucketKey("apple"): BucketVal("apple"),
|
||||
ValueKey("key", "apple"): "value",
|
||||
ValueKey("key2", "apple"): "value2",
|
||||
}
|
||||
require.Equal(t, expected, f.Dump())
|
||||
}
|
||||
1210
server/pkg/kvdb/etcd/stm.go
Normal file
File diff suppressed because it is too large
412
server/pkg/kvdb/etcd/stm_test.go
Normal file
@@ -0,0 +1,412 @@
|
||||
//go:build kvdb_etcd
|
||||
// +build kvdb_etcd
|
||||
|
||||
package etcd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func reverseKVs(a []KV) []KV {
|
||||
for i, j := 0, len(a)-1; i < j; i, j = i+1, j-1 {
|
||||
a[i], a[j] = a[j], a[i]
|
||||
}
|
||||
|
||||
return a
|
||||
}
|
||||
|
||||
func TestPutToEmpty(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
f := NewEtcdTestFixture(t)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
txQueue := NewCommitQueue(ctx)
|
||||
t.Cleanup(func() {
|
||||
cancel()
|
||||
txQueue.Stop()
|
||||
})
|
||||
|
||||
db, err := newEtcdBackend(ctx, f.BackendConfig())
|
||||
require.NoError(t, err)
|
||||
|
||||
apply := func(stm STM) error {
|
||||
stm.Put("123", "abc")
|
||||
return nil
|
||||
}
|
||||
|
||||
callCount, err := RunSTM(db.cli, apply, txQueue)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, callCount)
|
||||
|
||||
require.Equal(t, "abc", f.Get("123"))
|
||||
}
|
||||
|
||||
func TestGetPutDel(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
f := NewEtcdTestFixture(t)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
txQueue := NewCommitQueue(ctx)
|
||||
t.Cleanup(func() {
|
||||
cancel()
|
||||
txQueue.Stop()
|
||||
})
|
||||
|
||||
testKeyValues := []KV{
|
||||
{"a", "1"},
|
||||
{"b", "2"},
|
||||
{"c", "3"},
|
||||
{"d", "4"},
|
||||
{"e", "5"},
|
||||
}
|
||||
|
||||
// Extra 2 => Get(x), Commit()
|
||||
expectedCallCount := len(testKeyValues) + 2
|
||||
|
||||
for _, kv := range testKeyValues {
|
||||
f.Put(kv.key, kv.val)
|
||||
}
|
||||
|
||||
db, err := newEtcdBackend(ctx, f.BackendConfig())
|
||||
require.NoError(t, err)
|
||||
|
||||
apply := func(stm STM) error {
|
||||
// Get some non existing keys.
|
||||
v, err := stm.Get("")
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, v)
|
||||
|
||||
// Fetches: 1.
|
||||
v, err = stm.Get("x")
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, v)
|
||||
|
||||
// Get all existing keys. Fetches: len(testKeyValues)
|
||||
for _, kv := range testKeyValues {
|
||||
v, err = stm.Get(kv.key)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte(kv.val), v)
|
||||
}
|
||||
|
||||
// Overwrite, then delete an existing key.
|
||||
stm.Put("c", "6")
|
||||
|
||||
v, err = stm.Get("c")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte("6"), v)
|
||||
|
||||
stm.Del("c")
|
||||
|
||||
v, err = stm.Get("c")
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, v)
|
||||
|
||||
// Re-add the deleted key.
|
||||
stm.Put("c", "7")
|
||||
|
||||
v, err = stm.Get("c")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte("7"), v)
|
||||
|
||||
// Add a new key.
|
||||
stm.Put("x", "x")
|
||||
|
||||
v, err = stm.Get("x")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte("x"), v)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
callCount, err := RunSTM(db.cli, apply, txQueue)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expectedCallCount, callCount)
|
||||
|
||||
require.Equal(t, "1", f.Get("a"))
|
||||
require.Equal(t, "2", f.Get("b"))
|
||||
require.Equal(t, "7", f.Get("c"))
|
||||
require.Equal(t, "4", f.Get("d"))
|
||||
require.Equal(t, "5", f.Get("e"))
|
||||
require.Equal(t, "x", f.Get("x"))
|
||||
}
|
||||
|
||||
func TestFirstLastNextPrev(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
testFirstLastNextPrev(t, nil, nil, 41)
|
||||
testFirstLastNextPrev(t, nil, []string{"k"}, 4)
|
||||
testFirstLastNextPrev(t, nil, []string{"k", "w"}, 2)
|
||||
testFirstLastNextPrev(t, []string{"kb"}, nil, 42)
|
||||
testFirstLastNextPrev(t, []string{"kb", "ke"}, nil, 42)
|
||||
testFirstLastNextPrev(t, []string{"kb", "ke", "w"}, []string{"k", "w"}, 2)
|
||||
}
|
||||
|
||||
func testFirstLastNextPrev(t *testing.T, prefetchKeys []string,
|
||||
prefetchRange []string, expectedCallCount int) {
|
||||
|
||||
f := NewEtcdTestFixture(t)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
txQueue := NewCommitQueue(ctx)
|
||||
t.Cleanup(func() {
|
||||
cancel()
|
||||
txQueue.Stop()
|
||||
})
|
||||
|
||||
testKeyValues := []KV{
|
||||
{"kb", "1"},
|
||||
{"kc", "2"},
|
||||
{"kda", "3"},
|
||||
{"ke", "4"},
|
||||
{"w", "w"},
|
||||
}
|
||||
for _, kv := range testKeyValues {
|
||||
f.Put(kv.key, kv.val)
|
||||
}
|
||||
|
||||
db, err := newEtcdBackend(ctx, f.BackendConfig())
|
||||
require.NoError(t, err)
|
||||
|
||||
apply := func(stm STM) error {
|
||||
stm.Prefetch(prefetchKeys, prefetchRange)
|
||||
|
||||
// First/Last on valid multi item interval.
|
||||
kv, err := stm.First("k")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, &KV{"kb", "1"}, kv)
|
||||
|
||||
kv, err = stm.Last("k")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, &KV{"ke", "4"}, kv)
|
||||
|
||||
// First/Last on single item interval.
|
||||
kv, err = stm.First("w")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, &KV{"w", "w"}, kv)
|
||||
|
||||
kv, err = stm.Last("w")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, &KV{"w", "w"}, kv)
|
||||
|
||||
// Non existing.
|
||||
val, err := stm.Get("ke1")
|
||||
require.Nil(t, val)
|
||||
require.Nil(t, err)
|
||||
|
||||
val, err = stm.Get("ke2")
|
||||
require.Nil(t, val)
|
||||
require.Nil(t, err)
|
||||
|
||||
// Next/Prev on start/end.
|
||||
kv, err = stm.Next("k", "ke")
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, kv)
|
||||
|
||||
// Non existing.
|
||||
val, err = stm.Get("ka")
|
||||
require.Nil(t, val)
|
||||
require.Nil(t, err)
|
||||
|
||||
kv, err = stm.Prev("k", "kb")
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, kv)
|
||||
|
||||
// Next/Prev in the middle.
|
||||
kv, err = stm.Next("k", "kc")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, &KV{"kda", "3"}, kv)
|
||||
|
||||
kv, err = stm.Prev("k", "ke")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, &KV{"kda", "3"}, kv)
|
||||
|
||||
// Delete first item, then add an item before the
|
||||
// deleted one. Check that First/Next will "jump"
|
||||
// over the deleted item and return the new first.
|
||||
stm.Del("kb")
|
||||
stm.Put("ka", "0")
|
||||
|
||||
kv, err = stm.First("k")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, &KV{"ka", "0"}, kv)
|
||||
|
||||
kv, err = stm.Prev("k", "kc")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, &KV{"ka", "0"}, kv)
|
||||
|
||||
// Similarly test that a new end is returned if
|
||||
// the old end is deleted first.
|
||||
stm.Del("ke")
|
||||
stm.Put("kf", "5")
|
||||
|
||||
kv, err = stm.Last("k")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, &KV{"kf", "5"}, kv)
|
||||
|
||||
kv, err = stm.Next("k", "kda")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, &KV{"kf", "5"}, kv)
|
||||
|
||||
// Overwrite one in the middle.
|
||||
stm.Put("kda", "6")
|
||||
|
||||
kv, err = stm.Next("k", "kc")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, &KV{"kda", "6"}, kv)
|
||||
|
||||
// Add three in the middle, then delete one.
|
||||
stm.Put("kdb", "7")
|
||||
stm.Put("kdc", "8")
|
||||
stm.Put("kdd", "9")
|
||||
stm.Del("kdc")
|
||||
|
||||
// Check that stepping from first to last returns
|
||||
// the expected sequence.
|
||||
var kvs []KV
|
||||
|
||||
curr, err := stm.First("k")
|
||||
require.NoError(t, err)
|
||||
|
||||
for curr != nil {
|
||||
kvs = append(kvs, *curr)
|
||||
curr, err = stm.Next("k", curr.key)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
expected := []KV{
|
||||
{"ka", "0"},
|
||||
{"kc", "2"},
|
||||
{"kda", "6"},
|
||||
{"kdb", "7"},
|
||||
{"kdd", "9"},
|
||||
{"kf", "5"},
|
||||
}
|
||||
require.Equal(t, expected, kvs)
|
||||
|
||||
// Similarly check that stepping from last to first
|
||||
// returns the expected sequence.
|
||||
kvs = []KV{}
|
||||
|
||||
curr, err = stm.Last("k")
|
||||
require.NoError(t, err)
|
||||
|
||||
for curr != nil {
|
||||
kvs = append(kvs, *curr)
|
||||
curr, err = stm.Prev("k", curr.key)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
expected = reverseKVs(expected)
|
||||
require.Equal(t, expected, kvs)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
callCount, err := RunSTM(db.cli, apply, txQueue)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expectedCallCount, callCount)
|
||||
|
||||
require.Equal(t, "0", f.Get("ka"))
|
||||
require.Equal(t, "2", f.Get("kc"))
|
||||
require.Equal(t, "6", f.Get("kda"))
|
||||
require.Equal(t, "7", f.Get("kdb"))
|
||||
require.Equal(t, "9", f.Get("kdd"))
|
||||
require.Equal(t, "5", f.Get("kf"))
|
||||
require.Equal(t, "w", f.Get("w"))
|
||||
}
|
||||
|
||||
func TestCommitError(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
f := NewEtcdTestFixture(t)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
txQueue := NewCommitQueue(ctx)
|
||||
t.Cleanup(func() {
|
||||
cancel()
|
||||
txQueue.Stop()
|
||||
})
|
||||
|
||||
db, err := newEtcdBackend(ctx, f.BackendConfig())
|
||||
require.NoError(t, err)
|
||||
|
||||
// Preset DB state.
|
||||
f.Put("123", "xyz")
|
||||
|
||||
// Count the number of applies.
|
||||
cnt := 0
|
||||
|
||||
apply := func(stm STM) error {
|
||||
// STM must have the key/value.
|
||||
val, err := stm.Get("123")
|
||||
require.NoError(t, err)
|
||||
|
||||
if cnt == 0 {
|
||||
require.Equal(t, []byte("xyz"), val)
|
||||
|
||||
// Put a conflicting key/value during the first apply.
|
||||
f.Put("123", "def")
|
||||
}
|
||||
|
||||
// We expect this write to eventually succeed after the conflict forces a retry.
|
||||
stm.Put("123", "abc")
|
||||
|
||||
cnt++
|
||||
return nil
|
||||
}
|
||||
|
||||
callCount, err := RunSTM(db.cli, apply, txQueue)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, cnt)
|
||||
// Get() + 2 * Commit().
|
||||
require.Equal(t, 3, callCount)
|
||||
|
||||
require.Equal(t, "abc", f.Get("123"))
|
||||
}
|
||||
|
||||
func TestManualTxError(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
f := NewEtcdTestFixture(t)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
txQueue := NewCommitQueue(ctx)
|
||||
t.Cleanup(func() {
|
||||
cancel()
|
||||
txQueue.Stop()
|
||||
})
|
||||
|
||||
db, err := newEtcdBackend(ctx, f.BackendConfig())
|
||||
require.NoError(t, err)
|
||||
|
||||
// Preset DB state.
|
||||
f.Put("123", "xyz")
|
||||
|
||||
stm := NewSTM(db.cli, txQueue)
|
||||
|
||||
val, err := stm.Get("123")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte("xyz"), val)
|
||||
|
||||
// Put a conflicting key/value.
|
||||
f.Put("123", "def")
|
||||
|
||||
// Should still get the original version.
|
||||
val, err = stm.Get("123")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte("xyz"), val)
|
||||
|
||||
// Commit will fail with CommitError.
|
||||
err = stm.Commit()
|
||||
var e CommitError
|
||||
require.True(t, errors.As(err, &e))
|
||||
|
||||
// We expect that the transaction indeed did not commit.
|
||||
require.Equal(t, "def", f.Get("123"))
|
||||
}
|
||||
19
server/pkg/kvdb/etcd/walletdb_interface_test.go
Normal file
@@ -0,0 +1,19 @@
|
||||
//go:build kvdb_etcd
|
||||
// +build kvdb_etcd
|
||||
|
||||
package etcd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/btcsuite/btcwallet/walletdb/walletdbtest"
|
||||
)
|
||||
|
||||
// TestWalletDBInterface performs the WalletDB interface test suite for the
|
||||
// etcd database driver.
|
||||
func TestWalletDBInterface(t *testing.T) {
|
||||
f := NewEtcdTestFixture(t)
|
||||
cfg := f.BackendConfig()
|
||||
walletdbtest.TestInterface(t, dbType, context.TODO(), &cfg)
|
||||
}
|
||||
171
server/pkg/kvdb/etcd_test.go
Normal file
@@ -0,0 +1,171 @@
|
||||
//go:build kvdb_etcd
|
||||
// +build kvdb_etcd
|
||||
|
||||
package kvdb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/ark-network/tools/kvdb/etcd"
|
||||
"github.com/btcsuite/btcwallet/walletdb"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var (
|
||||
bkey = etcd.BucketKey
|
||||
bval = etcd.BucketVal
|
||||
vkey = etcd.ValueKey
|
||||
)
|
||||
|
||||
func TestEtcd(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
debugOnly bool
|
||||
test func(*testing.T, walletdb.DB)
|
||||
expectedDb map[string]string
|
||||
}{
|
||||
{
|
||||
name: "read cursor empty interval",
|
||||
test: testReadCursorEmptyInterval,
|
||||
},
|
||||
{
|
||||
name: "read cursor non empty interval",
|
||||
test: testReadCursorNonEmptyInterval,
|
||||
},
|
||||
{
|
||||
name: "read write cursor",
|
||||
test: testReadWriteCursor,
|
||||
expectedDb: map[string]string{
|
||||
bkey("apple"): bval("apple"),
|
||||
vkey("a", "apple"): "0",
|
||||
vkey("c", "apple"): "3",
|
||||
vkey("cx", "apple"): "x",
|
||||
vkey("cy", "apple"): "y",
|
||||
vkey("da", "apple"): "3",
|
||||
vkey("f", "apple"): "5",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "read write cursor with bucket and value",
|
||||
test: testReadWriteCursorWithBucketAndValue,
|
||||
expectedDb: map[string]string{
|
||||
bkey("apple"): bval("apple"),
|
||||
bkey("apple", "banana"): bval("apple", "banana"),
|
||||
bkey("apple", "pear"): bval("apple", "pear"),
|
||||
vkey("key", "apple"): "val",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "bucket creation",
|
||||
test: testBucketCreation,
|
||||
expectedDb: map[string]string{
|
||||
bkey("apple"): bval("apple"),
|
||||
bkey("apple", "banana"): bval("apple", "banana"),
|
||||
bkey("apple", "mango"): bval("apple", "mango"),
|
||||
bkey("apple", "banana", "pear"): bval("apple", "banana", "pear"),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "bucket deletion",
|
||||
test: testBucketDeletion,
|
||||
expectedDb: map[string]string{
|
||||
bkey("apple"): bval("apple"),
|
||||
bkey("apple", "banana"): bval("apple", "banana"),
|
||||
vkey("key1", "apple", "banana"): "val1",
|
||||
vkey("key3", "apple", "banana"): "val3",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "bucket for each",
|
||||
test: testBucketForEach,
|
||||
expectedDb: map[string]string{
|
||||
bkey("apple"): bval("apple"),
|
||||
bkey("apple", "banana"): bval("apple", "banana"),
|
||||
vkey("key1", "apple"): "val1",
|
||||
vkey("key2", "apple"): "val2",
|
||||
vkey("key3", "apple"): "val3",
|
||||
vkey("key1", "apple", "banana"): "val1",
|
||||
vkey("key2", "apple", "banana"): "val2",
|
||||
vkey("key3", "apple", "banana"): "val3",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "bucket for each with error",
|
||||
test: testBucketForEachWithError,
|
||||
expectedDb: map[string]string{
|
||||
bkey("apple"): bval("apple"),
|
||||
bkey("apple", "banana"): bval("apple", "banana"),
|
||||
bkey("apple", "pear"): bval("apple", "pear"),
|
||||
vkey("key1", "apple"): "val1",
|
||||
vkey("key2", "apple"): "val2",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "bucket sequence",
|
||||
test: testBucketSequence,
|
||||
},
|
||||
{
|
||||
name: "key clash",
|
||||
debugOnly: true,
|
||||
test: testKeyClash,
|
||||
expectedDb: map[string]string{
|
||||
bkey("apple"): bval("apple"),
|
||||
bkey("apple", "banana"): bval("apple", "banana"),
|
||||
vkey("key", "apple"): "val",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "bucket create delete",
|
||||
test: testBucketCreateDelete,
|
||||
expectedDb: map[string]string{
|
||||
vkey("banana", "apple"): "value",
|
||||
bkey("apple"): bval("apple"),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "tx manual commit",
|
||||
test: testTxManualCommit,
|
||||
expectedDb: map[string]string{
|
||||
bkey("apple"): bval("apple"),
|
||||
vkey("testKey", "apple"): "testVal",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "tx rollback",
|
||||
test: testTxRollback,
|
||||
expectedDb: map[string]string{},
|
||||
},
|
||||
{
|
||||
name: "prefetch",
|
||||
test: testPrefetch,
|
||||
expectedDb: map[string]string{},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
test := test
|
||||
|
||||
if test.debugOnly && !etcdDebug {
|
||||
continue
|
||||
}
|
||||
|
||||
rwLock := []bool{false, true}
|
||||
for _, doRwLock := range rwLock {
|
||||
name := fmt.Sprintf("%v/RWLock=%v", test.name, doRwLock)
|
||||
|
||||
t.Run(name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
f := etcd.NewEtcdTestFixture(t)
|
||||
|
||||
test.test(t, f.NewBackend(doRwLock))
|
||||
|
||||
if test.expectedDb != nil {
|
||||
dump := f.Dump()
|
||||
require.Equal(t, test.expectedDb, dump)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
}
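Each case in the table above runs twice, with and without the RW lock, against a fresh embedded etcd fixture, and expectedDb is compared against a dump of the flattened key space. For orientation, a single case could be exercised on its own along these lines; this is a sketch reusing only helpers that already appear in this file, and it still requires the kvdb_etcd build tag.

	func TestBucketCreationStandalone(t *testing.T) {
		t.Parallel()

		f := etcd.NewEtcdTestFixture(t)

		// Run one table case against a backend without the RW lock.
		testBucketCreation(t, f.NewBackend(false))

		// Every created bucket shows up as one flattened etcd key.
		dump := f.Dump()
		require.Contains(t, dump, bkey("apple"))
		require.Contains(t, dump, bkey("apple", "banana"))
		require.Contains(t, dump, bkey("apple", "banana", "pear"))
	}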
|
||||
88
server/pkg/kvdb/go.mod
Normal file
@@ -0,0 +1,88 @@
|
||||
module github.com/ark-network/tools/kvdb
|
||||
|
||||
go 1.22.4
|
||||
|
||||
require (
|
||||
github.com/btcsuite/btcwallet/walletdb v1.4.2
|
||||
github.com/davecgh/go-spew v1.1.1
|
||||
github.com/google/btree v1.1.2
|
||||
github.com/lightningnetwork/lnd/healthcheck v1.2.5
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/stretchr/testify v1.9.0
|
||||
go.etcd.io/bbolt v1.3.10
|
||||
go.etcd.io/etcd/api/v3 v3.5.15
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.15
|
||||
go.etcd.io/etcd/client/v3 v3.5.15
|
||||
go.etcd.io/etcd/server/v3 v3.5.15
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go/compute v1.23.3 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/btcsuite/btcd v0.23.2 // indirect
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 // indirect
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101 // indirect
|
||||
github.com/coreos/go-semver v0.3.0 // indirect
|
||||
github.com/coreos/go-systemd/v22 v22.3.2 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/go-logr/logr v1.3.0 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang-jwt/jwt/v4 v4.4.2 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/gorilla/websocket v1.4.2 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
|
||||
github.com/jonboulle/clockwork v0.2.2 // indirect
|
||||
github.com/json-iterator/go v1.1.11 // indirect
|
||||
github.com/lightningnetwork/lnd/ticker v1.1.0 // indirect
|
||||
github.com/lightningnetwork/lnd/tor v1.0.0 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
|
||||
github.com/miekg/dns v1.1.43 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/client_golang v1.11.1 // indirect
|
||||
github.com/prometheus/client_model v0.2.0 // indirect
|
||||
github.com/prometheus/common v0.26.0 // indirect
|
||||
github.com/prometheus/procfs v0.6.0 // indirect
|
||||
github.com/soheilhy/cmux v0.1.5 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 // indirect
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect
|
||||
go.etcd.io/etcd/client/v2 v2.305.15 // indirect
|
||||
go.etcd.io/etcd/pkg/v3 v3.5.15 // indirect
|
||||
go.etcd.io/etcd/raft/v3 v3.5.15 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 // indirect
|
||||
go.opentelemetry.io/otel v1.20.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.20.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.20.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.20.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
|
||||
go.uber.org/atomic v1.7.0 // indirect
|
||||
go.uber.org/multierr v1.6.0 // indirect
|
||||
go.uber.org/zap v1.17.0 // indirect
|
||||
golang.org/x/crypto v0.21.0 // indirect
|
||||
golang.org/x/net v0.23.0 // indirect
|
||||
golang.org/x/oauth2 v0.14.0 // indirect
|
||||
golang.org/x/sync v0.6.0 // indirect
|
||||
golang.org/x/sys v0.19.0 // indirect
|
||||
golang.org/x/text v0.14.0 // indirect
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 // indirect
|
||||
google.golang.org/grpc v1.59.0 // indirect
|
||||
google.golang.org/protobuf v1.33.0 // indirect
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
sigs.k8s.io/yaml v1.2.0 // indirect
|
||||
)
|
||||
437
server/pkg/kvdb/go.sum
Normal file
@@ -0,0 +1,437 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.110.8 h1:tyNdfIxjzaWctIiLYOTalaLKZ17SI44SKFW26QbOhME=
|
||||
cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk=
|
||||
cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI=
|
||||
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
|
||||
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
|
||||
github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M=
|
||||
github.com/btcsuite/btcd v0.22.0-beta.0.20220207191057-4dc4ff7963b4/go.mod h1:7alexyj/lHlOtr2PJK7L/+HDJZpcGDn/pAU98r7DY08=
|
||||
github.com/btcsuite/btcd v0.23.2 h1:/YOgUp25sdCnP5ho6Hl3s0E438zlX+Kak7E6TgBgoT0=
|
||||
github.com/btcsuite/btcd v0.23.2/go.mod h1:0QJIIN1wwIXF/3G/m87gIwGniDMDQqjVn4SZgnFpsYY=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA=
|
||||
github.com/btcsuite/btcd/btcutil v1.0.0/go.mod h1:Uoxwv0pqYWhD//tfTiipkxNfdhG9UrLwaeswfjfdF0A=
|
||||
github.com/btcsuite/btcd/btcutil v1.1.0/go.mod h1:5OapHB7A2hBBWLm48mmw4MOHNJCcUBTwmWH/0Jn8VHE=
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U=
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
|
||||
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
|
||||
github.com/btcsuite/btcwallet/walletdb v1.4.2 h1:zwZZ+zaHo4mK+FAN6KeK85S3oOm+92x2avsHvFAhVBE=
|
||||
github.com/btcsuite/btcwallet/walletdb v1.4.2/go.mod h1:7ZQ+BvOEre90YT7eSq8bLoxTsgXidUzA/mqbRS114CQ=
|
||||
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
|
||||
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
|
||||
github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I=
|
||||
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
|
||||
github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
|
||||
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
|
||||
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
|
||||
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
|
||||
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101 h1:7To3pQ+pZo0i3dsWEbinPNFs5gPSBOsJtx3wTT94VBY=
|
||||
github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA=
|
||||
github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
|
||||
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
|
||||
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
|
||||
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
|
||||
github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
|
||||
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs=
|
||||
github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
|
||||
github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
|
||||
github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
|
||||
github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
|
||||
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
|
||||
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ=
|
||||
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/lightningnetwork/lnd/healthcheck v1.2.5 h1:aTJy5xeBpcWgRtW/PGBDe+LMQEmNm/HQewlQx2jt7OA=
|
||||
github.com/lightningnetwork/lnd/healthcheck v1.2.5/go.mod h1:G7Tst2tVvWo7cx6mSBEToQC5L1XOGxzZTPB29g9Rv2I=
|
||||
github.com/lightningnetwork/lnd/ticker v1.1.0 h1:ShoBiRP3pIxZHaETndfQ5kEe+S4NdAY1hiX7YbZ4QE4=
|
||||
github.com/lightningnetwork/lnd/ticker v1.1.0/go.mod h1:ubqbSVCn6RlE0LazXuBr7/Zi6QT0uQo++OgIRBxQUrk=
|
||||
github.com/lightningnetwork/lnd/tor v1.0.0 h1:wvEc7I+Y7IOtPglVP3cVBbYhiVhc7uTd7cMF9gQRzwA=
|
||||
github.com/lightningnetwork/lnd/tor v1.0.0/go.mod h1:RDtaAdwfAm+ONuPYwUhNIH1RAvKPv+75lHPOegUcz64=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/miekg/dns v1.1.43 h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg=
|
||||
github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
|
||||
github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_golang v1.11.1 h1:+4eQaD7vAZ6DsfsxB15hbE0odUjGI5ARs9yskGu1v4s=
|
||||
github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ=
|
||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
|
||||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
|
||||
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0=
|
||||
go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ=
|
||||
go.etcd.io/etcd/api/v3 v3.5.15 h1:3KpLJir1ZEBrYuV2v+Twaa/e2MdDCEZ/70H+lzEiwsk=
|
||||
go.etcd.io/etcd/api/v3 v3.5.15/go.mod h1:N9EhGzXq58WuMllgH9ZvnEr7SI9pS0k0+DHZezGp7jM=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.15 h1:fo0HpWz/KlHGMCC+YejpiCmyWDEuIpnTDzpJLB5fWlA=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.15/go.mod h1:mXDI4NAOwEiszrHCb0aqfAYNCrZP4e9hRca3d1YK8EU=
|
||||
go.etcd.io/etcd/client/v2 v2.305.15 h1:VG2xbf8Vz1KJh65Ar2V5eDmfkp1bpzkSEHlhJM3usp8=
|
||||
go.etcd.io/etcd/client/v2 v2.305.15/go.mod h1:Ad5dRjPVb/n5yXgAWQ/hXzuXXkBk0Y658ocuXYaUU48=
|
||||
go.etcd.io/etcd/client/v3 v3.5.15 h1:23M0eY4Fd/inNv1ZfU3AxrbbOdW79r9V9Rl62Nm6ip4=
|
||||
go.etcd.io/etcd/client/v3 v3.5.15/go.mod h1:CLSJxrYjvLtHsrPKsy7LmZEE+DK2ktfd2bN4RhBMwlU=
|
||||
go.etcd.io/etcd/pkg/v3 v3.5.15 h1:/Iu6Sr3iYaAjy++8sIDoZW9/EfhcwLZwd4FOZX2mMOU=
|
||||
go.etcd.io/etcd/pkg/v3 v3.5.15/go.mod h1:e3Acf298sPFmTCGTrnGvkClEw9RYIyPtNzi1XM8rets=
|
||||
go.etcd.io/etcd/raft/v3 v3.5.15 h1:jOA2HJF7zb3wy8H/pL13e8geWqkEa/kUs0waUggZC0I=
|
||||
go.etcd.io/etcd/raft/v3 v3.5.15/go.mod h1:k3r7P4seEiUcgxOPLp+mloJWV3Q4QLPGNvy/OgC8OtM=
|
||||
go.etcd.io/etcd/server/v3 v3.5.15 h1:x35jrWnZgsRwMsFsUJIUdT1bvzIz1B+29HjMfRYVN/E=
|
||||
go.etcd.io/etcd/server/v3 v3.5.15/go.mod h1:l9jX9oa/iuArjqz0RNX/TDbc70dLXxRZo/nmPucrpFo=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 h1:PzIubN4/sjByhDRHLviCjJuweBXWFZWhghjg7cS28+M=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0/go.mod h1:Ct6zzQEuGK3WpJs2n4dn+wfJYzd/+hNnxMRTWjGn30M=
|
||||
go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc=
|
||||
go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0 h1:DeFD0VgTZ+Cj6hxravYYZE2W4GlneVH81iAOPjZkzk8=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0/go.mod h1:GijYcYmNpX1KazD5JmWGsi4P7dDTTTnfv1UbGn84MnU=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0 h1:gvmNvqrPYovvyRmCSygkUDyL8lC5Tl845MLEwqpxhEU=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0/go.mod h1:vNUq47TGFioo+ffTSnKNdob241vePmtNZnAODKapKd0=
|
||||
go.opentelemetry.io/otel/metric v1.20.0 h1:ZlrO8Hu9+GAhnepmRGhSU7/VkpjrNowxRN9GyKR4wzA=
|
||||
go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM=
|
||||
go.opentelemetry.io/otel/sdk v1.20.0 h1:5Jf6imeFZlZtKv9Qbo6qt2ZkmWtdWx/wzcCbNUlAWGM=
|
||||
go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0=
|
||||
go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ=
|
||||
go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU=
|
||||
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
|
||||
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
|
||||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
|
||||
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U=
|
||||
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
|
||||
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
|
||||
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
|
||||
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.14.0 h1:P0Vrf/2538nmC0H+pEQ3MNFRRnVR7RlqyVw+bvm26z0=
|
||||
golang.org/x/oauth2 v0.14.0/go.mod h1:lAtNWgaWfL4cm7j2OV8TxGi9Qb7ECORx8DktCY74OwM=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
|
||||
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
|
||||
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
|
||||
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
|
||||
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b h1:+YaDE2r2OG8t/z5qmsh7Y+XXwCbvadxxZ0YY6mTdrVA=
|
||||
google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:CgAqfJo+Xmu0GwA0411Ht3OU3OntXwsGmrmjI8ioGXI=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b h1:CIC2YMXmIhYw6evmhPxBKJ4fmLbOFtXQN/GV3XOZR8k=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:IBQ646DjkDkvUIsVq/cc03FUFQ9wbZu7yE396YcL870=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 h1:AB/lmRny7e2pLhFEYIbl5qkDAUt2h0ZRO4wGPhZf+ik=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405/go.mod h1:67X1fPuzjcrkymZzZV1vvkFeTn2Rvc6lYF9MYFGCcwE=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
|
||||
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
|
||||
google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
|
||||
google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
|
||||
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
|
||||
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
||||
160
server/pkg/kvdb/interface.go
Normal file
@@ -0,0 +1,160 @@
package kvdb

import (
	"github.com/btcsuite/btcwallet/walletdb"
)

// Update opens a database read/write transaction and executes the function f
// with the transaction passed as a parameter. After f exits, if f did not
// error, the transaction is committed. Otherwise, if f did error, the
// transaction is rolled back. If the rollback fails, the original error
// returned by f is still returned. If the commit fails, the commit error is
// returned. As callers may expect retries of the f closure (depending on the
// database backend used), the reset function will be called before each
// retry.
func Update(db Backend, f func(tx RwTx) error, reset func()) error {
	return db.Update(f, reset)
}
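As a usage sketch from a caller's point of view (the bucket and key names are made up for illustration; the import path comes from this package's go.mod):

	import (
		"github.com/ark-network/tools/kvdb"
	)

	func storeBalance(db kvdb.Backend) error {
		return kvdb.Update(db, func(tx kvdb.RwTx) error {
			bucket, err := tx.CreateTopLevelBucket([]byte("accounts"))
			if err != nil {
				return err
			}

			// Persist a single key/value pair inside the bucket.
			return bucket.Put([]byte("alice"), []byte("42"))
		}, func() {
			// Reset closure: clear any state captured outside the
			// transaction; called again before every retry on
			// backends that retry (such as etcd).
		})
	}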

// View opens a database read transaction and executes the function f with the
// transaction passed as a parameter. After f exits, the transaction is rolled
// back. If f errors, its error is returned, not a rollback error (if any
// occurs). The passed reset function is called before the start of the
// transaction and can be used to reset intermediate state. As callers may
// expect retries of the f closure (depending on the database backend used),
// the reset function will be called before each retry.
func View(db Backend, f func(tx RTx) error, reset func()) error {
	return db.View(f, reset)
}
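The read-only counterpart, again a hedged sketch with the same made-up bucket and imports as the previous example:

	func readBalance(db kvdb.Backend) ([]byte, error) {
		var balance []byte
		err := kvdb.View(db, func(tx kvdb.RTx) error {
			bucket := tx.ReadBucket([]byte("accounts"))
			if bucket == nil {
				return kvdb.ErrBucketNotFound
			}

			// Copy the value out: it is only guaranteed to be valid
			// for the lifetime of the transaction.
			balance = append([]byte(nil), bucket.Get([]byte("alice"))...)
			return nil
		}, func() {
			// Reset partial results before a retry.
			balance = nil
		})

		return balance, err
	}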

// Batch is identical to the Update call, but it attempts to combine several
// individual Update transactions into a single write database transaction on
// an optimistic basis. This only has benefits if multiple goroutines call
// Batch. For etcd, Batch simply does an Update, since combining transactions
// is more complex in that case due to STM retries.
func Batch(db Backend, f func(tx RwTx) error) error {
	// Fall back to the normal Update method if the backend doesn't support
	// batching.
	if _, ok := db.(walletdb.BatchDB); !ok {
		// Since Batch calls handle external state reset, we can safely
		// pass in an empty reset closure.
		return db.Update(f, func() {})
	}

	return walletdb.Batch(db, f)
}
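Batch pays off when many goroutines each write a small amount of data, because their closures may be coalesced into one underlying transaction. A sketch (error handling trimmed; sync imported in addition to the packages above):

	func writeCounters(db kvdb.Backend) {
		var wg sync.WaitGroup
		for i := 0; i < 10; i++ {
			i := i
			wg.Add(1)
			go func() {
				defer wg.Done()

				// The closure may be re-run and may share a
				// transaction with other Batch callers, so it
				// must be idempotent.
				_ = kvdb.Batch(db, func(tx kvdb.RwTx) error {
					bucket, err := tx.CreateTopLevelBucket(
						[]byte("counters"),
					)
					if err != nil {
						return err
					}
					return bucket.Put([]byte{byte(i)}, []byte("done"))
				})
			}()
		}
		wg.Wait()
	}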

// Create initializes and opens a database for the specified type. The
// arguments are specific to the database type driver. See the documentation
// for the database driver for further details.
//
// ErrDbUnknownType will be returned if the database type is not registered.
var Create = walletdb.Create

// Backend represents an ACID database. All database access is performed
// through read or read+write transactions.
type Backend = walletdb.DB

// Open opens an existing database for the specified type. The arguments are
// specific to the database type driver. See the documentation for the database
// driver for further details.
//
// ErrDbUnknownType will be returned if the database type is not registered.
var Open = walletdb.Open
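Create and Open are thin aliases over walletdb. For the bolt (bdb) driver registered elsewhere in this package, the driver-specific arguments are believed to be the database path, a no-freelist-sync flag and a timeout; treat the exact argument list as an assumption rather than documented API (path/filepath and time are imported in addition to the packages above):

	func openBolt(dir string) (kvdb.Backend, error) {
		// "bdb" is the name the bolt backend registers itself under.
		return kvdb.Create(
			"bdb",
			filepath.Join(dir, "test.db"), // database file
			true,                          // noFreelistSync (assumed)
			60*time.Second,                // DB timeout (assumed)
		)
	}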

// Driver defines a structure for backend drivers to use when they register
// themselves as a backend which implements the Backend interface.
type Driver = walletdb.Driver

// RBucket represents a bucket (a hierarchical structure within the
// database) that is only allowed to perform read operations.
type RBucket = walletdb.ReadBucket

// RCursor represents a bucket cursor that can be positioned at the start or
// end of the bucket's key/value pairs and iterate over pairs in the bucket.
// This type is only allowed to perform database read operations.
type RCursor = walletdb.ReadCursor

// RTx represents a database transaction that can only be used for reads. If
// a database update must occur, use a RwTx.
type RTx = walletdb.ReadTx

// RwBucket represents a bucket (a hierarchical structure within the database)
// that is allowed to perform both read and write operations.
type RwBucket = walletdb.ReadWriteBucket

// RwCursor represents a bucket cursor that can be positioned at the start or
// end of the bucket's key/value pairs and iterate over pairs in the bucket.
// This abstraction is allowed to perform both database read and write
// operations.
type RwCursor = walletdb.ReadWriteCursor

// RwTx represents a database transaction that can be used for both reads and
// writes. When only reads are necessary, consider using a RTx instead.
type RwTx = walletdb.ReadWriteTx

// ExtendedRTx is an extension to walletdb.ReadTx to allow prefetching of keys.
type ExtendedRTx interface {
	RTx

	// RootBucket returns the "root bucket", which is a pseudo bucket used
	// when prefetching (keys from) top level buckets.
	RootBucket() RBucket
}
|
||||
// ExtendedRBucket is an extension to walletdb.ReadBucket to allow prefetching
|
||||
// of all values inside buckets.
|
||||
type ExtendedRBucket interface {
|
||||
RBucket
|
||||
|
||||
// Prefetch will attempt to prefetch all values under a path.
|
||||
Prefetch(paths ...[]string)
|
||||
|
||||
// ForAll is an optimized version of ForEach.
|
||||
//
|
||||
// NOTE: ForAll differs from ForEach in that no additional queries can
|
||||
// be executed within the callback.
|
||||
ForAll(func(k, v []byte) error) error
|
||||
}
|
||||
|
||||
// Prefetch will attempt to prefetch all values under a path from the passed
|
||||
// bucket.
|
||||
func Prefetch(b RBucket, paths ...[]string) {
|
||||
if bucket, ok := b.(ExtendedRBucket); ok {
|
||||
bucket.Prefetch(paths...)
|
||||
}
|
||||
}
|
||||
|
||||
// ForAll is an optimized version of ForEach with the limitation that no
|
||||
// additional queries can be executed within the callback.
|
||||
func ForAll(b RBucket, cb func(k, v []byte) error) error {
|
||||
if bucket, ok := b.(ExtendedRBucket); ok {
|
||||
return bucket.ForAll(cb)
|
||||
}
|
||||
|
||||
return b.ForEach(cb)
|
||||
}
|
||||
|
||||
// RootBucket is a wrapper to ExtendedRTx.RootBucket which does nothing if
|
||||
// the implementation doesn't have ExtendedRTx.
|
||||
func RootBucket(t RTx) RBucket {
|
||||
if tx, ok := t.(ExtendedRTx); ok {
|
||||
return tx.RootBucket()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
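// NOTE: illustrative sketch, not part of the original file. It combines the
// helpers above: Prefetch is a no-op on backends without ExtendedRBucket
// support, so the function is safe on any Backend. The bucket name "top" is
// an arbitrary assumption.
func examplePrefetchForAll(tx RTx) error {
	// Ask the backend, if it supports it, to prefetch everything under
	// the "top" bucket in one round trip.
	Prefetch(RootBucket(tx), []string{"top"})

	bucket := tx.ReadBucket([]byte("top"))
	if bucket == nil {
		return ErrBucketNotFound
	}

	// Iterate the (possibly prefetched) values; no additional queries may
	// be issued from within the callback.
	return ForAll(bucket, func(k, v []byte) error {
		return nil
	})
}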
|
||||
|
||||
var (
|
||||
// ErrBucketNotFound is returned when trying to access a bucket that
|
||||
// has not been created yet.
|
||||
ErrBucketNotFound = walletdb.ErrBucketNotFound
|
||||
|
||||
// ErrBucketExists is returned when creating a bucket that already
|
||||
// exists.
|
||||
ErrBucketExists = walletdb.ErrBucketExists
|
||||
|
||||
// ErrDatabaseNotOpen is returned when a database instance is accessed
|
||||
// before it is opened or after it is closed.
|
||||
ErrDatabaseNotOpen = walletdb.ErrDbNotOpen
|
||||
)
|
||||
22
server/pkg/kvdb/kvdb_etcd.go
Normal file
@@ -0,0 +1,22 @@
|
||||
//go:build kvdb_etcd
|
||||
// +build kvdb_etcd
|
||||
|
||||
package kvdb
|
||||
|
||||
import (
|
||||
"github.com/ark-network/tools/kvdb/etcd"
|
||||
)
|
||||
|
||||
// EtcdBackend is conditionally set to true when the kvdb_etcd build tag is
// defined, allowing our database code to be tested against an etcd backend.
|
||||
const EtcdBackend = true
|
||||
|
||||
// StartEtcdTestBackend creates an embedded etcd backend for testing,
// storing the database at the passed path.
|
||||
func StartEtcdTestBackend(path string, clientPort, peerPort uint16,
|
||||
logFile string) (*etcd.Config, func(), error) {
|
||||
|
||||
return etcd.NewEmbeddedEtcdInstance(
|
||||
path, clientPort, peerPort, logFile,
|
||||
)
|
||||
}
|
||||
23
server/pkg/kvdb/kvdb_no_etcd.go
Normal file
@@ -0,0 +1,23 @@
|
||||
//go:build !kvdb_etcd
|
||||
// +build !kvdb_etcd
|
||||
|
||||
package kvdb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ark-network/tools/kvdb/etcd"
|
||||
)
|
||||
|
||||
// EtcdBackend is conditionally set to false when the kvdb_etcd build tag is not
|
||||
// defined. This will allow testing of other database backends.
|
||||
const EtcdBackend = false
|
||||
|
||||
var errEtcdNotAvailable = fmt.Errorf("etcd backend not available")
|
||||
|
||||
// StartEtcdTestBackend is a stub returning nil and an errEtcdNotAvailable error.
|
||||
func StartEtcdTestBackend(path string, clientPort, peerPort uint16,
|
||||
logFile string) (*etcd.Config, func(), error) {
|
||||
|
||||
return nil, func() {}, errEtcdNotAvailable
|
||||
}
|
||||
9
server/pkg/kvdb/nodebug.go
Normal file
@@ -0,0 +1,9 @@
|
||||
//go:build !dev
|
||||
// +build !dev
|
||||
|
||||
package kvdb
|
||||
|
||||
const (
|
||||
// Switch off extra debug code.
|
||||
etcdDebug = false
|
||||
)
|
||||
188
server/pkg/kvdb/prefetch_test.go
Normal file
@@ -0,0 +1,188 @@
|
||||
package kvdb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/btcsuite/btcwallet/walletdb"
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func fetchBucket(t *testing.T, bucket walletdb.ReadBucket) map[string]string {
|
||||
items := make(map[string]string)
|
||||
err := bucket.ForEach(func(k, v []byte) error {
|
||||
if v != nil {
|
||||
items[string(k)] = string(v)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
return items
|
||||
}
|
||||
|
||||
func alterBucket(t *testing.T, bucket walletdb.ReadWriteBucket,
|
||||
put map[string]string, remove []string) {
|
||||
|
||||
for k, v := range put {
|
||||
require.NoError(t, bucket.Put([]byte(k), []byte(v)))
|
||||
}
|
||||
|
||||
for _, k := range remove {
|
||||
require.NoError(t, bucket.Delete([]byte(k)))
|
||||
}
|
||||
}
|
||||
|
||||
func prefetchTest(t *testing.T, db walletdb.DB,
|
||||
prefetchAt []bool, put map[string]string, remove []string) {
|
||||
|
||||
prefetch := func(i int, tx walletdb.ReadTx) {
|
||||
require.Less(t, i, len(prefetchAt))
|
||||
if prefetchAt[i] {
|
||||
Prefetch(
|
||||
RootBucket(tx),
|
||||
[]string{"top"}, []string{"top", "bucket"},
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
items := map[string]string{
|
||||
"a": "1",
|
||||
"b": "2",
|
||||
"c": "3",
|
||||
"d": "4",
|
||||
"e": "5",
|
||||
}
|
||||
|
||||
err := Update(db, func(tx walletdb.ReadWriteTx) error {
|
||||
top, err := tx.CreateTopLevelBucket([]byte("top"))
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, top)
|
||||
|
||||
for k, v := range items {
|
||||
require.NoError(t, top.Put([]byte(k), []byte(v)))
|
||||
}
|
||||
|
||||
bucket, err := top.CreateBucket([]byte("bucket"))
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, bucket)
|
||||
|
||||
for k, v := range items {
|
||||
require.NoError(t, bucket.Put([]byte(k), []byte(v)))
|
||||
}
|
||||
|
||||
return nil
|
||||
}, func() {})
|
||||
require.NoError(t, err)
|
||||
|
||||
for k, v := range put {
|
||||
items[k] = v
|
||||
}
|
||||
|
||||
for _, k := range remove {
|
||||
delete(items, k)
|
||||
}
|
||||
|
||||
err = Update(db, func(tx walletdb.ReadWriteTx) error {
|
||||
prefetch(0, tx)
|
||||
top := tx.ReadWriteBucket([]byte("top"))
|
||||
require.NotNil(t, top)
|
||||
alterBucket(t, top, put, remove)
|
||||
|
||||
prefetch(1, tx)
|
||||
require.Equal(t, items, fetchBucket(t, top))
|
||||
|
||||
prefetch(2, tx)
|
||||
bucket := top.NestedReadWriteBucket([]byte("bucket"))
|
||||
require.NotNil(t, bucket)
|
||||
alterBucket(t, bucket, put, remove)
|
||||
|
||||
prefetch(3, tx)
|
||||
require.Equal(t, items, fetchBucket(t, bucket))
|
||||
|
||||
return nil
|
||||
}, func() {})
|
||||
require.NoError(t, err)
|
||||
|
||||
err = Update(db, func(tx walletdb.ReadWriteTx) error {
|
||||
return tx.DeleteTopLevelBucket([]byte("top"))
|
||||
}, func() {})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// testPrefetch tests that prefetching buckets works as expected even when the
|
||||
// prefetch happens multiple times and the bucket contents change. Our expectation
|
||||
// is that with or without prefetches, the kvdb layer works according to the
|
||||
// interface specification.
|
||||
func testPrefetch(t *testing.T, db walletdb.DB) {
|
||||
tests := []struct {
|
||||
put map[string]string
|
||||
remove []string
|
||||
}{
|
||||
{
|
||||
put: nil,
|
||||
remove: nil,
|
||||
},
|
||||
{
|
||||
put: map[string]string{
|
||||
"a": "a",
|
||||
"aa": "aa",
|
||||
"aaa": "aaa",
|
||||
"x": "x",
|
||||
"y": "y",
|
||||
},
|
||||
remove: nil,
|
||||
},
|
||||
{
|
||||
put: map[string]string{
|
||||
"a": "a",
|
||||
"aa": "aa",
|
||||
"aaa": "aaa",
|
||||
"x": "x",
|
||||
"y": "y",
|
||||
},
|
||||
remove: []string{"a", "c", "d"},
|
||||
},
|
||||
{
|
||||
put: nil,
|
||||
remove: []string{"b", "d"},
|
||||
},
|
||||
}
|
||||
|
||||
prefetchAt := [][]bool{
|
||||
{false, false, false, false},
|
||||
{true, false, false, false},
|
||||
{false, true, false, false},
|
||||
{false, false, true, false},
|
||||
{false, false, false, true},
|
||||
{true, true, false, false},
|
||||
{true, true, true, false},
|
||||
{true, true, true, true},
|
||||
{true, false, true, true},
|
||||
{true, false, false, true},
|
||||
{true, false, true, false},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
test := test
|
||||
|
||||
for j := 0; j < len(prefetchAt); j++ {
|
||||
if !t.Run(
|
||||
fmt.Sprintf("prefetch %d %d", i, j),
|
||||
func(t *testing.T) {
|
||||
prefetchTest(
|
||||
t, db, prefetchAt[j], test.put,
|
||||
test.remove,
|
||||
)
|
||||
}) {
|
||||
|
||||
fmt.Printf("Prefetch test (%d, %d) failed:\n"+
|
||||
"testcase=%v\n prefetch=%v\n",
|
||||
i, j, spew.Sdump(test),
|
||||
spew.Sdump(prefetchAt[j]))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
636
server/pkg/kvdb/readwrite_bucket_test.go
Normal file
@@ -0,0 +1,636 @@
|
||||
package kvdb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"testing"
|
||||
|
||||
"github.com/btcsuite/btcwallet/walletdb"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func testBucketCreation(t *testing.T, db walletdb.DB) {
|
||||
err := Update(db, func(tx walletdb.ReadWriteTx) error {
|
||||
// empty bucket name
|
||||
b, err := tx.CreateTopLevelBucket(nil)
|
||||
require.Error(t, walletdb.ErrBucketNameRequired, err)
|
||||
require.Nil(t, b)
|
||||
|
||||
// empty bucket name
|
||||
b, err = tx.CreateTopLevelBucket([]byte(""))
|
||||
require.Error(t, walletdb.ErrBucketNameRequired, err)
|
||||
require.Nil(t, b)
|
||||
|
||||
// "apple"
|
||||
apple, err := tx.CreateTopLevelBucket([]byte("apple"))
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, apple)
|
||||
|
||||
// Check bucket tx.
|
||||
require.Equal(t, tx, apple.Tx())
|
||||
|
||||
// "apple" already created
|
||||
b, err = tx.CreateTopLevelBucket([]byte("apple"))
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, b)
|
||||
|
||||
// "apple/banana"
|
||||
banana, err := apple.CreateBucket([]byte("banana"))
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, banana)
|
||||
|
||||
banana, err = apple.CreateBucketIfNotExists([]byte("banana"))
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, banana)
|
||||
|
||||
// Try creating "apple/banana" again
|
||||
b, err = apple.CreateBucket([]byte("banana"))
|
||||
require.Error(t, walletdb.ErrBucketExists, err)
|
||||
require.Nil(t, b)
|
||||
|
||||
// "apple/mango"
|
||||
mango, err := apple.CreateBucket([]byte("mango"))
|
||||
require.Nil(t, err)
|
||||
require.NotNil(t, mango)
|
||||
|
||||
// "apple/banana/pear"
|
||||
pear, err := banana.CreateBucket([]byte("pear"))
|
||||
require.Nil(t, err)
|
||||
require.NotNil(t, pear)
|
||||
|
||||
// empty bucket
|
||||
require.Nil(t, apple.NestedReadWriteBucket(nil))
|
||||
require.Nil(t, apple.NestedReadWriteBucket([]byte("")))
|
||||
|
||||
// "apple/pear" doesn't exist
|
||||
require.Nil(t, apple.NestedReadWriteBucket([]byte("pear")))
|
||||
|
||||
// "apple/banana" exits
|
||||
require.NotNil(t, apple.NestedReadWriteBucket([]byte("banana")))
|
||||
require.NotNil(t, apple.NestedReadBucket([]byte("banana")))
|
||||
return nil
|
||||
}, func() {})
|
||||
|
||||
require.Nil(t, err)
|
||||
}
|
||||
|
||||
func testBucketDeletion(t *testing.T, db walletdb.DB) {
|
||||
err := Update(db, func(tx walletdb.ReadWriteTx) error {
|
||||
// "apple"
|
||||
apple, err := tx.CreateTopLevelBucket([]byte("apple"))
|
||||
require.Nil(t, err)
|
||||
require.NotNil(t, apple)
|
||||
|
||||
// "apple/banana"
|
||||
banana, err := apple.CreateBucket([]byte("banana"))
|
||||
require.Nil(t, err)
|
||||
require.NotNil(t, banana)
|
||||
|
||||
kvs := []KV{{"key1", "val1"}, {"key2", "val2"}, {"key3", "val3"}}
|
||||
|
||||
for _, kv := range kvs {
|
||||
require.NoError(t, banana.Put([]byte(kv.key), []byte(kv.val)))
|
||||
require.Equal(t, []byte(kv.val), banana.Get([]byte(kv.key)))
|
||||
}
|
||||
|
||||
// Delete a k/v from "apple/banana"
|
||||
require.NoError(t, banana.Delete([]byte("key2")))
|
||||
// Try getting/putting/deleting invalid k/v's.
|
||||
require.Nil(t, banana.Get(nil))
|
||||
require.Error(t, walletdb.ErrKeyRequired, banana.Put(nil, []byte("val")))
|
||||
require.Error(t, walletdb.ErrKeyRequired, banana.Delete(nil))
|
||||
|
||||
// Try deleting a k/v that doesn't exist.
|
||||
require.NoError(t, banana.Delete([]byte("nokey")))
|
||||
|
||||
// "apple/pear"
|
||||
pear, err := apple.CreateBucket([]byte("pear"))
|
||||
require.Nil(t, err)
|
||||
require.NotNil(t, pear)
|
||||
|
||||
// Put some values into "apple/pear"
|
||||
for _, kv := range kvs {
|
||||
require.Nil(t, pear.Put([]byte(kv.key), []byte(kv.val)))
|
||||
require.Equal(t, []byte(kv.val), pear.Get([]byte(kv.key)))
|
||||
}
|
||||
|
||||
// Create nested bucket "apple/pear/cherry"
|
||||
cherry, err := pear.CreateBucket([]byte("cherry"))
|
||||
require.Nil(t, err)
|
||||
require.NotNil(t, cherry)
|
||||
|
||||
// Put some values into "apple/pear/cherry"
|
||||
for _, kv := range kvs {
|
||||
require.NoError(t, cherry.Put([]byte(kv.key), []byte(kv.val)))
|
||||
}
|
||||
|
||||
// Read back values in "apple/pear/cherry" through a read bucket.
|
||||
cherryReadBucket := pear.NestedReadBucket([]byte("cherry"))
|
||||
for _, kv := range kvs {
|
||||
require.Equal(
|
||||
t, []byte(kv.val),
|
||||
cherryReadBucket.Get([]byte(kv.key)),
|
||||
)
|
||||
}
|
||||
|
||||
// Try deleting some invalid buckets.
|
||||
require.Error(t,
|
||||
walletdb.ErrBucketNameRequired, apple.DeleteNestedBucket(nil),
|
||||
)
|
||||
|
||||
// Try deleting a non existing bucket.
|
||||
require.Error(
|
||||
t,
|
||||
walletdb.ErrBucketNotFound,
|
||||
apple.DeleteNestedBucket([]byte("missing")),
|
||||
)
|
||||
|
||||
// Delete "apple/pear"
|
||||
require.Nil(t, apple.DeleteNestedBucket([]byte("pear")))
|
||||
|
||||
// "apple/pear" deleted
|
||||
require.Nil(t, apple.NestedReadWriteBucket([]byte("pear")))
|
||||
|
||||
// "aple/banana" exists
|
||||
require.NotNil(t, apple.NestedReadWriteBucket([]byte("banana")))
|
||||
return nil
|
||||
}, func() {})
|
||||
|
||||
require.Nil(t, err)
|
||||
}
|
||||
|
||||
type bucketIterator = func(walletdb.ReadWriteBucket,
|
||||
func(key, val []byte) error) error
|
||||
|
||||
func testBucketForEach(t *testing.T, db walletdb.DB) {
|
||||
testBucketIterator(t, db, func(bucket walletdb.ReadWriteBucket,
|
||||
callback func(key, val []byte) error) error {
|
||||
|
||||
return bucket.ForEach(callback)
|
||||
})
|
||||
}
|
||||
|
||||
func testBucketIterator(t *testing.T, db walletdb.DB,
|
||||
iterator bucketIterator) {
|
||||
|
||||
err := Update(db, func(tx walletdb.ReadWriteTx) error {
|
||||
// "apple"
|
||||
apple, err := tx.CreateTopLevelBucket([]byte("apple"))
|
||||
require.Nil(t, err)
|
||||
require.NotNil(t, apple)
|
||||
|
||||
// "apple/banana"
|
||||
banana, err := apple.CreateBucket([]byte("banana"))
|
||||
require.Nil(t, err)
|
||||
require.NotNil(t, banana)
|
||||
|
||||
kvs := []KV{{"key1", "val1"}, {"key2", "val2"}, {"key3", "val3"}}
|
||||
|
||||
// put some values into "apple" and "apple/banana" too
|
||||
for _, kv := range kvs {
|
||||
require.Nil(t, apple.Put([]byte(kv.key), []byte(kv.val)))
|
||||
require.Equal(t, []byte(kv.val), apple.Get([]byte(kv.key)))
|
||||
|
||||
require.Nil(t, banana.Put([]byte(kv.key), []byte(kv.val)))
|
||||
require.Equal(t, []byte(kv.val), banana.Get([]byte(kv.key)))
|
||||
}
|
||||
|
||||
got := make(map[string]string)
|
||||
err = apple.ForEach(func(key, val []byte) error {
|
||||
got[string(key)] = string(val)
|
||||
return nil
|
||||
})
|
||||
|
||||
expected := map[string]string{
|
||||
"key1": "val1",
|
||||
"key2": "val2",
|
||||
"key3": "val3",
|
||||
"banana": "",
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected, got)
|
||||
|
||||
got = make(map[string]string)
|
||||
err = iterator(banana, func(key, val []byte) error {
|
||||
got[string(key)] = string(val)
|
||||
return nil
|
||||
})
|
||||
|
||||
require.NoError(t, err)
|
||||
// remove the sub-bucket key
|
||||
delete(expected, "banana")
|
||||
require.Equal(t, expected, got)
|
||||
|
||||
return nil
|
||||
}, func() {})
|
||||
|
||||
require.Nil(t, err)
|
||||
}
|
||||
|
||||
func testBucketForEachWithError(t *testing.T, db walletdb.DB) {
|
||||
err := Update(db, func(tx walletdb.ReadWriteTx) error {
|
||||
// "apple"
|
||||
apple, err := tx.CreateTopLevelBucket([]byte("apple"))
|
||||
require.Nil(t, err)
|
||||
require.NotNil(t, apple)
|
||||
|
||||
// "apple/banana"
|
||||
banana, err := apple.CreateBucket([]byte("banana"))
|
||||
require.Nil(t, err)
|
||||
require.NotNil(t, banana)
|
||||
|
||||
// "apple/pear"
|
||||
pear, err := apple.CreateBucket([]byte("pear"))
|
||||
require.Nil(t, err)
|
||||
require.NotNil(t, pear)
|
||||
|
||||
kvs := []KV{{"key1", "val1"}, {"key2", "val2"}}
|
||||
|
||||
// Put some values into "apple" and "apple/banana" too.
|
||||
for _, kv := range kvs {
|
||||
require.Nil(t, apple.Put([]byte(kv.key), []byte(kv.val)))
|
||||
require.Equal(t, []byte(kv.val), apple.Get([]byte(kv.key)))
|
||||
}
|
||||
|
||||
got := make(map[string]string)
|
||||
i := 0
|
||||
// Error while iterating value keys.
|
||||
err = apple.ForEach(func(key, val []byte) error {
|
||||
if i == 2 {
|
||||
return fmt.Errorf("error")
|
||||
}
|
||||
|
||||
got[string(key)] = string(val)
|
||||
i++
|
||||
return nil
|
||||
})
|
||||
|
||||
expected := map[string]string{
|
||||
"banana": "",
|
||||
"key1": "val1",
|
||||
}
|
||||
|
||||
require.Equal(t, expected, got)
|
||||
require.Error(t, err)
|
||||
|
||||
got = make(map[string]string)
|
||||
i = 0
|
||||
// Error while iterating buckets.
|
||||
err = apple.ForEach(func(key, val []byte) error {
|
||||
if i == 3 {
|
||||
return fmt.Errorf("error")
|
||||
}
|
||||
|
||||
got[string(key)] = string(val)
|
||||
i++
|
||||
return nil
|
||||
})
|
||||
|
||||
expected = map[string]string{
|
||||
"banana": "",
|
||||
"key1": "val1",
|
||||
"key2": "val2",
|
||||
}
|
||||
|
||||
require.Equal(t, expected, got)
|
||||
require.Error(t, err)
|
||||
return nil
|
||||
}, func() {})
|
||||
|
||||
require.Nil(t, err)
|
||||
}
|
||||
|
||||
func testBucketSequence(t *testing.T, db walletdb.DB) {
|
||||
err := Update(db, func(tx walletdb.ReadWriteTx) error {
|
||||
apple, err := tx.CreateTopLevelBucket([]byte("apple"))
|
||||
require.Nil(t, err)
|
||||
require.NotNil(t, apple)
|
||||
|
||||
banana, err := apple.CreateBucket([]byte("banana"))
|
||||
require.Nil(t, err)
|
||||
require.NotNil(t, banana)
|
||||
|
||||
require.Equal(t, uint64(0), apple.Sequence())
|
||||
require.Equal(t, uint64(0), banana.Sequence())
|
||||
|
||||
require.Nil(t, apple.SetSequence(math.MaxUint64))
|
||||
require.Equal(t, uint64(math.MaxUint64), apple.Sequence())
|
||||
|
||||
for i := uint64(0); i < uint64(5); i++ {
|
||||
s, err := apple.NextSequence()
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, i, s)
|
||||
}
|
||||
|
||||
return nil
|
||||
}, func() {})
|
||||
|
||||
require.Nil(t, err)
|
||||
}
|
||||
|
||||
// testKeyClash tests that one cannot create a bucket if a value with the same
|
||||
// key exists and the same is true in reverse: that a value cannot be put if
|
||||
// a bucket with the same key exists.
|
||||
func testKeyClash(t *testing.T, db walletdb.DB) {
|
||||
// First:
|
||||
// put: /apple/key -> val
|
||||
// create bucket: /apple/banana
|
||||
err := Update(db, func(tx walletdb.ReadWriteTx) error {
|
||||
apple, err := tx.CreateTopLevelBucket([]byte("apple"))
|
||||
require.Nil(t, err)
|
||||
require.NotNil(t, apple)
|
||||
|
||||
require.NoError(t, apple.Put([]byte("key"), []byte("val")))
|
||||
|
||||
banana, err := apple.CreateBucket([]byte("banana"))
|
||||
require.Nil(t, err)
|
||||
require.NotNil(t, banana)
|
||||
|
||||
return nil
|
||||
}, func() {})
|
||||
|
||||
require.Nil(t, err)
|
||||
|
||||
// Next try to:
|
||||
// put: /apple/banana -> val => will fail (as /apple/banana is a bucket)
|
||||
// create bucket: /apple/key => will fail (as /apple/key is a value)
|
||||
err = Update(db, func(tx walletdb.ReadWriteTx) error {
|
||||
apple, err := tx.CreateTopLevelBucket([]byte("apple"))
|
||||
require.Nil(t, err)
|
||||
require.NotNil(t, apple)
|
||||
|
||||
require.Error(t,
|
||||
walletdb.ErrIncompatibleValue,
|
||||
apple.Put([]byte("banana"), []byte("val")),
|
||||
)
|
||||
|
||||
b, err := apple.CreateBucket([]byte("key"))
|
||||
require.Nil(t, b)
|
||||
require.Error(t, walletdb.ErrIncompatibleValue, err)
|
||||
|
||||
b, err = apple.CreateBucketIfNotExists([]byte("key"))
|
||||
require.Nil(t, b)
|
||||
require.Error(t, walletdb.ErrIncompatibleValue, err)
|
||||
|
||||
return nil
|
||||
}, func() {})
|
||||
|
||||
require.Nil(t, err)
|
||||
}
|
||||
|
||||
// testBucketCreateDelete tests that creating then deleting then creating a
|
||||
// bucket succeeds.
|
||||
func testBucketCreateDelete(t *testing.T, db walletdb.DB) {
|
||||
err := Update(db, func(tx walletdb.ReadWriteTx) error {
|
||||
apple, err := tx.CreateTopLevelBucket([]byte("apple"))
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, apple)
|
||||
|
||||
banana, err := apple.CreateBucket([]byte("banana"))
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, banana)
|
||||
|
||||
return nil
|
||||
}, func() {})
|
||||
require.NoError(t, err)
|
||||
|
||||
err = Update(db, func(tx walletdb.ReadWriteTx) error {
|
||||
apple := tx.ReadWriteBucket([]byte("apple"))
|
||||
require.NotNil(t, apple)
|
||||
require.NoError(t, apple.DeleteNestedBucket([]byte("banana")))
|
||||
|
||||
return nil
|
||||
}, func() {})
|
||||
require.NoError(t, err)
|
||||
|
||||
err = Update(db, func(tx walletdb.ReadWriteTx) error {
|
||||
apple := tx.ReadWriteBucket([]byte("apple"))
|
||||
require.NotNil(t, apple)
|
||||
require.NoError(t, apple.Put([]byte("banana"), []byte("value")))
|
||||
|
||||
return nil
|
||||
}, func() {})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func testTopLevelBucketCreation(t *testing.T, db walletdb.DB) {
|
||||
require.NoError(t, Update(db, func(tx walletdb.ReadWriteTx) error {
|
||||
// Try to delete all data (there is none).
|
||||
err := tx.DeleteTopLevelBucket([]byte("top"))
|
||||
require.ErrorIs(t, walletdb.ErrBucketNotFound, err)
|
||||
|
||||
// Create top level bucket.
|
||||
top, err := tx.CreateTopLevelBucket([]byte("top"))
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, top)
|
||||
|
||||
// Create second top level bucket with special characters.
|
||||
top2, err := tx.CreateTopLevelBucket([]byte{1, 2, 3})
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, top2)
|
||||
|
||||
top2 = tx.ReadWriteBucket([]byte{1, 2, 3})
|
||||
require.NotNil(t, top2)
|
||||
|
||||
// List top level buckets.
|
||||
var tlKeys [][]byte
|
||||
require.NoError(t, tx.ForEachBucket(func(k []byte) error {
|
||||
tlKeys = append(tlKeys, k)
|
||||
return nil
|
||||
}))
|
||||
require.Equal(t, [][]byte{{1, 2, 3}, []byte("top")}, tlKeys)
|
||||
|
||||
// Create third top level bucket with special uppercase.
|
||||
top3, err := tx.CreateTopLevelBucket([]byte("UpperBucket"))
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, top3)
|
||||
|
||||
top3 = tx.ReadWriteBucket([]byte("UpperBucket"))
|
||||
require.NotNil(t, top3)
|
||||
|
||||
require.NoError(t, tx.DeleteTopLevelBucket([]byte("top")))
|
||||
require.NoError(t, tx.DeleteTopLevelBucket([]byte{1, 2, 3}))
|
||||
require.NoError(t, tx.DeleteTopLevelBucket([]byte("UpperBucket")))
|
||||
|
||||
tx.ForEachBucket(func(k []byte) error {
|
||||
require.Fail(t, "no top level buckets expected")
|
||||
return nil
|
||||
})
|
||||
|
||||
return nil
|
||||
}, func() {}))
|
||||
}
|
||||
|
||||
func testBucketOperations(t *testing.T, db walletdb.DB) {
|
||||
require.NoError(t, Update(db, func(tx walletdb.ReadWriteTx) error {
|
||||
// Create top level bucket.
|
||||
top, err := tx.CreateTopLevelBucket([]byte("top"))
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, top)
|
||||
|
||||
// Assert that key doesn't exist.
|
||||
require.Nil(t, top.Get([]byte("key")))
|
||||
|
||||
require.NoError(t, top.ForEach(func(k, v []byte) error {
|
||||
require.Fail(t, "unexpected data")
|
||||
return nil
|
||||
}))
|
||||
|
||||
// Put key.
|
||||
require.NoError(t, top.Put([]byte("key"), []byte("val")))
|
||||
require.Equal(t, []byte("val"), top.Get([]byte("key")))
|
||||
|
||||
// Overwrite key.
|
||||
require.NoError(t, top.Put([]byte("key"), []byte("val2")))
|
||||
require.Equal(t, []byte("val2"), top.Get([]byte("key")))
|
||||
|
||||
// Put nil value.
|
||||
require.NoError(t, top.Put([]byte("nilkey"), nil))
|
||||
require.Equal(t, []byte(""), top.Get([]byte("nilkey")))
|
||||
|
||||
// Put empty value.
|
||||
require.NoError(t, top.Put([]byte("nilkey"), []byte{}))
|
||||
require.Equal(t, []byte(""), top.Get([]byte("nilkey")))
|
||||
|
||||
// Try to create bucket with same name as previous key.
|
||||
_, err = top.CreateBucket([]byte("key"))
|
||||
require.ErrorIs(t, err, walletdb.ErrIncompatibleValue)
|
||||
|
||||
_, err = top.CreateBucketIfNotExists([]byte("key"))
|
||||
require.ErrorIs(t, err, walletdb.ErrIncompatibleValue)
|
||||
|
||||
// Create sub-bucket.
|
||||
sub2, err := top.CreateBucket([]byte("sub2"))
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, sub2)
|
||||
|
||||
// Assert that re-creating the bucket fails.
|
||||
_, err = top.CreateBucket([]byte("sub2"))
|
||||
require.ErrorIs(t, err, walletdb.ErrBucketExists)
|
||||
|
||||
// Assert that create-if-not-exists succeeds.
|
||||
_, err = top.CreateBucketIfNotExists([]byte("sub2"))
|
||||
require.NoError(t, err)
|
||||
|
||||
// Assert that fetching the bucket succeeds.
|
||||
sub2 = top.NestedReadWriteBucket([]byte("sub2"))
|
||||
require.NotNil(t, sub2)
|
||||
|
||||
// Try to put key with same name as bucket.
|
||||
require.ErrorIs(t, top.Put([]byte("sub2"), []byte("val")), walletdb.ErrIncompatibleValue)
|
||||
|
||||
// Put key into sub bucket.
|
||||
require.NoError(t, sub2.Put([]byte("subkey"), []byte("subval")))
|
||||
require.Equal(t, []byte("subval"), sub2.Get([]byte("subkey")))
|
||||
|
||||
// Overwrite key in sub bucket.
|
||||
require.NoError(t, sub2.Put([]byte("subkey"), []byte("subval2")))
|
||||
require.Equal(t, []byte("subval2"), sub2.Get([]byte("subkey")))
|
||||
|
||||
// Check for each result.
|
||||
kvs := make(map[string][]byte)
|
||||
require.NoError(t, top.ForEach(func(k, v []byte) error {
|
||||
kvs[string(k)] = v
|
||||
return nil
|
||||
}))
|
||||
require.Equal(t, map[string][]byte{
|
||||
"key": []byte("val2"),
|
||||
"nilkey": []byte(""),
|
||||
"sub2": nil,
|
||||
}, kvs)
|
||||
|
||||
// Delete key.
|
||||
require.NoError(t, top.Delete([]byte("key")))
|
||||
|
||||
// Delete non-existent key.
|
||||
require.NoError(t, top.Delete([]byte("keynonexistent")))
|
||||
|
||||
// Test cursor.
|
||||
cursor := top.ReadWriteCursor()
|
||||
k, v := cursor.First()
|
||||
require.Equal(t, []byte("nilkey"), k)
|
||||
require.Equal(t, []byte(""), v)
|
||||
|
||||
k, v = cursor.Last()
|
||||
require.Equal(t, []byte("sub2"), k)
|
||||
require.Nil(t, v)
|
||||
|
||||
k, v = cursor.Prev()
|
||||
require.Equal(t, []byte("nilkey"), k)
|
||||
require.Equal(t, []byte(""), v)
|
||||
|
||||
k, v = cursor.Prev()
|
||||
require.Nil(t, k)
|
||||
require.Nil(t, v)
|
||||
|
||||
k, v = cursor.Next()
|
||||
require.Equal(t, []byte("sub2"), k)
|
||||
require.Nil(t, v)
|
||||
|
||||
k, v = cursor.Next()
|
||||
require.Nil(t, k)
|
||||
require.Nil(t, v)
|
||||
|
||||
k, v = cursor.Seek([]byte("nilkey"))
|
||||
require.Equal(t, []byte("nilkey"), k)
|
||||
require.Equal(t, []byte(""), v)
|
||||
|
||||
require.NoError(t, sub2.Put([]byte("k1"), []byte("v1")))
|
||||
require.NoError(t, sub2.Put([]byte("k2"), []byte("v2")))
|
||||
require.NoError(t, sub2.Put([]byte("k3"), []byte("v3")))
|
||||
|
||||
cursor = sub2.ReadWriteCursor()
|
||||
cursor.First()
|
||||
for i := 0; i < 4; i++ {
|
||||
require.NoError(t, cursor.Delete())
|
||||
}
|
||||
require.NoError(t, sub2.ForEach(func(k, v []byte) error {
|
||||
require.Fail(t, "unexpected data")
|
||||
return nil
|
||||
}))
|
||||
|
||||
_, err = sub2.CreateBucket([]byte("sub3"))
|
||||
require.NoError(t, err)
|
||||
require.ErrorIs(t, cursor.Delete(), walletdb.ErrIncompatibleValue)
|
||||
|
||||
// Try to delete all data.
|
||||
require.NoError(t, tx.DeleteTopLevelBucket([]byte("top")))
|
||||
require.Nil(t, tx.ReadBucket([]byte("top")))
|
||||
|
||||
return nil
|
||||
}, func() {}))
|
||||
}
|
||||
|
||||
func testSubBucketSequence(t *testing.T, db walletdb.DB) {
|
||||
require.NoError(t, Update(db, func(tx walletdb.ReadWriteTx) error {
|
||||
// Create top level bucket.
|
||||
top, err := tx.CreateTopLevelBucket([]byte("top"))
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, top)
|
||||
|
||||
// Create sub-bucket.
|
||||
sub2, err := top.CreateBucket([]byte("sub2"))
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, sub2)
|
||||
|
||||
// Test sequence.
|
||||
require.Equal(t, uint64(0), top.Sequence())
|
||||
|
||||
require.NoError(t, top.SetSequence(100))
|
||||
require.Equal(t, uint64(100), top.Sequence())
|
||||
|
||||
require.NoError(t, top.SetSequence(101))
|
||||
require.Equal(t, uint64(101), top.Sequence())
|
||||
|
||||
next, err := top.NextSequence()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(102), next)
|
||||
|
||||
next, err = sub2.NextSequence()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(1), next)
|
||||
|
||||
return nil
|
||||
}, func() {}))
|
||||
}
|
||||
316
server/pkg/kvdb/readwrite_cursor_test.go
Normal file
@@ -0,0 +1,316 @@
|
||||
package kvdb
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/btcsuite/btcwallet/walletdb"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func testReadCursorEmptyInterval(t *testing.T, db walletdb.DB) {
|
||||
err := Update(db, func(tx walletdb.ReadWriteTx) error {
|
||||
b, err := tx.CreateTopLevelBucket([]byte("apple"))
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, b)
|
||||
|
||||
return nil
|
||||
}, func() {})
|
||||
require.NoError(t, err)
|
||||
|
||||
err = View(db, func(tx walletdb.ReadTx) error {
|
||||
b := tx.ReadBucket([]byte("apple"))
|
||||
require.NotNil(t, b)
|
||||
|
||||
cursor := b.ReadCursor()
|
||||
k, v := cursor.First()
|
||||
require.Nil(t, k)
|
||||
require.Nil(t, v)
|
||||
|
||||
k, v = cursor.Next()
|
||||
require.Nil(t, k)
|
||||
require.Nil(t, v)
|
||||
|
||||
k, v = cursor.Last()
|
||||
require.Nil(t, k)
|
||||
require.Nil(t, v)
|
||||
|
||||
k, v = cursor.Prev()
|
||||
require.Nil(t, k)
|
||||
require.Nil(t, v)
|
||||
|
||||
return nil
|
||||
}, func() {})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func testReadCursorNonEmptyInterval(t *testing.T, db walletdb.DB) {
|
||||
testKeyValues := []KV{
|
||||
{"b", "1"},
|
||||
{"c", "2"},
|
||||
{"da", "3"},
|
||||
{"e", "4"},
|
||||
}
|
||||
|
||||
err := Update(db, func(tx walletdb.ReadWriteTx) error {
|
||||
b, err := tx.CreateTopLevelBucket([]byte("apple"))
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, b)
|
||||
|
||||
for _, kv := range testKeyValues {
|
||||
require.NoError(t, b.Put([]byte(kv.key), []byte(kv.val)))
|
||||
}
|
||||
return nil
|
||||
}, func() {})
|
||||
|
||||
require.NoError(t, err)
|
||||
|
||||
err = View(db, func(tx walletdb.ReadTx) error {
|
||||
b := tx.ReadBucket([]byte("apple"))
|
||||
require.NotNil(t, b)
|
||||
|
||||
// Iterate from the front.
|
||||
var kvs []KV
|
||||
cursor := b.ReadCursor()
|
||||
k, v := cursor.First()
|
||||
|
||||
for k != nil && v != nil {
|
||||
kvs = append(kvs, KV{string(k), string(v)})
|
||||
k, v = cursor.Next()
|
||||
}
|
||||
require.Equal(t, testKeyValues, kvs)
|
||||
|
||||
// Iterate from the back.
|
||||
kvs = []KV{}
|
||||
k, v = cursor.Last()
|
||||
|
||||
for k != nil && v != nil {
|
||||
kvs = append(kvs, KV{string(k), string(v)})
|
||||
k, v = cursor.Prev()
|
||||
}
|
||||
require.Equal(t, reverseKVs(testKeyValues), kvs)
|
||||
|
||||
// Random access
|
||||
perm := []int{3, 0, 2, 1}
|
||||
for _, i := range perm {
|
||||
k, v := cursor.Seek([]byte(testKeyValues[i].key))
|
||||
require.Equal(t, []byte(testKeyValues[i].key), k)
|
||||
require.Equal(t, []byte(testKeyValues[i].val), v)
|
||||
}
|
||||
|
||||
// Seek to a non-existent key.
|
||||
k, v = cursor.Seek(nil)
|
||||
require.Equal(t, "b", string(k))
|
||||
require.Equal(t, "1", string(v))
|
||||
|
||||
k, v = cursor.Seek([]byte("x"))
|
||||
require.Nil(t, k)
|
||||
require.Nil(t, v)
|
||||
|
||||
return nil
|
||||
}, func() {})
|
||||
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func testReadWriteCursor(t *testing.T, db walletdb.DB) {
|
||||
testKeyValues := []KV{
|
||||
{"b", "1"},
|
||||
{"c", "2"},
|
||||
{"da", "3"},
|
||||
{"e", "4"},
|
||||
}
|
||||
|
||||
count := len(testKeyValues)
|
||||
|
||||
// Pre-store the first half of the interval.
|
||||
require.NoError(t, Update(db, func(tx walletdb.ReadWriteTx) error {
|
||||
b, err := tx.CreateTopLevelBucket([]byte("apple"))
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, b)
|
||||
|
||||
for i := 0; i < count/2; i++ {
|
||||
err = b.Put(
|
||||
[]byte(testKeyValues[i].key),
|
||||
[]byte(testKeyValues[i].val),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
return nil
|
||||
}, func() {}))
|
||||
|
||||
err := Update(db, func(tx walletdb.ReadWriteTx) error {
|
||||
b := tx.ReadWriteBucket([]byte("apple"))
|
||||
require.NotNil(t, b)
|
||||
|
||||
// Store the second half of the interval.
|
||||
for i := count / 2; i < count; i++ {
|
||||
err := b.Put(
|
||||
[]byte(testKeyValues[i].key),
|
||||
[]byte(testKeyValues[i].val),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
cursor := b.ReadWriteCursor()
|
||||
|
||||
// First on valid interval.
|
||||
fk, fv := cursor.First()
|
||||
require.Equal(t, []byte("b"), fk)
|
||||
require.Equal(t, []byte("1"), fv)
|
||||
|
||||
// Prev(First()) = nil
|
||||
k, v := cursor.Prev()
|
||||
require.Nil(t, k)
|
||||
require.Nil(t, v)
|
||||
|
||||
// Last on valid interval.
|
||||
lk, lv := cursor.Last()
|
||||
require.Equal(t, []byte("e"), lk)
|
||||
require.Equal(t, []byte("4"), lv)
|
||||
|
||||
// Next(Last()) = nil
|
||||
k, v = cursor.Next()
|
||||
require.Nil(t, k)
|
||||
require.Nil(t, v)
|
||||
|
||||
// Delete first item, then add an item before the
|
||||
// deleted one. Check that First/Next will "jump"
|
||||
// over the deleted item and return the new first.
|
||||
_, _ = cursor.First()
|
||||
require.NoError(t, cursor.Delete())
|
||||
require.NoError(t, b.Put([]byte("a"), []byte("0")))
|
||||
fk, fv = cursor.First()
|
||||
|
||||
require.Equal(t, []byte("a"), fk)
|
||||
require.Equal(t, []byte("0"), fv)
|
||||
|
||||
k, v = cursor.Next()
|
||||
require.Equal(t, []byte("c"), k)
|
||||
require.Equal(t, []byte("2"), v)
|
||||
|
||||
// Similarly test that a new end is returned if
|
||||
// the old end is deleted first.
|
||||
_, _ = cursor.Last()
|
||||
require.NoError(t, cursor.Delete())
|
||||
require.NoError(t, b.Put([]byte("f"), []byte("5")))
|
||||
|
||||
lk, lv = cursor.Last()
|
||||
require.Equal(t, []byte("f"), lk)
|
||||
require.Equal(t, []byte("5"), lv)
|
||||
|
||||
k, v = cursor.Prev()
|
||||
require.Equal(t, []byte("da"), k)
|
||||
require.Equal(t, []byte("3"), v)
|
||||
|
||||
// Overwrite k/v in the middle of the interval.
|
||||
require.NoError(t, b.Put([]byte("c"), []byte("3")))
|
||||
k, v = cursor.Prev()
|
||||
require.Equal(t, []byte("c"), k)
|
||||
require.Equal(t, []byte("3"), v)
|
||||
|
||||
// Insert new key/values.
|
||||
require.NoError(t, b.Put([]byte("cx"), []byte("x")))
|
||||
require.NoError(t, b.Put([]byte("cy"), []byte("y")))
|
||||
|
||||
k, v = cursor.Next()
|
||||
require.Equal(t, []byte("cx"), k)
|
||||
require.Equal(t, []byte("x"), v)
|
||||
|
||||
k, v = cursor.Next()
|
||||
require.Equal(t, []byte("cy"), k)
|
||||
require.Equal(t, []byte("y"), v)
|
||||
|
||||
expected := []KV{
|
||||
{"a", "0"},
|
||||
{"c", "3"},
|
||||
{"cx", "x"},
|
||||
{"cy", "y"},
|
||||
{"da", "3"},
|
||||
{"f", "5"},
|
||||
}
|
||||
|
||||
// Iterate from the front.
|
||||
var kvs []KV
|
||||
k, v = cursor.First()
|
||||
|
||||
for k != nil && v != nil {
|
||||
kvs = append(kvs, KV{string(k), string(v)})
|
||||
k, v = cursor.Next()
|
||||
}
|
||||
require.Equal(t, expected, kvs)
|
||||
|
||||
// Iterate from the back.
|
||||
kvs = []KV{}
|
||||
k, v = cursor.Last()
|
||||
|
||||
for k != nil && v != nil {
|
||||
kvs = append(kvs, KV{string(k), string(v)})
|
||||
k, v = cursor.Prev()
|
||||
}
|
||||
require.Equal(t, reverseKVs(expected), kvs)
|
||||
|
||||
return nil
|
||||
}, func() {})
|
||||
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// testReadWriteCursorWithBucketAndValue tests that cursors are able to iterate
|
||||
// over both bucket and value keys if both are present in the iterated bucket.
|
||||
func testReadWriteCursorWithBucketAndValue(t *testing.T, db walletdb.DB) {
|
||||
|
||||
// Pre-store the first half of the interval.
|
||||
require.NoError(t, Update(db, func(tx walletdb.ReadWriteTx) error {
|
||||
b, err := tx.CreateTopLevelBucket([]byte("apple"))
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, b)
|
||||
|
||||
require.NoError(t, b.Put([]byte("key"), []byte("val")))
|
||||
|
||||
b1, err := b.CreateBucket([]byte("banana"))
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, b1)
|
||||
|
||||
b2, err := b.CreateBucket([]byte("pear"))
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, b2)
|
||||
|
||||
return nil
|
||||
}, func() {}))
|
||||
|
||||
err := View(db, func(tx walletdb.ReadTx) error {
|
||||
b := tx.ReadBucket([]byte("apple"))
|
||||
require.NotNil(t, b)
|
||||
|
||||
cursor := b.ReadCursor()
|
||||
|
||||
// First on valid interval.
|
||||
k, v := cursor.First()
|
||||
require.Equal(t, []byte("banana"), k)
|
||||
require.Nil(t, v)
|
||||
|
||||
k, v = cursor.Next()
|
||||
require.Equal(t, []byte("key"), k)
|
||||
require.Equal(t, []byte("val"), v)
|
||||
|
||||
k, v = cursor.Last()
|
||||
require.Equal(t, []byte("pear"), k)
|
||||
require.Nil(t, v)
|
||||
|
||||
k, v = cursor.Seek([]byte("k"))
|
||||
require.Equal(t, []byte("key"), k)
|
||||
require.Equal(t, []byte("val"), v)
|
||||
|
||||
k, v = cursor.Seek([]byte("banana"))
|
||||
require.Equal(t, []byte("banana"), k)
|
||||
require.Nil(t, v)
|
||||
|
||||
k, v = cursor.Next()
|
||||
require.Equal(t, []byte("key"), k)
|
||||
require.Equal(t, []byte("val"), v)
|
||||
|
||||
return nil
|
||||
}, func() {})
|
||||
|
||||
require.NoError(t, err)
|
||||
}
|
||||
49
server/pkg/kvdb/readwrite_tx_test.go
Normal file
@@ -0,0 +1,49 @@
|
||||
package kvdb
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/btcsuite/btcwallet/walletdb"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func testTxManualCommit(t *testing.T, db walletdb.DB) {
|
||||
tx, err := db.BeginReadWriteTx()
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, tx)
|
||||
|
||||
committed := false
|
||||
|
||||
tx.OnCommit(func() {
|
||||
committed = true
|
||||
})
|
||||
|
||||
apple, err := tx.CreateTopLevelBucket([]byte("apple"))
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, apple)
|
||||
require.NoError(t, apple.Put([]byte("testKey"), []byte("testVal")))
|
||||
|
||||
banana, err := tx.CreateTopLevelBucket([]byte("banana"))
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, banana)
|
||||
require.NoError(t, banana.Put([]byte("testKey"), []byte("testVal")))
|
||||
require.NoError(t, tx.DeleteTopLevelBucket([]byte("banana")))
|
||||
|
||||
require.NoError(t, tx.Commit())
|
||||
require.True(t, committed)
|
||||
}
|
||||
|
||||
func testTxRollback(t *testing.T, db walletdb.DB) {
|
||||
tx, err := db.BeginReadWriteTx()
|
||||
require.Nil(t, err)
|
||||
require.NotNil(t, tx)
|
||||
|
||||
apple, err := tx.CreateTopLevelBucket([]byte("apple"))
|
||||
require.Nil(t, err)
|
||||
require.NotNil(t, apple)
|
||||
|
||||
require.NoError(t, apple.Put([]byte("testKey"), []byte("testVal")))
|
||||
|
||||
require.NoError(t, tx.Rollback())
|
||||
require.Error(t, walletdb.ErrTxClosed, tx.Commit())
|
||||
}
|
||||
14
server/pkg/kvdb/test.go
Normal file
@@ -0,0 +1,14 @@
|
||||
package kvdb
|
||||
|
||||
type KV struct {
|
||||
key string
|
||||
val string
|
||||
}
|
||||
|
||||
func reverseKVs(a []KV) []KV {
|
||||
for i, j := 0, len(a)-1; i < j; i, j = i+1, j-1 {
|
||||
a[i], a[j] = a[j], a[i]
|
||||
}
|
||||
|
||||
return a
|
||||
}
|
||||
26
server/pkg/kvdb/test_utils.go
Normal file
@@ -0,0 +1,26 @@
|
||||
package kvdb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// RunTests is a helper function to run the tests in a package with
|
||||
// initialization and tear-down of a test kvdb backend.
|
||||
func RunTests(m *testing.M) {
|
||||
var close func() error
|
||||
|
||||
// os.Exit() does not respect defer statements
|
||||
code := m.Run()
|
||||
|
||||
if close != nil {
|
||||
err := close()
|
||||
if err != nil {
|
||||
fmt.Printf("Error: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
os.Exit(code)
|
||||
|
||||
}
|
||||
19
server/pkg/macaroons/LICENSE
Normal file
@@ -0,0 +1,19 @@
|
||||
Copyright (C) 2015-2022 Lightning Labs and The Lightning Network Developers
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
150
server/pkg/macaroons/README.md
Normal file
@@ -0,0 +1,150 @@
|
||||
# macaroons
|
||||
|
||||
This is a more detailed, technical description of how macaroons work and how
|
||||
authentication and authorization are implemented in `lnd`.
|
||||
|
||||
For a more high-level overview see
|
||||
[macaroons.md in the docs](../docs/macaroons.md).
|
||||
|
||||
## Root key
|
||||
|
||||
At startup, if the option `--no-macaroons` is **not** used, a Bolt DB key/value
|
||||
store named `data/macaroons.db` is created with a bucket named `macrootkeys`.
|
||||
In this DB the following two key/value pairs are stored:
|
||||
|
||||
* Key `0`: the encrypted root key (32 bytes).
|
||||
* If the root key does not exist yet, 32 bytes of pseudo-random data are
generated and used.
|
||||
* Key `enckey`: the parameters used to derive a secret encryption key from a
|
||||
passphrase.
|
||||
* The following parameters are stored: `<salt><digest><N><R><P>`
|
||||
* `salt`: 32 bytes of random data used as salt for the `scrypt` key
|
||||
derivation.
|
||||
* `digest`: the sha256 hash of the key derived from the `scrypt` operation. It
is used to verify whether the password is correct.
|
||||
* `N`, `P`, `R`: Parameters used for the `scrypt` operation.
|
||||
* The root key is symmetrically encrypted with the derived secret key, using
the `secretbox` method of the library
[btcsuite/golangcrypto](https://github.com/btcsuite/golangcrypto); a sketch of
this scheme follows at the end of this list.
|
||||
* If the option `--noseedbackup` is used, then the default passphrase
|
||||
`hello` is used to encrypt the root key.
|
||||
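The following is a minimal, illustrative sketch of such a scheme (not the
actual implementation), using `golang.org/x/crypto` as a stand-in for the
library mentioned above; the `scrypt` parameters, function and variable names
are assumptions:

```go
package macaroonexample

import (
	"crypto/rand"
	"crypto/sha256"

	"golang.org/x/crypto/nacl/secretbox"
	"golang.org/x/crypto/scrypt"
)

// encryptRootKey derives a secret key from the passphrase with scrypt and
// seals the root key with secretbox, returning everything that has to be
// persisted in order to decrypt (and verify the passphrase) later on.
func encryptRootKey(passphrase, rootKey []byte) (salt []byte, digest [32]byte,
	nonce [24]byte, ciphertext []byte, err error) {

	// 32 bytes of random data used as the scrypt salt.
	salt = make([]byte, 32)
	if _, err = rand.Read(salt); err != nil {
		return
	}

	// Derive a 32 byte secret key; the N, r and p values are assumptions.
	derived, err := scrypt.Key(passphrase, salt, 32768, 8, 1, 32)
	if err != nil {
		return
	}
	var key [32]byte
	copy(key[:], derived)

	// Hash of the derived key, stored to verify the passphrase later.
	digest = sha256.Sum256(derived)

	// Seal the root key with a random nonce.
	if _, err = rand.Read(nonce[:]); err != nil {
		return
	}
	ciphertext = secretbox.Seal(nil, rootKey, &nonce, &key)

	return salt, digest, nonce, ciphertext, nil
}
```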
|
||||
## Generated macaroons
|
||||
|
||||
With the root key set up, `lnd` continues with creating three macaroon files:
|
||||
|
||||
* `invoice.macaroon`: Grants read and write access to all invoice related gRPC
|
||||
commands (like generating an address or adding an invoice). Can be used for a
|
||||
web shop application for example. Paying an invoice is not possible, even if
|
||||
the name might suggest it. The permission `offchain` is needed to pay an
|
||||
invoice which is currently only granted in the admin macaroon.
|
||||
* `readonly.macaroon`: Grants read-only access to all gRPC commands. Could be
|
||||
given to a monitoring application for example.
|
||||
* `admin.macaroon`: Grants full read and write access to all gRPC commands.
|
||||
This is used by the `lncli` client.
|
||||
|
||||
These three macaroons all have the location field set to `lnd` and have no
|
||||
conditions/first party caveats or third party caveats set.
|
||||
|
||||
The access restrictions are implemented with a list of entity/action pairs that
|
||||
is mapped to the gRPC functions by the `rpcserver.go`.
|
||||
For example, the permissions for the `invoice.macaroon` looks like this:
|
||||
|
||||
```go
|
||||
// invoicePermissions is a slice of all the entities that allows a user
|
||||
// to only access calls that are related to invoices, so: streaming
|
||||
// RPCs, generating, and listing invoices.
|
||||
invoicePermissions = []bakery.Op{
|
||||
{
|
||||
Entity: "invoices",
|
||||
Action: "read",
|
||||
},
|
||||
{
|
||||
Entity: "invoices",
|
||||
Action: "write",
|
||||
},
|
||||
{
|
||||
Entity: "address",
|
||||
Action: "read",
|
||||
},
|
||||
{
|
||||
Entity: "address",
|
||||
Action: "write",
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
## Constraints / First party caveats
|
||||
|
||||
There are currently two constraints implemented that can be used by `lncli` to
|
||||
restrict the macaroon it uses to communicate with the gRPC interface. These can
|
||||
be found in `constraints.go`:
|
||||
|
||||
* `TimeoutConstraint`: Set a timeout in seconds after which the macaroon is no
|
||||
longer valid.
|
||||
This constraint can be set by adding the parameter `--macaroontimeout xy` to
|
||||
the `lncli` command.
|
||||
* `IPLockConstraint`: Locks the macaroon to a specific IP address.
|
||||
This constraint can be set by adding the parameter `--macaroonip a.b.c.d` to
the `lncli` command, as shown in the example below.
|
||||
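For example, both constraints can be applied in a single invocation (the
target command here is an arbitrary illustration):

```shell
$ lncli --macaroontimeout 60 --macaroonip 192.168.1.10 getinfo
```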
|
||||
## Bakery
|
||||
|
||||
As of lnd `v0.9.0-beta` there is a macaroon bakery available through gRPC and
|
||||
command line.
|
||||
Users can create their own macaroons with custom permissions if the provided
|
||||
default macaroons (`admin`, `invoice` and `readonly`) are not sufficient.
|
||||
|
||||
For example, a macaroon that is only allowed to manage peers with a default root
|
||||
key `0` would be created with the following command:
|
||||
|
||||
```shell
|
||||
$ lncli bakemacaroon peers:read peers:write
|
||||
```
|
||||
|
||||
For even more fine-grained permission control, it is also possible to specify
|
||||
single RPC method URIs that are allowed to be accessed by a macaroon. This can
|
||||
be achieved by passing `uri:<methodURI>` pairs to `bakemacaroon`, for example:
|
||||
|
||||
```shell
|
||||
$ lncli bakemacaroon uri:/lnrpc.Lightning/GetInfo uri:/verrpc.Versioner/GetVersion
|
||||
```
|
||||
|
||||
The macaroon created by this call would only be allowed to call the `GetInfo` and
|
||||
`GetVersion` methods instead of all methods that have similar permissions (like
|
||||
`info:read` for example).
|
||||
|
||||
A full list of available entity/action pairs and RPC method URIs can be queried
|
||||
by using the `lncli listpermissions` command.
|
||||
|
||||
### Upgrading from v0.8.0-beta or earlier
|
||||
|
||||
Users upgrading from a version prior to `v0.9.0-beta` might get a `permission
denied` error when trying to use the `lncli bakemacaroon` command.
|
||||
This is because the bakery requires a new permission (`macaroon/generate`) to
be accessed.
|
||||
Users can obtain a new `admin.macaroon` that contains this permission by
|
||||
removing all three default macaroons (`admin.macaroon`, `invoice.macaroon` and
|
||||
`readonly.macaroon`, **NOT** the `macaroons.db`!) from their
|
||||
`data/chain/<chain>/<network>/` directory inside the lnd data directory and
|
||||
restarting lnd.
|
||||
|
||||
|
||||
## Root key rotation
|
||||
|
||||
To manage the root keys used by macaroons, there are `listmacaroonids` and
|
||||
`deletemacaroonid` available through gRPC and the command line.
|
||||
Users can view a list of all macaroon root key IDs that are in use using:
|
||||
|
||||
```shell
|
||||
$ lncli listmacaroonids
|
||||
```
|
||||
|
||||
And remove a specific macaroon root key ID using the command:
|
||||
|
||||
```shell
|
||||
$ lncli deletemacaroonid root_key_id
|
||||
```
|
||||
|
||||
Be careful with the `deletemacaroonid` command as when a root key is deleted,
|
||||
**all the macaroons created from it are invalidated**.
|
||||
53
server/pkg/macaroons/auth.go
Normal file
@@ -0,0 +1,53 @@
|
||||
package macaroons
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
|
||||
macaroon "gopkg.in/macaroon.v2"
|
||||
)
|
||||
|
||||
// MacaroonCredential wraps a macaroon to implement the
|
||||
// credentials.PerRPCCredentials interface.
|
||||
type MacaroonCredential struct {
|
||||
*macaroon.Macaroon
|
||||
}
|
||||
|
||||
// RequireTransportSecurity implements the PerRPCCredentials interface.
|
||||
func (m MacaroonCredential) RequireTransportSecurity() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// GetRequestMetadata implements the PerRPCCredentials interface. This method
|
||||
// is required in order to pass the wrapped macaroon into the gRPC context.
|
||||
// With this, the macaroon will be available within the request handling scope
|
||||
// of the ultimate gRPC server implementation.
|
||||
func (m MacaroonCredential) GetRequestMetadata(ctx context.Context,
|
||||
uri ...string) (map[string]string, error) {
|
||||
|
||||
macBytes, err := m.MarshalBinary()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
md := make(map[string]string)
|
||||
md["macaroon"] = hex.EncodeToString(macBytes)
|
||||
return md, nil
|
||||
}
|
||||
|
||||
// NewMacaroonCredential returns a copy of the passed macaroon wrapped in a
|
||||
// MacaroonCredential struct which implements PerRPCCredentials.
|
||||
func NewMacaroonCredential(m *macaroon.Macaroon) (MacaroonCredential, error) {
|
||||
ms := MacaroonCredential{}
|
||||
|
||||
// The macaroon library's Clone() method has a subtle bug that doesn't
|
||||
// correctly clone all caveats. We need to use our own, safe clone
|
||||
// function instead.
|
||||
var err error
|
||||
ms.Macaroon, err = SafeCopyMacaroon(m)
|
||||
if err != nil {
|
||||
return ms, err
|
||||
}
|
||||
|
||||
return ms, nil
|
||||
}
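// NOTE: illustrative sketch, not part of the original file. Assuming a gRPC
// client connection is being set up elsewhere (the address and the TLS
// credentials below are placeholders), the credential can be attached to
// every RPC like this:
//
//	cred, err := NewMacaroonCredential(mac)
//	if err != nil {
//		return err
//	}
//	conn, err := grpc.Dial(
//		"localhost:7070",
//		grpc.WithTransportCredentials(tlsCreds),
//		grpc.WithPerRPCCredentials(cred),
//	)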
|
||||
250
server/pkg/macaroons/constraints.go
Normal file
@@ -0,0 +1,250 @@
|
||||
package macaroons
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/peer"
|
||||
"gopkg.in/macaroon-bakery.v2/bakery/checkers"
|
||||
macaroon "gopkg.in/macaroon.v2"
|
||||
)
|
||||
|
||||
const (
|
||||
// CondLndCustom is the first party caveat condition name that is used
|
||||
// for all custom caveats in lnd. Every custom caveat entry will be
|
||||
// encoded as the string
|
||||
// "lnd-custom <custom-caveat-name> <custom-caveat-condition>"
|
||||
// in the serialized macaroon. We choose a single space as the delimiter
|
||||
// between the parts because that is also used by the macaroon bakery library.
|
||||
CondLndCustom = "lnd-custom"
|
||||
)
|
||||
|
||||
// CustomCaveatAcceptor is an interface that contains a single method for
|
||||
// checking whether a macaroon with the given custom caveat name should be
|
||||
// accepted or not.
|
||||
type CustomCaveatAcceptor interface {
|
||||
// CustomCaveatSupported returns nil if a macaroon with the given custom
|
||||
// caveat name can be validated by any component in lnd (for example an
|
||||
// RPC middleware). If no component is registered to handle the given
|
||||
// custom caveat then an error must be returned. This method only checks
|
||||
// the availability of a validating component, not the validity of the
|
||||
// macaroon itself.
|
||||
CustomCaveatSupported(customCaveatName string) error
|
||||
}
|
||||
|
||||
// Constraint type adds a layer of indirection over macaroon caveats.
|
||||
type Constraint func(*macaroon.Macaroon) error
|
||||
|
||||
// Checker type adds a layer of indirection over macaroon checkers. A Checker
|
||||
// returns the name of the checker and the checker function; these are used to
|
||||
// register the function with the bakery service's compound checker.
|
||||
type Checker func() (string, checkers.Func)
|
||||
|
||||
// AddConstraints returns new derived macaroon by applying every passed
|
||||
// constraint and tightening its restrictions.
|
||||
func AddConstraints(mac *macaroon.Macaroon,
|
||||
cs ...Constraint) (*macaroon.Macaroon, error) {
|
||||
|
||||
// The macaroon library's Clone() method has a subtle bug that doesn't
|
||||
// correctly clone all caveats. We need to use our own, safe clone
|
||||
// function instead.
|
||||
newMac, err := SafeCopyMacaroon(mac)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, constraint := range cs {
|
||||
if err := constraint(newMac); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return newMac, nil
|
||||
}
|
||||
|
||||
// Each *Constraint function is a functional option, which takes a pointer
|
||||
// to the macaroon and adds another restriction to it. For each *Constraint,
|
||||
// the corresponding *Checker is provided if not provided by default.
|
||||
|
||||
// TimeoutConstraint restricts the lifetime of the macaroon
|
||||
// to the amount of seconds given.
|
||||
func TimeoutConstraint(seconds int64) func(*macaroon.Macaroon) error {
|
||||
return func(mac *macaroon.Macaroon) error {
|
||||
macaroonTimeout := time.Duration(seconds)
|
||||
requestTimeout := time.Now().Add(time.Second * macaroonTimeout)
|
||||
caveat := checkers.TimeBeforeCaveat(requestTimeout)
|
||||
return mac.AddFirstPartyCaveat([]byte(caveat.Condition))
|
||||
}
|
||||
}
|
||||
|
||||
// IPLockConstraint locks macaroon to a specific IP address.
|
||||
// If address is an empty string, this constraint does nothing, to
// accommodate the default value's desired behavior.
|
||||
func IPLockConstraint(ipAddr string) func(*macaroon.Macaroon) error {
|
||||
return func(mac *macaroon.Macaroon) error {
|
||||
if ipAddr != "" {
|
||||
macaroonIPAddr := net.ParseIP(ipAddr)
|
||||
if macaroonIPAddr == nil {
|
||||
return fmt.Errorf("incorrect macaroon IP-" +
|
||||
"lock address")
|
||||
}
|
||||
caveat := checkers.Condition("ipaddr",
|
||||
macaroonIPAddr.String())
|
||||
return mac.AddFirstPartyCaveat([]byte(caveat))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
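// NOTE: illustrative sketch, not part of the original file: deriving a
// restricted macaroon that expires after 60 seconds and is locked to a
// single, assumed IP address.
func exampleRestrictMacaroon(mac *macaroon.Macaroon) (*macaroon.Macaroon, error) {
	return AddConstraints(
		mac,
		TimeoutConstraint(60),
		IPLockConstraint("192.168.1.10"),
	)
}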
|
||||
|
||||
// IPLockChecker accepts client IP from the validation context and compares it
|
||||
// with IP locked in the macaroon. It is of the `Checker` type.
|
||||
func IPLockChecker() (string, checkers.Func) {
|
||||
return "ipaddr", func(ctx context.Context, cond, arg string) error {
|
||||
// Get peer info and extract IP address from it for macaroon
|
||||
// check.
|
||||
pr, ok := peer.FromContext(ctx)
|
||||
if !ok {
|
||||
return fmt.Errorf("unable to get peer info from context")
|
||||
}
|
||||
peerAddr, _, err := net.SplitHostPort(pr.Addr.String())
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to parse peer address")
|
||||
}
|
||||
|
||||
if !net.ParseIP(arg).Equal(net.ParseIP(peerAddr)) {
|
||||
msg := "macaroon locked to different IP address"
|
||||
return fmt.Errorf(msg)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// CustomConstraint returns a function that adds a custom caveat condition to
|
||||
// a macaroon.
|
||||
func CustomConstraint(name, condition string) func(*macaroon.Macaroon) error {
|
||||
return func(mac *macaroon.Macaroon) error {
|
||||
// We rely on a name being set for the interception, so don't
|
||||
// allow creating a caveat without a name in the first place.
|
||||
if name == "" {
|
||||
return fmt.Errorf("name cannot be empty")
|
||||
}
|
||||
|
||||
// The inner (custom) condition is optional.
|
||||
outerCondition := fmt.Sprintf("%s %s", name, condition)
|
||||
if condition == "" {
|
||||
outerCondition = name
|
||||
}
|
||||
|
||||
caveat := checkers.Condition(CondLndCustom, outerCondition)
|
||||
return mac.AddFirstPartyCaveat([]byte(caveat))
|
||||
}
|
||||
}
|
||||
|
||||
// CustomChecker returns a Checker function that is used by the macaroon bakery
|
||||
// library to check whether a custom caveat is supported by lnd in general or
|
||||
// not. Support in this context means: An additional gRPC interceptor was set up
|
||||
// that validates the content (=condition) of the custom caveat. If such an
|
||||
// interceptor is in place then the acceptor should return a nil error. If no
|
||||
// interceptor exists for the custom caveat in the macaroon of a request context
|
||||
// then a non-nil error should be returned and the macaroon is rejected as a
|
||||
// whole.
|
||||
func CustomChecker(acceptor CustomCaveatAcceptor) Checker {
|
||||
// We return the general name of all lnd custom macaroons and a function
|
||||
// that splits the outer condition to extract the name of the custom
|
||||
// condition and the condition itself. In the bakery library that's used
|
||||
// here, a caveat always has the following form:
|
||||
//
|
||||
// <condition-name> <condition-value>
|
||||
//
|
||||
// Because a checker function needs to be bound to the condition name we
|
||||
// have to choose a static name for the first part ("lnd-custom", see
|
||||
// CondLndCustom. Otherwise we'd need to register a new Checker function
|
||||
// for each custom caveat that's registered. To allow for a generic
|
||||
// custom caveat handling, we just add another layer and expand the
|
||||
// initial <condition-value> into
|
||||
//
|
||||
// "<custom-condition-name> <custom-condition-value>"
|
||||
//
|
||||
// The full caveat string entry of a macaroon that uses this generic
|
||||
// mechanism would therefore look like this:
|
||||
//
|
||||
// "lnd-custom <custom-condition-name> <custom-condition-value>"
|
||||
checker := func(_ context.Context, _, outerCondition string) error {
|
||||
if outerCondition != strings.TrimSpace(outerCondition) {
|
||||
return fmt.Errorf("unexpected white space found in " +
|
||||
"caveat condition")
|
||||
}
|
||||
if outerCondition == "" {
|
||||
return fmt.Errorf("expected custom caveat, got empty " +
|
||||
"string")
|
||||
}
|
||||
|
||||
// The condition part of the original caveat is now name and
|
||||
// condition of the custom caveat (we add a layer of conditions
|
||||
// to allow one custom checker to work for all custom lnd
|
||||
// conditions that implement arbitrary business logic).
|
||||
parts := strings.Split(outerCondition, " ")
|
||||
customCaveatName := parts[0]
|
||||
|
||||
return acceptor.CustomCaveatSupported(customCaveatName)
|
||||
}
|
||||
|
||||
return func() (string, checkers.Func) {
|
||||
return CondLndCustom, checker
|
||||
}
|
||||
}
|
||||
|
||||
// HasCustomCaveat tests if the given macaroon has a custom caveat with the
|
||||
// given custom caveat name.
|
||||
func HasCustomCaveat(mac *macaroon.Macaroon, customCaveatName string) bool {
|
||||
if mac == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
caveatPrefix := []byte(fmt.Sprintf(
|
||||
"%s %s", CondLndCustom, customCaveatName,
|
||||
))
|
||||
for _, caveat := range mac.Caveats() {
|
||||
if bytes.HasPrefix(caveat.Id, caveatPrefix) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// GetCustomCaveatCondition returns the custom caveat condition for the given
|
||||
// custom caveat name from the given macaroon.
|
||||
func GetCustomCaveatCondition(mac *macaroon.Macaroon,
|
||||
customCaveatName string) string {
|
||||
|
||||
if mac == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
caveatPrefix := []byte(fmt.Sprintf(
|
||||
"%s %s ", CondLndCustom, customCaveatName,
|
||||
))
|
||||
for _, caveat := range mac.Caveats() {
|
||||
// The caveat id has a format of
|
||||
// "lnd-custom [custom-caveat-name] [custom-caveat-condition]"
|
||||
// and we only want the condition part. If we match the prefix
|
||||
// part we return the condition that comes after the prefix.
|
||||
if bytes.HasPrefix(caveat.Id, caveatPrefix) {
|
||||
caveatSplit := strings.SplitN(
|
||||
string(caveat.Id),
|
||||
string(caveatPrefix),
|
||||
2,
|
||||
)
|
||||
if len(caveatSplit) == 2 {
|
||||
return caveatSplit[1]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// We didn't find a condition for the given custom caveat name.
|
||||
return ""
|
||||
}
|
||||
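For reference, a minimal usage sketch (not part of this commit) of how the constraint helpers above might be combined; the root key, ID, location and caveat names are illustrative placeholders.

// Hypothetical usage sketch: bake a macaroon and tighten it with the
// constraints defined above. All literal values are placeholders.
package main

import (
    "fmt"
    "log"

    "github.com/ark-network/tools/macaroons"
    macaroon "gopkg.in/macaroon.v2"
)

func main() {
    // Assumed inputs: a root key, an ID and a location for the macaroon.
    mac, err := macaroon.New(
        []byte("root-key"), []byte("id"), "arkd", macaroon.LatestVersion,
    )
    if err != nil {
        log.Fatal(err)
    }

    // Derive a new macaroon that expires in 60 seconds, is locked to a
    // single IP and carries a "lnd-custom example-caveat value" caveat.
    constrained, err := macaroons.AddConstraints(
        mac,
        macaroons.TimeoutConstraint(60),
        macaroons.IPLockConstraint("127.0.0.1"),
        macaroons.CustomConstraint("example-caveat", "value"),
    )
    if err != nil {
        log.Fatal(err)
    }

    // The custom caveat can be queried back from the derived macaroon.
    fmt.Println(macaroons.HasCustomCaveat(constrained, "example-caveat"))
    fmt.Println(macaroons.GetCustomCaveatCondition(constrained, "example-caveat"))
}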
159
server/pkg/macaroons/constraints_test.go
Normal file
159
server/pkg/macaroons/constraints_test.go
Normal file
@@ -0,0 +1,159 @@
package macaroons_test

import (
    "fmt"
    "strings"
    "testing"
    "time"

    "github.com/ark-network/tools/macaroons"
    "github.com/stretchr/testify/require"
    macaroon "gopkg.in/macaroon.v2"
)

var (
    testRootKey                 = []byte("dummyRootKey")
    testID                      = []byte("dummyId")
    testLocation                = "lnd"
    testVersion                 = macaroon.LatestVersion
    expectedTimeCaveatSubstring = fmt.Sprintf("time-before %d", time.Now().Year())
)

func createDummyMacaroon(t *testing.T) *macaroon.Macaroon {
    dummyMacaroon, err := macaroon.New(
        testRootKey, testID, testLocation, testVersion,
    )
    require.NoError(t, err, "Error creating initial macaroon")
    return dummyMacaroon
}

// TestAddConstraints tests that constraints can be added to an existing
// macaroon and therefore tighten its restrictions.
func TestAddConstraints(t *testing.T) {
    t.Parallel()

    // We need a dummy macaroon to start with. Create one without
    // a bakery, because we mock everything anyway.
    initialMac := createDummyMacaroon(t)

    // Now add a constraint and make sure we have a cloned macaroon
    // with the constraint applied instead of a mutated initial one.
    newMac, err := macaroons.AddConstraints(
        initialMac, macaroons.TimeoutConstraint(1),
    )
    require.NoError(t, err, "Error adding constraint")
    if newMac == initialMac {
        t.Fatalf("Initial macaroon has been changed, something " +
            "went wrong!")
    }

    // Finally, test that the constraint has been added.
    if len(initialMac.Caveats()) == len(newMac.Caveats()) {
        t.Fatalf("No caveat has been added to the macaroon when " +
            "constraint was applied")
    }
}

// TestTimeoutConstraint tests that a caveat for the lifetime of
// a macaroon is created.
func TestTimeoutConstraint(t *testing.T) {
    t.Parallel()

    // Get a configured version of the constraint function.
    constraintFunc := macaroons.TimeoutConstraint(3)

    // Now we need a dummy macaroon that we can apply the constraint
    // function to.
    testMacaroon := createDummyMacaroon(t)
    err := constraintFunc(testMacaroon)
    require.NoError(t, err, "Error applying timeout constraint")

    // Finally, check that the created caveat has an
    // acceptable value.
    if !strings.HasPrefix(
        string(testMacaroon.Caveats()[0].Id),
        expectedTimeCaveatSubstring,
    ) {

        t.Fatalf("Added caveat '%s' does not meet the expectations!",
            testMacaroon.Caveats()[0].Id)
    }
}

// TestIpLockConstraint tests that a caveat locking a macaroon to a
// specific IP address is created.
func TestIpLockConstraint(t *testing.T) {
    t.Parallel()

    // Get a configured version of the constraint function.
    constraintFunc := macaroons.IPLockConstraint("127.0.0.1")

    // Now we need a dummy macaroon that we can apply the constraint
    // function to.
    testMacaroon := createDummyMacaroon(t)
    err := constraintFunc(testMacaroon)
    require.NoError(t, err, "Error applying IP lock constraint")

    // Finally, check that the created caveat has an
    // acceptable value.
    if string(testMacaroon.Caveats()[0].Id) != "ipaddr 127.0.0.1" {
        t.Fatalf("Added caveat '%s' does not meet the expectations!",
            testMacaroon.Caveats()[0].Id)
    }
}

// TestIPLockBadIP tests that an IP constraint cannot be added if the
// provided string is not a valid IP address.
func TestIPLockBadIP(t *testing.T) {
    t.Parallel()

    constraintFunc := macaroons.IPLockConstraint("127.0.0/800")
    testMacaroon := createDummyMacaroon(t)
    err := constraintFunc(testMacaroon)
    if err == nil {
        t.Fatalf("IPLockConstraint with bad IP should fail.")
    }
}

// TestCustomConstraint tests that a custom constraint with a name and value can
// be added to a macaroon.
func TestCustomConstraint(t *testing.T) {
    t.Parallel()

    // Test a custom caveat with a value first.
    constraintFunc := macaroons.CustomConstraint("unit-test", "test-value")
    testMacaroon := createDummyMacaroon(t)
    require.NoError(t, constraintFunc(testMacaroon))

    require.Equal(
        t, []byte("lnd-custom unit-test test-value"),
        testMacaroon.Caveats()[0].Id,
    )
    require.True(t, macaroons.HasCustomCaveat(testMacaroon, "unit-test"))
    require.False(t, macaroons.HasCustomCaveat(testMacaroon, "test-value"))
    require.False(t, macaroons.HasCustomCaveat(testMacaroon, "something"))
    require.False(t, macaroons.HasCustomCaveat(nil, "foo"))

    customCaveatCondition := macaroons.GetCustomCaveatCondition(
        testMacaroon, "unit-test",
    )
    require.Equal(t, customCaveatCondition, "test-value")

    // Custom caveats don't necessarily need a value, just the name is fine
    // too to create a tagged macaroon.
    constraintFunc = macaroons.CustomConstraint("unit-test", "")
    testMacaroon = createDummyMacaroon(t)
    require.NoError(t, constraintFunc(testMacaroon))

    require.Equal(
        t, []byte("lnd-custom unit-test"), testMacaroon.Caveats()[0].Id,
    )
    require.True(t, macaroons.HasCustomCaveat(testMacaroon, "unit-test"))
    require.False(t, macaroons.HasCustomCaveat(testMacaroon, "test-value"))
    require.False(t, macaroons.HasCustomCaveat(testMacaroon, "something"))

    customCaveatCondition = macaroons.GetCustomCaveatCondition(
        testMacaroon, "unit-test",
    )
    require.Equal(t, customCaveatCondition, "")
}
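The CustomChecker factory in constraints.go above only verifies that some component can validate a given custom caveat name. A small illustrative sketch (not part of this commit) that exercises the returned checker directly with a stub acceptor; the "example-caveat" name is a placeholder.

// Illustrative sketch: calling the checker returned by CustomChecker with a
// stub acceptor that only knows about the hypothetical "example-caveat" name.
package main

import (
    "context"
    "fmt"

    "github.com/ark-network/tools/macaroons"
)

// stubAcceptor is a hypothetical CustomCaveatAcceptor that accepts a single
// custom caveat name.
type stubAcceptor struct{}

func (stubAcceptor) CustomCaveatSupported(name string) error {
    if name == "example-caveat" {
        return nil
    }
    return fmt.Errorf("no validator registered for %q", name)
}

func main() {
    // CustomChecker returns a Checker; invoking it yields the condition name
    // ("lnd-custom") and the checker function bound to it.
    condName, check := macaroons.CustomChecker(stubAcceptor{})()
    fmt.Println(condName)

    // The outer condition has the form "<custom-caveat-name> <custom-value>".
    fmt.Println(check(context.Background(), condName, "example-caveat some-value")) // nil
    fmt.Println(check(context.Background(), condName, "unknown-caveat x"))          // error
}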
44
server/pkg/macaroons/context.go
Normal file
44
server/pkg/macaroons/context.go
Normal file
@@ -0,0 +1,44 @@
package macaroons

import (
    "context"
    "fmt"
)

var (
    // RootKeyIDContextKey is the key to get rootKeyID from context.
    RootKeyIDContextKey = contextKey{"rootkeyid"}

    // ErrContextRootKeyID is used when the supplied context doesn't have
    // a root key ID.
    ErrContextRootKeyID = fmt.Errorf("failed to read root key ID " +
        "from context")
)

// contextKey is the type we use to identify values in the context.
type contextKey struct {
    Name string
}

// ContextWithRootKeyID passes the root key ID value to context.
func ContextWithRootKeyID(ctx context.Context,
    value interface{}) context.Context {

    return context.WithValue(ctx, RootKeyIDContextKey, value)
}

// RootKeyIDFromContext retrieves the root key ID from context using the key
// RootKeyIDContextKey.
func RootKeyIDFromContext(ctx context.Context) ([]byte, error) {
    id, ok := ctx.Value(RootKeyIDContextKey).([]byte)
    if !ok {
        return nil, ErrContextRootKeyID
    }

    // Check that the id is not empty.
    if len(id) == 0 {
        return nil, ErrMissingRootKeyID
    }

    return id, nil
}
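A short illustrative round trip (not part of this commit) for the context helpers above; the 8-byte ID value is an arbitrary placeholder.

// Illustrative sketch: passing a root key ID through a context and reading
// it back with the helpers defined in context.go.
package main

import (
    "context"
    "fmt"
    "log"

    "github.com/ark-network/tools/macaroons"
)

func main() {
    // The value is stored under RootKeyIDContextKey and must be a non-empty
    // []byte for RootKeyIDFromContext to succeed.
    ctx := macaroons.ContextWithRootKeyID(
        context.Background(), []byte{0, 0, 0, 0, 0, 0, 0, 1},
    )

    id, err := macaroons.RootKeyIDFromContext(ctx)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("root key ID: %x\n", id)
}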
99
server/pkg/macaroons/go.mod
Normal file
99
server/pkg/macaroons/go.mod
Normal file
@@ -0,0 +1,99 @@
module github.com/ark-network/tools/macaroons

go 1.22.4

replace github.com/ark-network/tools/kvdb => ../kvdb

require (
    github.com/ark-network/tools/kvdb v0.0.0-00010101000000-000000000000
    github.com/btcsuite/btcwallet v0.16.9
    github.com/btcsuite/btcwallet/walletdb v1.4.2
    github.com/stretchr/testify v1.9.0
    google.golang.org/grpc v1.65.0
    gopkg.in/macaroon-bakery.v2 v2.3.0
    gopkg.in/macaroon.v2 v2.1.0
)

require (
    github.com/beorn7/perks v1.0.1 // indirect
    github.com/btcsuite/btcd v0.23.4 // indirect
    github.com/btcsuite/btcd/btcec/v2 v2.2.2 // indirect
    github.com/btcsuite/btcd/btcutil v1.1.3 // indirect
    github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 // indirect
    github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect
    github.com/cenkalti/backoff/v4 v4.2.1 // indirect
    github.com/cespare/xxhash/v2 v2.3.0 // indirect
    github.com/coreos/go-semver v0.3.0 // indirect
    github.com/coreos/go-systemd/v22 v22.3.2 // indirect
    github.com/davecgh/go-spew v1.1.1 // indirect
    github.com/decred/dcrd/crypto/blake256 v1.0.0 // indirect
    github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
    github.com/dustin/go-humanize v1.0.1 // indirect
    github.com/go-logr/logr v1.3.0 // indirect
    github.com/go-logr/stdr v1.2.2 // indirect
    github.com/go-macaroon-bakery/macaroonpb v1.0.0 // indirect
    github.com/gogo/protobuf v1.3.2 // indirect
    github.com/golang-jwt/jwt/v4 v4.4.2 // indirect
    github.com/golang/protobuf v1.5.4 // indirect
    github.com/google/btree v1.1.2 // indirect
    github.com/gorilla/websocket v1.4.2 // indirect
    github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect
    github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
    github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
    github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
    github.com/jonboulle/clockwork v0.2.2 // indirect
    github.com/json-iterator/go v1.1.11 // indirect
    github.com/lightninglabs/neutrino/cache v1.1.0 // indirect
    github.com/lightningnetwork/lnd/healthcheck v1.2.5 // indirect
    github.com/lightningnetwork/lnd/ticker v1.1.0 // indirect
    github.com/lightningnetwork/lnd/tlv v1.0.2 // indirect
    github.com/lightningnetwork/lnd/tor v1.0.0 // indirect
    github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
    github.com/miekg/dns v1.1.43 // indirect
    github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
    github.com/modern-go/reflect2 v1.0.1 // indirect
    github.com/pmezard/go-difflib v1.0.0 // indirect
    github.com/prometheus/client_golang v1.11.1 // indirect
    github.com/prometheus/client_model v0.2.0 // indirect
    github.com/prometheus/common v0.26.0 // indirect
    github.com/prometheus/procfs v0.6.0 // indirect
    github.com/rogpeppe/fastuuid v1.2.0 // indirect
    github.com/sirupsen/logrus v1.9.3 // indirect
    github.com/soheilhy/cmux v0.1.5 // indirect
    github.com/spf13/pflag v1.0.5 // indirect
    github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 // indirect
    github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect
    go.etcd.io/bbolt v1.3.10 // indirect
    go.etcd.io/etcd/api/v3 v3.5.15 // indirect
    go.etcd.io/etcd/client/pkg/v3 v3.5.15 // indirect
    go.etcd.io/etcd/client/v2 v2.305.15 // indirect
    go.etcd.io/etcd/client/v3 v3.5.15 // indirect
    go.etcd.io/etcd/pkg/v3 v3.5.15 // indirect
    go.etcd.io/etcd/raft/v3 v3.5.15 // indirect
    go.etcd.io/etcd/server/v3 v3.5.15 // indirect
    go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 // indirect
    go.opentelemetry.io/otel v1.20.0 // indirect
    go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0 // indirect
    go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0 // indirect
    go.opentelemetry.io/otel/metric v1.20.0 // indirect
    go.opentelemetry.io/otel/sdk v1.20.0 // indirect
    go.opentelemetry.io/otel/trace v1.20.0 // indirect
    go.opentelemetry.io/proto/otlp v1.0.0 // indirect
    go.uber.org/atomic v1.7.0 // indirect
    go.uber.org/multierr v1.6.0 // indirect
    go.uber.org/zap v1.17.0 // indirect
    golang.org/x/crypto v0.23.0 // indirect
    golang.org/x/net v0.25.0 // indirect
    golang.org/x/sys v0.20.0 // indirect
    golang.org/x/text v0.15.0 // indirect
    golang.org/x/time v0.3.0 // indirect
    google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b // indirect
    google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 // indirect
    google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect
    google.golang.org/protobuf v1.34.1 // indirect
    gopkg.in/errgo.v1 v1.0.1 // indirect
    gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
    gopkg.in/yaml.v2 v2.4.0 // indirect
    gopkg.in/yaml.v3 v3.0.1 // indirect
    sigs.k8s.io/yaml v1.2.0 // indirect
)
492
server/pkg/macaroons/go.sum
Normal file
492
server/pkg/macaroons/go.sum
Normal file
@@ -0,0 +1,492 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.110.8 h1:tyNdfIxjzaWctIiLYOTalaLKZ17SI44SKFW26QbOhME=
|
||||
cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk=
|
||||
cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
|
||||
cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
|
||||
github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M=
|
||||
github.com/btcsuite/btcd v0.22.0-beta.0.20220207191057-4dc4ff7963b4/go.mod h1:7alexyj/lHlOtr2PJK7L/+HDJZpcGDn/pAU98r7DY08=
|
||||
github.com/btcsuite/btcd v0.23.0/go.mod h1:0QJIIN1wwIXF/3G/m87gIwGniDMDQqjVn4SZgnFpsYY=
|
||||
github.com/btcsuite/btcd v0.23.4 h1:IzV6qqkfwbItOS/sg/aDfPDsjPP8twrCOE2R93hxMlQ=
|
||||
github.com/btcsuite/btcd v0.23.4/go.mod h1:0QJIIN1wwIXF/3G/m87gIwGniDMDQqjVn4SZgnFpsYY=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.2.2 h1:5uxe5YjoCq+JeOpg0gZSNHuFgeogrocBYxvg6w9sAgc=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.2.2/go.mod h1:9/CSmJxmuvqzX9Wh2fXMWToLOHhPd11lSPuIupwTkI8=
|
||||
github.com/btcsuite/btcd/btcutil v1.0.0/go.mod h1:Uoxwv0pqYWhD//tfTiipkxNfdhG9UrLwaeswfjfdF0A=
|
||||
github.com/btcsuite/btcd/btcutil v1.1.0/go.mod h1:5OapHB7A2hBBWLm48mmw4MOHNJCcUBTwmWH/0Jn8VHE=
|
||||
github.com/btcsuite/btcd/btcutil v1.1.3 h1:xfbtw8lwpp0G6NwSHb+UE67ryTFHJAiNuipusjXSohQ=
|
||||
github.com/btcsuite/btcd/btcutil v1.1.3/go.mod h1:UR7dsSJzJUfMmFiiLlIrMq1lS9jh9EdCV7FStZSnpi0=
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U=
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
|
||||
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
|
||||
github.com/btcsuite/btcwallet v0.16.9 h1:hLAzEJvsiSn+r6j374G7ThnrYD/toa+Lv7l1Rm6+0oM=
|
||||
github.com/btcsuite/btcwallet v0.16.9/go.mod h1:T3DjEAMZYIqQ28l+ixlB6DX4mFJXCX8Pzz+yACQcLsc=
|
||||
github.com/btcsuite/btcwallet/walletdb v1.4.2 h1:zwZZ+zaHo4mK+FAN6KeK85S3oOm+92x2avsHvFAhVBE=
|
||||
github.com/btcsuite/btcwallet/walletdb v1.4.2/go.mod h1:7ZQ+BvOEre90YT7eSq8bLoxTsgXidUzA/mqbRS114CQ=
|
||||
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
|
||||
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
|
||||
github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I=
|
||||
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
|
||||
github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
|
||||
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
|
||||
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
|
||||
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
|
||||
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw=
|
||||
github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
|
||||
github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA=
|
||||
github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
|
||||
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
|
||||
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
|
||||
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
|
||||
github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
|
||||
github.com/frankban/quicktest v1.0.0/go.mod h1:R98jIehRai+d1/3Hv2//jOVCTJhW1VBavT6B6CuGq2k=
|
||||
github.com/frankban/quicktest v1.1.0/go.mod h1:R98jIehRai+d1/3Hv2//jOVCTJhW1VBavT6B6CuGq2k=
|
||||
github.com/frankban/quicktest v1.2.2/go.mod h1:Qh/WofXFeiAFII1aEBu529AtJo6Zg2VHscnEsbBnJ20=
|
||||
github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY=
|
||||
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
|
||||
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-macaroon-bakery/macaroonpb v1.0.0 h1:It9exBaRMZ9iix1iJ6gwzfwsDE6ExNuwtAJ9e09v6XE=
|
||||
github.com/go-macaroon-bakery/macaroonpb v1.0.0/go.mod h1:UzrGOcbiwTXISFP2XDLDPjfhMINZa+fX/7A2lMd31zc=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs=
|
||||
github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4=
|
||||
github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
|
||||
github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.2.1-0.20190312032427-6f77996f0c42/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
|
||||
github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
|
||||
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
|
||||
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ=
|
||||
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/juju/mgotest v1.0.1/go.mod h1:vTaDufYul+Ps8D7bgseHjq87X8eu0ivlKLp9mVc/Bfc=
|
||||
github.com/juju/postgrestest v1.1.0/go.mod h1:/n17Y2T6iFozzXwSCO0JYJ5gSiz2caEtSwAjh/uLXDM=
|
||||
github.com/juju/qthttptest v0.0.1/go.mod h1://LCf/Ls22/rPw2u1yWukUJvYtfPY4nYpWUl2uZhryo=
|
||||
github.com/juju/schema v1.0.0/go.mod h1:Y+ThzXpUJ0E7NYYocAbuvJ7vTivXfrof/IfRPq/0abI=
|
||||
github.com/juju/webbrowser v0.0.0-20160309143629-54b8c57083b4/go.mod h1:G6PCelgkM6cuvyD10iYJsjLBsSadVXtJ+nBxFAxE2BU=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lightninglabs/neutrino/cache v1.1.0 h1:szZIhVabiQIsGzJjhvo76sj05Au+zVotj2M34EquGME=
|
||||
github.com/lightninglabs/neutrino/cache v1.1.0/go.mod h1:XJNcgdOw1LQnanGjw8Vj44CvguYA25IMKjWFZczwZuo=
|
||||
github.com/lightningnetwork/lnd/healthcheck v1.2.5 h1:aTJy5xeBpcWgRtW/PGBDe+LMQEmNm/HQewlQx2jt7OA=
|
||||
github.com/lightningnetwork/lnd/healthcheck v1.2.5/go.mod h1:G7Tst2tVvWo7cx6mSBEToQC5L1XOGxzZTPB29g9Rv2I=
|
||||
github.com/lightningnetwork/lnd/ticker v1.1.0 h1:ShoBiRP3pIxZHaETndfQ5kEe+S4NdAY1hiX7YbZ4QE4=
|
||||
github.com/lightningnetwork/lnd/ticker v1.1.0/go.mod h1:ubqbSVCn6RlE0LazXuBr7/Zi6QT0uQo++OgIRBxQUrk=
|
||||
github.com/lightningnetwork/lnd/tlv v1.0.2 h1:LG7H3Uw/mHYGnEeHRPg+STavAH+UsFvuBflD0PzcYFQ=
|
||||
github.com/lightningnetwork/lnd/tlv v1.0.2/go.mod h1:fICAfsqk1IOsC1J7G9IdsWX1EqWRMqEDCNxZJSKr9C4=
|
||||
github.com/lightningnetwork/lnd/tor v1.0.0 h1:wvEc7I+Y7IOtPglVP3cVBbYhiVhc7uTd7cMF9gQRzwA=
|
||||
github.com/lightningnetwork/lnd/tor v1.0.0/go.mod h1:RDtaAdwfAm+ONuPYwUhNIH1RAvKPv+75lHPOegUcz64=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/miekg/dns v1.1.43 h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg=
|
||||
github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
|
||||
github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_golang v1.11.1 h1:+4eQaD7vAZ6DsfsxB15hbE0odUjGI5ARs9yskGu1v4s=
|
||||
github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ=
|
||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
|
||||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/rogpeppe/fastuuid v1.2.0 h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s=
|
||||
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
|
||||
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0=
|
||||
go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ=
|
||||
go.etcd.io/etcd/api/v3 v3.5.15 h1:3KpLJir1ZEBrYuV2v+Twaa/e2MdDCEZ/70H+lzEiwsk=
|
||||
go.etcd.io/etcd/api/v3 v3.5.15/go.mod h1:N9EhGzXq58WuMllgH9ZvnEr7SI9pS0k0+DHZezGp7jM=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.15 h1:fo0HpWz/KlHGMCC+YejpiCmyWDEuIpnTDzpJLB5fWlA=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.15/go.mod h1:mXDI4NAOwEiszrHCb0aqfAYNCrZP4e9hRca3d1YK8EU=
|
||||
go.etcd.io/etcd/client/v2 v2.305.15 h1:VG2xbf8Vz1KJh65Ar2V5eDmfkp1bpzkSEHlhJM3usp8=
|
||||
go.etcd.io/etcd/client/v2 v2.305.15/go.mod h1:Ad5dRjPVb/n5yXgAWQ/hXzuXXkBk0Y658ocuXYaUU48=
|
||||
go.etcd.io/etcd/client/v3 v3.5.15 h1:23M0eY4Fd/inNv1ZfU3AxrbbOdW79r9V9Rl62Nm6ip4=
|
||||
go.etcd.io/etcd/client/v3 v3.5.15/go.mod h1:CLSJxrYjvLtHsrPKsy7LmZEE+DK2ktfd2bN4RhBMwlU=
|
||||
go.etcd.io/etcd/pkg/v3 v3.5.15 h1:/Iu6Sr3iYaAjy++8sIDoZW9/EfhcwLZwd4FOZX2mMOU=
|
||||
go.etcd.io/etcd/pkg/v3 v3.5.15/go.mod h1:e3Acf298sPFmTCGTrnGvkClEw9RYIyPtNzi1XM8rets=
|
||||
go.etcd.io/etcd/raft/v3 v3.5.15 h1:jOA2HJF7zb3wy8H/pL13e8geWqkEa/kUs0waUggZC0I=
|
||||
go.etcd.io/etcd/raft/v3 v3.5.15/go.mod h1:k3r7P4seEiUcgxOPLp+mloJWV3Q4QLPGNvy/OgC8OtM=
|
||||
go.etcd.io/etcd/server/v3 v3.5.15 h1:x35jrWnZgsRwMsFsUJIUdT1bvzIz1B+29HjMfRYVN/E=
|
||||
go.etcd.io/etcd/server/v3 v3.5.15/go.mod h1:l9jX9oa/iuArjqz0RNX/TDbc70dLXxRZo/nmPucrpFo=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 h1:PzIubN4/sjByhDRHLviCjJuweBXWFZWhghjg7cS28+M=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0/go.mod h1:Ct6zzQEuGK3WpJs2n4dn+wfJYzd/+hNnxMRTWjGn30M=
|
||||
go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc=
|
||||
go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0 h1:DeFD0VgTZ+Cj6hxravYYZE2W4GlneVH81iAOPjZkzk8=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0/go.mod h1:GijYcYmNpX1KazD5JmWGsi4P7dDTTTnfv1UbGn84MnU=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0 h1:gvmNvqrPYovvyRmCSygkUDyL8lC5Tl845MLEwqpxhEU=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0/go.mod h1:vNUq47TGFioo+ffTSnKNdob241vePmtNZnAODKapKd0=
|
||||
go.opentelemetry.io/otel/metric v1.20.0 h1:ZlrO8Hu9+GAhnepmRGhSU7/VkpjrNowxRN9GyKR4wzA=
|
||||
go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM=
|
||||
go.opentelemetry.io/otel/sdk v1.20.0 h1:5Jf6imeFZlZtKv9Qbo6qt2ZkmWtdWx/wzcCbNUlAWGM=
|
||||
go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0=
|
||||
go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ=
|
||||
go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU=
|
||||
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
|
||||
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
|
||||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
|
||||
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U=
|
||||
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
|
||||
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20180723164146-c126467f60eb/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
|
||||
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20150829230318-ea47fc708ee3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
|
||||
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo=
|
||||
golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
|
||||
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
|
||||
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
|
||||
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
|
||||
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181008205924-a2b3f7f249e9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b h1:+YaDE2r2OG8t/z5qmsh7Y+XXwCbvadxxZ0YY6mTdrVA=
|
||||
google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:CgAqfJo+Xmu0GwA0411Ht3OU3OntXwsGmrmjI8ioGXI=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
|
||||
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
|
||||
google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
|
||||
google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
|
||||
google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/errgo.v1 v1.0.0/go.mod h1:CxwszS/Xz1C49Ucd2i6Zil5UToP1EmyrFhKaMVbg1mk=
|
||||
gopkg.in/errgo.v1 v1.0.1 h1:oQFRXzZ7CkBGdm1XZm/EbQYaYNNEElNBOd09M6cqNso=
|
||||
gopkg.in/errgo.v1 v1.0.1/go.mod h1:3NjfXwocQRYAPTq4/fzX+CwUhPRcR/azYRhj8G+LqMo=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/httprequest.v1 v1.2.0/go.mod h1:T61ZUaJLpMnzvoJDO03ZD8yRXD4nZzBeDoW5e9sffjg=
|
||||
gopkg.in/juju/environschema.v1 v1.0.0/go.mod h1:WTgU3KXKCVoO9bMmG/4KHzoaRvLeoxfjArpgd1MGWFA=
|
||||
gopkg.in/macaroon-bakery.v2 v2.3.0 h1:b40knPgPTke1QLTE8BSYeH7+R/hiIozB1A8CTLYN0Ic=
|
||||
gopkg.in/macaroon-bakery.v2 v2.3.0/go.mod h1:/8YhtPARXeRzbpEPLmRB66+gQE8/pzBBkWwg7Vz/guc=
|
||||
gopkg.in/macaroon.v2 v2.1.0 h1:HZcsjBCzq9t0eBPMKqTN/uSN6JOm78ZJ2INbqcBQOUI=
|
||||
gopkg.in/macaroon.v2 v2.1.0/go.mod h1:OUb+TQP/OP0WOerC2Jp/3CwhIKyIa9kQjuc7H24e6/o=
|
||||
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
|
||||
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
||||
11
server/pkg/macaroons/security.go
Normal file
@@ -0,0 +1,11 @@
|
||||
package macaroons
|
||||
|
||||
import "github.com/btcsuite/btcwallet/snacl"
|
||||
|
||||
var (
|
||||
// Below are the default scrypt parameters that are used when creating
|
||||
// the encryption key for the macaroon database with snacl.NewSecretKey.
|
||||
scryptN = snacl.DefaultN
|
||||
scryptR = snacl.DefaultR
|
||||
scryptP = snacl.DefaultP
|
||||
)
|
||||
14
server/pkg/macaroons/security_integration.go
Normal file
@@ -0,0 +1,14 @@
|
||||
//go:build integration
|
||||
|
||||
package macaroons
|
||||
|
||||
import "github.com/btcsuite/btcwallet/waddrmgr"
|
||||
|
||||
func init() {
|
||||
// Below are the reduced scrypt parameters that are used when creating
|
||||
// the encryption key for the macaroon database with snacl.NewSecretKey.
|
||||
// We use very low values for our itest/rpctest to speed things up.
|
||||
scryptN = waddrmgr.FastScryptOptions.N
|
||||
scryptR = waddrmgr.FastScryptOptions.R
|
||||
scryptP = waddrmgr.FastScryptOptions.P
|
||||
}
|
||||
12
server/pkg/macaroons/security_test.go
Normal file
@@ -0,0 +1,12 @@
|
||||
package macaroons
|
||||
|
||||
import "github.com/btcsuite/btcwallet/waddrmgr"
|
||||
|
||||
func init() {
|
||||
// Below are the reduced scrypt parameters that are used when creating
|
||||
// the encryption key for the macaroon database with snacl.NewSecretKey.
|
||||
// We use very low values for our itest/rpctest to speed things up.
|
||||
scryptN = waddrmgr.FastScryptOptions.N
|
||||
scryptR = waddrmgr.FastScryptOptions.R
|
||||
scryptP = waddrmgr.FastScryptOptions.P
|
||||
}
|
||||
399
server/pkg/macaroons/service.go
Normal file
@@ -0,0 +1,399 @@
|
||||
package macaroons
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
|
||||
"google.golang.org/grpc/metadata"
|
||||
"gopkg.in/macaroon-bakery.v2/bakery"
|
||||
"gopkg.in/macaroon-bakery.v2/bakery/checkers"
|
||||
macaroon "gopkg.in/macaroon.v2"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrMissingRootKeyID specifies the root key ID is missing.
|
||||
ErrMissingRootKeyID = fmt.Errorf("missing root key ID")
|
||||
|
||||
// ErrDeletionForbidden is used when attempting to delete the
|
||||
// DefaultRootKeyID or the encryptedKeyID.
|
||||
ErrDeletionForbidden = fmt.Errorf("the specified ID cannot be deleted")
|
||||
|
||||
// PermissionEntityCustomURI is a special entity name for a permission
|
||||
// that does not describe an entity:action pair but instead specifies a
|
||||
// specific URI that needs to be granted access to. This can be used for
|
||||
// more fine-grained permissions where a macaroon only grants access to
|
||||
// certain methods instead of a whole list of methods that define the
|
||||
// same entity:action pairs. For example: uri:/lnrpc.Lightning/GetInfo
|
||||
// only gives access to the GetInfo call.
|
||||
PermissionEntityCustomURI = "uri"
|
||||
|
||||
// ErrUnknownVersion is returned when a macaroon of an unknown version
// is presented.
|
||||
ErrUnknownVersion = fmt.Errorf("unknown macaroon version")
|
||||
|
||||
// ErrInvalidID is returned when a macaroon ID is invalid.
|
||||
ErrInvalidID = fmt.Errorf("invalid ID")
|
||||
)
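To make the special "uri" entity concrete, here is a minimal, hedged usage sketch (not part of this diff) of baking a macaroon that only authorizes a single gRPC method. The helper name and method path are placeholders; it assumes imports of "context", "gopkg.in/macaroon-bakery.v2/bakery" and this macaroons package.

// bakeSingleMethodMacaroon is an assumed helper; the gRPC method path below
// is purely illustrative.
func bakeSingleMethodMacaroon(ctx context.Context,
	svc *macaroons.Service) ([]byte, error) {

	ops := []bakery.Op{{
		Entity: macaroons.PermissionEntityCustomURI,
		Action: "/some.package.SomeService/SomeMethod",
	}}

	// BakeMacaroon (defined further down in this file) returns the
	// binary-serialized macaroon.
	return svc.BakeMacaroon(ctx, ops)
}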
|
||||
|
||||
// MacaroonValidator is an interface type that can check if macaroons are valid.
|
||||
type MacaroonValidator interface {
|
||||
// ValidateMacaroon extracts the macaroon from the context's gRPC
|
||||
// metadata, checks its signature, makes sure all specified permissions
|
||||
// for the called method are contained within and finally ensures all
|
||||
// caveat conditions are met. A non-nil error is returned if any of the
|
||||
// checks fail.
|
||||
ValidateMacaroon(ctx context.Context,
|
||||
requiredPermissions []bakery.Op, fullMethod string) error
|
||||
}
|
||||
|
||||
// ExtendedRootKeyStore is an interface that augments the existing
|
||||
// macaroons.RootKeyStorage interface by adding a number of additional utility
|
||||
// methods such as encrypting and decrypting the root key given a password.
|
||||
type ExtendedRootKeyStore interface {
|
||||
bakery.RootKeyStore
|
||||
|
||||
// Close closes the RKS and zeros out any in-memory encryption keys.
|
||||
Close() error
|
||||
|
||||
// CreateUnlock calls the underlying root key store's CreateUnlock and
|
||||
// returns the result.
|
||||
CreateUnlock(password *[]byte) error
|
||||
|
||||
// ListMacaroonIDs returns all the root key ID values except the value
|
||||
// of encryptedKeyID.
|
||||
ListMacaroonIDs(ctxt context.Context) ([][]byte, error)
|
||||
|
||||
// DeleteMacaroonID removes one specific root key ID. If the root key
|
||||
// ID is found and deleted, it will be returned.
|
||||
DeleteMacaroonID(ctxt context.Context, rootKeyID []byte) ([]byte, error)
|
||||
|
||||
// ChangePassword calls the underlying root key store's ChangePassword
|
||||
// and returns the result.
|
||||
ChangePassword(oldPw, newPw []byte) error
|
||||
|
||||
// GenerateNewRootKey calls the underlying root key store's
|
||||
// GenerateNewRootKey and returns the result.
|
||||
GenerateNewRootKey() error
|
||||
|
||||
// SetRootKey calls the underlying root key store's SetRootKey and
|
||||
// returns the result.
|
||||
SetRootKey(rootKey []byte) error
|
||||
}
|
||||
|
||||
// Service encapsulates bakery.Bakery and adds a Close() method that zeroes the
|
||||
// root key service encryption keys, as well as utility methods to validate a
|
||||
// macaroon against the bakery and gRPC middleware for macaroon-based auth.
|
||||
type Service struct {
|
||||
bakery.Bakery
|
||||
|
||||
rks bakery.RootKeyStore
|
||||
|
||||
// ExternalValidators is a map between absolute gRPC URIs and the
|
||||
// corresponding external macaroon validator to be used for that URI.
|
||||
// If no external validator for a URI is specified, the service will
|
||||
// use the internal validator.
|
||||
ExternalValidators map[string]MacaroonValidator
|
||||
|
||||
// StatelessInit denotes if the service was initialized in the stateless
|
||||
// mode where no macaroon files should be created on disk.
|
||||
StatelessInit bool
|
||||
}
|
||||
|
||||
// NewService returns a service backed by the macaroon DB backend. The `checks`
|
||||
// argument can be any of the `Checker` type functions defined in this package,
|
||||
// or a custom checker if desired. This constructor prevents double-registration
|
||||
// of checkers to prevent panics, so listing the same checker more than once is
|
||||
// not harmful. Default checkers, such as those for `allow`, `time-before`,
|
||||
// `declared`, and `error` caveats are registered automatically and don't need
|
||||
// to be added.
|
||||
func NewService(keyStore bakery.RootKeyStore, location string,
|
||||
statelessInit bool, checks ...Checker) (*Service, error) {
|
||||
|
||||
macaroonParams := bakery.BakeryParams{
|
||||
Location: location,
|
||||
RootKeyStore: keyStore,
|
||||
// No third-party caveat support for now.
|
||||
// TODO(aakselrod): Add third-party caveat support.
|
||||
Locator: nil,
|
||||
Key: nil,
|
||||
}
|
||||
|
||||
svc := bakery.New(macaroonParams)
|
||||
|
||||
// Register all custom caveat checkers with the bakery's checker.
|
||||
// TODO(aakselrod): Add more checks as required.
|
||||
checker := svc.Checker.FirstPartyCaveatChecker.(*checkers.Checker)
|
||||
for _, check := range checks {
|
||||
cond, fun := check()
|
||||
if !isRegistered(checker, cond) {
|
||||
checker.Register(cond, "std", fun)
|
||||
}
|
||||
}
|
||||
|
||||
return &Service{
|
||||
Bakery: *svc,
|
||||
rks: keyStore,
|
||||
ExternalValidators: make(map[string]MacaroonValidator),
|
||||
StatelessInit: statelessInit,
|
||||
}, nil
|
||||
}
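A minimal wiring sketch, not part of this diff: open the bolt-backed macaroon DB, build the root key storage, construct the service with the IP-lock checker and unlock it. The import paths follow the test files in this commit; the DB path, location string, password and entity:action pair are assumptions.

package main

import (
	"context"
	"log"

	"github.com/ark-network/tools/kvdb"
	"github.com/ark-network/tools/macaroons"
	"gopkg.in/macaroon-bakery.v2/bakery"
)

func main() {
	// Open (or create) the macaroon database file; the path is a placeholder.
	db, err := kvdb.Create(
		kvdb.BoltBackendName, "macaroons.db", true, kvdb.DefaultDBTimeout,
	)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	rks, err := macaroons.NewRootKeyStorage(db)
	if err != nil {
		log.Fatal(err)
	}

	// Build the bakery-backed service and unlock it with a password.
	svc, err := macaroons.NewService(rks, "arkd", false, macaroons.IPLockChecker)
	if err != nil {
		log.Fatal(err)
	}
	defer svc.Close()

	pw := []byte("hello")
	if err := svc.CreateUnlock(&pw); err != nil {
		log.Fatal(err)
	}

	// Bake a macaroon under the default root key for one entity:action pair.
	if _, err := svc.NewMacaroon(
		context.Background(), macaroons.DefaultRootKeyID,
		bakery.Op{Entity: "wallet", Action: "read"},
	); err != nil {
		log.Fatal(err)
	}
}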
|
||||
|
||||
// isRegistered checks to see if the required checker has already been
|
||||
// registered in order to avoid a panic caused by double registration.
|
||||
func isRegistered(c *checkers.Checker, name string) bool {
|
||||
if c == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, info := range c.Info() {
|
||||
if info.Name == name &&
|
||||
info.Prefix == "" &&
|
||||
info.Namespace == "std" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// RegisterExternalValidator registers a custom, external macaroon validator for
|
||||
// the specified absolute gRPC URI. That validator is then fully responsible
// for making sure any macaroon passed for a request to that URI is valid and
|
||||
// satisfies all conditions.
|
||||
func (svc *Service) RegisterExternalValidator(fullMethod string,
|
||||
validator MacaroonValidator) error {
|
||||
|
||||
if validator == nil {
|
||||
return fmt.Errorf("validator cannot be nil")
|
||||
}
|
||||
|
||||
_, ok := svc.ExternalValidators[fullMethod]
|
||||
if ok {
|
||||
return fmt.Errorf("external validator for method %s already "+
|
||||
"registered", fullMethod)
|
||||
}
|
||||
|
||||
svc.ExternalValidators[fullMethod] = validator
|
||||
return nil
|
||||
}
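As a hedged illustration (not part of this diff), an external validator is just any type implementing MacaroonValidator; it could then be registered for one specific method path. The type name and method path below are placeholders.

// allowAllValidator is an illustrative MacaroonValidator that accepts every
// request unconditionally; a real implementation would verify the macaroon.
type allowAllValidator struct{}

func (allowAllValidator) ValidateMacaroon(ctx context.Context,
	requiredPermissions []bakery.Op, fullMethod string) error {

	return nil
}

// During server setup (method path is a placeholder):
//
//	err := svc.RegisterExternalValidator(
//		"/some.package.SomeService/SomeMethod", allowAllValidator{},
//	)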
|
||||
|
||||
// ValidateMacaroon validates the capabilities of a given request given a
|
||||
// bakery service, context, and uri. Within the passed context.Context, we
|
||||
// expect a macaroon to be encoded as request metadata using the key
|
||||
// "macaroon".
|
||||
func (svc *Service) ValidateMacaroon(ctx context.Context,
|
||||
requiredPermissions []bakery.Op, fullMethod string) error {
|
||||
|
||||
// Get macaroon bytes from context and unmarshal into macaroon.
|
||||
macHex, err := RawMacaroonFromContext(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// With the macaroon obtained, we'll now decode the hex-string encoding.
|
||||
macBytes, err := hex.DecodeString(macHex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return svc.CheckMacAuth(
|
||||
ctx, macBytes, requiredPermissions, fullMethod,
|
||||
)
|
||||
}
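For context, a typical caller of ValidateMacaroon is a gRPC unary server interceptor. A hedged sketch follows, assuming imports of "google.golang.org/grpc" and "gopkg.in/macaroon-bakery.v2/bakery"; the per-method permission map is an assumption, not something defined in this commit.

// unaryMacaroonInterceptor validates the macaroon carried in the request
// metadata against the permissions registered for the called method.
func unaryMacaroonInterceptor(svc *macaroons.Service,
	perms map[string][]bakery.Op) grpc.UnaryServerInterceptor {

	return func(ctx context.Context, req interface{},
		info *grpc.UnaryServerInfo,
		handler grpc.UnaryHandler) (interface{}, error) {

		err := svc.ValidateMacaroon(ctx, perms[info.FullMethod], info.FullMethod)
		if err != nil {
			return nil, err
		}

		return handler(ctx, req)
	}
}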
|
||||
|
||||
// CheckMacAuth checks that the macaroon is not disobeying any caveats and is
|
||||
// authorized to perform the operation the user wants to perform.
|
||||
func (svc *Service) CheckMacAuth(ctx context.Context, macBytes []byte,
|
||||
requiredPermissions []bakery.Op, fullMethod string) error {
|
||||
|
||||
// With the macaroon obtained, we'll now unmarshal it from binary into
|
||||
// its concrete struct representation.
|
||||
mac := &macaroon.Macaroon{}
|
||||
err := mac.UnmarshalBinary(macBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Ensure that the macaroon is using the exact same version as we
|
||||
// expect. In the future, we can relax this check to phase in new
|
||||
// versions.
|
||||
if mac.Version() != macaroon.V2 {
|
||||
return fmt.Errorf("%w: %v", ErrUnknownVersion,
|
||||
mac.Version())
|
||||
}
|
||||
|
||||
// Run a similar version check on the ID used for the macaroon as well.
|
||||
const minIDLength = 1
|
||||
if len(mac.Id()) < minIDLength {
|
||||
return ErrInvalidID
|
||||
}
|
||||
if mac.Id()[0] != byte(bakery.Version3) {
|
||||
return ErrInvalidID
|
||||
}
|
||||
|
||||
// Check the method being called against the permitted operation, the
|
||||
// expiration time and IP address and return the result.
|
||||
authChecker := svc.Checker.Auth(macaroon.Slice{mac})
|
||||
_, err = authChecker.Allow(ctx, requiredPermissions...)
|
||||
|
||||
// If the macaroon contains broad permissions and checks out, we're
|
||||
// done.
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// To also allow the special permission of "uri:<FullMethod>" to be a
|
||||
// valid permission, we need to check it manually in case there is no
|
||||
// broader scope permission defined.
|
||||
_, err = authChecker.Allow(ctx, bakery.Op{
|
||||
Entity: PermissionEntityCustomURI,
|
||||
Action: fullMethod,
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// Close closes the database that underlies the RootKeyStore and zeroes the
|
||||
// encryption keys.
|
||||
func (svc *Service) Close() error {
|
||||
if boltRKS, ok := svc.rks.(ExtendedRootKeyStore); ok {
|
||||
return boltRKS.Close()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateUnlock calls the underlying root key store's CreateUnlock and returns
|
||||
// the result.
|
||||
func (svc *Service) CreateUnlock(password *[]byte) error {
|
||||
if boltRKS, ok := svc.rks.(ExtendedRootKeyStore); ok {
|
||||
return boltRKS.CreateUnlock(password)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewMacaroon wraps around the function Oven.NewMacaroon with the defaults,
|
||||
// - version is always bakery.LatestVersion;
|
||||
// - caveats is always nil.
|
||||
//
|
||||
// In addition, it takes a rootKeyID parameter, and puts it into the context.
|
||||
// The context is passed through Oven.NewMacaroon(), which calls the function
|
||||
// RootKey(), that reads the context for rootKeyID.
|
||||
func (svc *Service) NewMacaroon(
|
||||
ctx context.Context, rootKeyID []byte,
|
||||
ops ...bakery.Op) (*bakery.Macaroon, error) {
|
||||
|
||||
// Check rootKeyID is not called with nil or empty bytes. We want the
|
||||
// caller to be aware the value of root key ID used, so we won't replace
|
||||
// it with the DefaultRootKeyID if not specified.
|
||||
if len(rootKeyID) == 0 {
|
||||
return nil, ErrMissingRootKeyID
|
||||
}
|
||||
|
||||
// Pass the root key ID to context.
|
||||
ctx = ContextWithRootKeyID(ctx, rootKeyID)
|
||||
|
||||
return svc.Oven.NewMacaroon(ctx, bakery.LatestVersion, nil, ops...)
|
||||
}
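A small hedged example of the rootKeyID parameter: baking one macaroon per identity so that each can later be revoked independently via DeleteMacaroonID. The helper and the ID scheme are assumptions.

// bakeForIdentity bakes a macaroon under a caller-chosen root key ID; here the
// identity string itself is (illustratively) reused as the root key ID.
func bakeForIdentity(ctx context.Context, svc *macaroons.Service,
	identity string, ops ...bakery.Op) (*bakery.Macaroon, error) {

	return svc.NewMacaroon(ctx, []byte(identity), ops...)
}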
|
||||
|
||||
// ListMacaroonIDs returns all the root key ID values except the value of
|
||||
// encryptedKeyID.
|
||||
func (svc *Service) ListMacaroonIDs(ctxt context.Context) ([][]byte, error) {
|
||||
if boltRKS, ok := svc.rks.(ExtendedRootKeyStore); ok {
|
||||
return boltRKS.ListMacaroonIDs(ctxt)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// DeleteMacaroonID removes one specific root key ID. If the root key ID is
|
||||
// found and deleted, it will be returned.
|
||||
func (svc *Service) DeleteMacaroonID(ctxt context.Context,
|
||||
rootKeyID []byte) ([]byte, error) {
|
||||
|
||||
if boltRKS, ok := svc.rks.(ExtendedRootKeyStore); ok {
|
||||
return boltRKS.DeleteMacaroonID(ctxt, rootKeyID)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// GenerateNewRootKey calls the underlying root key store's GenerateNewRootKey
|
||||
// and returns the result.
|
||||
func (svc *Service) GenerateNewRootKey() error {
|
||||
if boltRKS, ok := svc.rks.(ExtendedRootKeyStore); ok {
|
||||
return boltRKS.GenerateNewRootKey()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetRootKey calls the underlying root key store's SetRootKey and returns the
|
||||
// result.
|
||||
func (svc *Service) SetRootKey(rootKey []byte) error {
|
||||
if boltRKS, ok := svc.rks.(ExtendedRootKeyStore); ok {
|
||||
return boltRKS.SetRootKey(rootKey)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ChangePassword calls the underlying root key store's ChangePassword and
|
||||
// returns the result.
|
||||
func (svc *Service) ChangePassword(oldPw, newPw []byte) error {
|
||||
if boltRKS, ok := svc.rks.(ExtendedRootKeyStore); ok {
|
||||
return boltRKS.ChangePassword(oldPw, newPw)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// BakeMacaroon creates a new macaroon with the newest version and the given
// permissions, then returns it serialized in binary form.
|
||||
func (svc *Service) BakeMacaroon(
|
||||
ctx context.Context, permissions []bakery.Op,
|
||||
) ([]byte, error) {
|
||||
mac, err := svc.NewMacaroon(
|
||||
ctx, DefaultRootKeyID, permissions...,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return mac.M().MarshalBinary()
|
||||
}
|
||||
|
||||
// RawMacaroonFromContext is a helper function that extracts a raw macaroon
|
||||
// from the given incoming gRPC request context.
|
||||
func RawMacaroonFromContext(ctx context.Context) (string, error) {
|
||||
// Get macaroon bytes from context and unmarshal into macaroon.
|
||||
md, ok := metadata.FromIncomingContext(ctx)
|
||||
if !ok {
|
||||
return "", fmt.Errorf("unable to get metadata from context")
|
||||
}
|
||||
if len(md["macaroon"]) != 1 {
|
||||
return "", fmt.Errorf("expected 1 macaroon, got %d",
|
||||
len(md["macaroon"]))
|
||||
}
|
||||
|
||||
return md["macaroon"][0], nil
|
||||
}
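On the client side the counterpart is to hex-encode the serialized macaroon and attach it to the outgoing metadata under the same "macaroon" key. A minimal sketch, assuming imports of "encoding/hex" and "google.golang.org/grpc/metadata":

// contextWithMacaroon attaches a binary-serialized macaroon to an outgoing
// gRPC context so that RawMacaroonFromContext can recover it server-side.
func contextWithMacaroon(ctx context.Context, macBytes []byte) context.Context {
	return metadata.AppendToOutgoingContext(
		ctx, "macaroon", hex.EncodeToString(macBytes),
	)
}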
|
||||
|
||||
// SafeCopyMacaroon creates a copy of a macaroon that is safe to be used and
|
||||
// modified. This is necessary because the macaroon library's own Clone() method
|
||||
// is unsafe for certain edge cases, resulting in both the cloned and the
|
||||
// original macaroons being modified.
|
||||
func SafeCopyMacaroon(mac *macaroon.Macaroon) (*macaroon.Macaroon, error) {
|
||||
macBytes, err := mac.MarshalBinary()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
newMac := &macaroon.Macaroon{}
|
||||
if err := newMac.UnmarshalBinary(macBytes); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return newMac, nil
|
||||
}
|
||||
362
server/pkg/macaroons/service_test.go
Normal file
@@ -0,0 +1,362 @@
|
||||
package macaroons_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"path"
|
||||
"testing"
|
||||
|
||||
"github.com/ark-network/tools/kvdb"
|
||||
"github.com/ark-network/tools/macaroons"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"gopkg.in/macaroon-bakery.v2/bakery"
|
||||
"gopkg.in/macaroon-bakery.v2/bakery/checkers"
|
||||
macaroon "gopkg.in/macaroon.v2"
|
||||
)
|
||||
|
||||
var (
|
||||
testOperation = bakery.Op{
|
||||
Entity: "testEntity",
|
||||
Action: "read",
|
||||
}
|
||||
testOperationURI = bakery.Op{
|
||||
Entity: macaroons.PermissionEntityCustomURI,
|
||||
Action: "SomeMethod",
|
||||
}
|
||||
defaultPw = []byte("hello")
|
||||
)
|
||||
|
||||
// setupTestRootKeyStorage creates a dummy root key storage by
|
||||
// creating a temporary macaroons.db and initializing it with the
|
||||
// default password of 'hello'. The opened kvdb backend is returned, so
// that each test can create its own root key storage on top of it.
|
||||
func setupTestRootKeyStorage(t *testing.T) kvdb.Backend {
|
||||
db, err := kvdb.Create(
|
||||
kvdb.BoltBackendName, path.Join(t.TempDir(), "macaroons.db"), true,
|
||||
kvdb.DefaultDBTimeout,
|
||||
)
|
||||
require.NoError(t, err, "Error opening store DB")
|
||||
t.Cleanup(func() {
|
||||
require.NoError(t, db.Close())
|
||||
})
|
||||
|
||||
store, err := macaroons.NewRootKeyStorage(db)
|
||||
require.NoError(t, err, "Error creating root key store")
|
||||
|
||||
err = store.CreateUnlock(&defaultPw)
|
||||
require.NoError(t, store.Close())
|
||||
require.NoError(t, err, "error creating unlock")
|
||||
|
||||
return db
|
||||
}
|
||||
|
||||
// TestNewService tests the creation of the macaroon service.
|
||||
func TestNewService(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// First, initialize a dummy DB file with a store that the service
|
||||
// can read from. Make sure the file is removed in the end.
|
||||
db := setupTestRootKeyStorage(t)
|
||||
|
||||
rootKeyStore, err := macaroons.NewRootKeyStorage(db)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Second, create the new service instance, unlock it and pass in a
|
||||
// checker that we expect it to add to the bakery.
|
||||
service, err := macaroons.NewService(
|
||||
rootKeyStore, "lnd", false, macaroons.IPLockChecker,
|
||||
)
|
||||
require.NoError(t, err, "Error creating new service")
|
||||
defer service.Close()
|
||||
err = service.CreateUnlock(&defaultPw)
|
||||
require.NoError(t, err, "Error unlocking root key storage")
|
||||
|
||||
// Third, check if the created service can bake macaroons.
|
||||
_, err = service.NewMacaroon(context.TODO(), nil, testOperation)
|
||||
if err != macaroons.ErrMissingRootKeyID {
|
||||
t.Fatalf("Received %v instead of ErrMissingRootKeyID", err)
|
||||
}
|
||||
|
||||
macaroon, err := service.NewMacaroon(
|
||||
context.TODO(), macaroons.DefaultRootKeyID, testOperation,
|
||||
)
|
||||
require.NoError(t, err, "Error creating macaroon from service")
|
||||
if macaroon.Namespace().String() != "std:" {
|
||||
t.Fatalf("The created macaroon has an invalid namespace: %s",
|
||||
macaroon.Namespace().String())
|
||||
}
|
||||
|
||||
// Finally, check if the service has been initialized correctly and
|
||||
// the checker has been added.
|
||||
var checkerFound = false
|
||||
checker := service.Checker.FirstPartyCaveatChecker.(*checkers.Checker)
|
||||
for _, info := range checker.Info() {
|
||||
if info.Name == "ipaddr" &&
|
||||
info.Prefix == "" &&
|
||||
info.Namespace == "std" {
|
||||
checkerFound = true
|
||||
}
|
||||
}
|
||||
if !checkerFound {
|
||||
t.Fatalf("Checker '%s' not found in service.", "ipaddr")
|
||||
}
|
||||
}
|
||||
|
||||
// TestValidateMacaroon tests the validation of a macaroon that is in an
|
||||
// incoming context.
|
||||
func TestValidateMacaroon(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// First, initialize the service and unlock it.
|
||||
db := setupTestRootKeyStorage(t)
|
||||
rootKeyStore, err := macaroons.NewRootKeyStorage(db)
|
||||
require.NoError(t, err)
|
||||
service, err := macaroons.NewService(
|
||||
rootKeyStore, "lnd", false, macaroons.IPLockChecker,
|
||||
)
|
||||
require.NoError(t, err, "Error creating new service")
|
||||
defer service.Close()
|
||||
|
||||
err = service.CreateUnlock(&defaultPw)
|
||||
require.NoError(t, err, "Error unlocking root key storage")
|
||||
|
||||
// Then, create a new macaroon that we can serialize.
|
||||
macaroon, err := service.NewMacaroon(
|
||||
context.TODO(), macaroons.DefaultRootKeyID, testOperation,
|
||||
testOperationURI,
|
||||
)
|
||||
require.NoError(t, err, "Error creating macaroon from service")
|
||||
macaroonBinary, err := macaroon.M().MarshalBinary()
|
||||
require.NoError(t, err, "Error serializing macaroon")
|
||||
|
||||
// Because the macaroons are always passed in a context, we need to
|
||||
// mock one that has just the serialized macaroon as a value.
|
||||
md := metadata.New(map[string]string{
|
||||
"macaroon": hex.EncodeToString(macaroonBinary),
|
||||
})
|
||||
mockContext := metadata.NewIncomingContext(context.Background(), md)
|
||||
|
||||
// Finally, validate the macaroon against the required permissions.
|
||||
err = service.ValidateMacaroon(
|
||||
mockContext, []bakery.Op{testOperation}, "FooMethod",
|
||||
)
|
||||
require.NoError(t, err, "Error validating the macaroon")
|
||||
|
||||
// If the macaroon has the method specific URI permission, the list of
|
||||
// required entity/action pairs is irrelevant.
|
||||
err = service.ValidateMacaroon(
|
||||
mockContext, []bakery.Op{{Entity: "irrelevant"}}, "SomeMethod",
|
||||
)
|
||||
require.NoError(t, err, "Error validating the macaroon")
|
||||
}
|
||||
|
||||
// TestListMacaroonIDs checks that ListMacaroonIDs returns the expected result.
|
||||
func TestListMacaroonIDs(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// First, initialize a dummy DB file with a store that the service
|
||||
// can read from. Make sure the file is removed in the end.
|
||||
db := setupTestRootKeyStorage(t)
|
||||
|
||||
// Second, create the new service instance, unlock it and pass in a
|
||||
// checker that we expect it to add to the bakery.
|
||||
rootKeyStore, err := macaroons.NewRootKeyStorage(db)
|
||||
require.NoError(t, err)
|
||||
service, err := macaroons.NewService(
|
||||
rootKeyStore, "lnd", false, macaroons.IPLockChecker,
|
||||
)
|
||||
require.NoError(t, err, "Error creating new service")
|
||||
defer service.Close()
|
||||
|
||||
err = service.CreateUnlock(&defaultPw)
|
||||
require.NoError(t, err, "Error unlocking root key storage")
|
||||
|
||||
// Third, make 3 new macaroons with different root key IDs.
|
||||
expectedIDs := [][]byte{{1}, {2}, {3}}
|
||||
for _, v := range expectedIDs {
|
||||
_, err := service.NewMacaroon(context.TODO(), v, testOperation)
|
||||
require.NoError(t, err, "Error creating macaroon from service")
|
||||
}
|
||||
|
||||
// Finally, check that calling List returns the expected values.
|
||||
ids, _ := service.ListMacaroonIDs(context.TODO())
|
||||
require.Equal(t, expectedIDs, ids, "root key IDs mismatch")
|
||||
}
|
||||
|
||||
// TestDeleteMacaroonID tests the removal of a specific root key ID.
|
||||
func TestDeleteMacaroonID(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctxb := context.Background()
|
||||
|
||||
// First, initialize a dummy DB file with a store that the service
|
||||
// can read from. Make sure the file is removed in the end.
|
||||
db := setupTestRootKeyStorage(t)
|
||||
|
||||
// Second, create the new service instance, unlock it and pass in a
|
||||
// checker that we expect it to add to the bakery.
|
||||
rootKeyStore, err := macaroons.NewRootKeyStorage(db)
|
||||
require.NoError(t, err)
|
||||
service, err := macaroons.NewService(
|
||||
rootKeyStore, "lnd", false, macaroons.IPLockChecker,
|
||||
)
|
||||
require.NoError(t, err, "Error creating new service")
|
||||
defer service.Close()
|
||||
|
||||
err = service.CreateUnlock(&defaultPw)
|
||||
require.NoError(t, err, "Error unlocking root key storage")
|
||||
|
||||
// Third, check that removing encryptedKeyID returns an error.
|
||||
encryptedKeyID := []byte("enckey")
|
||||
_, err = service.DeleteMacaroonID(ctxb, encryptedKeyID)
|
||||
require.Equal(t, macaroons.ErrDeletionForbidden, err)
|
||||
|
||||
// Fourth, check that removing DefaultRootKeyID returns an error.
|
||||
_, err = service.DeleteMacaroonID(ctxb, macaroons.DefaultRootKeyID)
|
||||
require.Equal(t, macaroons.ErrDeletionForbidden, err)
|
||||
|
||||
// Fifth, check that removing an empty key ID returns an error.
|
||||
_, err = service.DeleteMacaroonID(ctxb, []byte{})
|
||||
require.Equal(t, macaroons.ErrMissingRootKeyID, err)
|
||||
|
||||
// Sixth, check that removing a non-existent key ID returns nil.
|
||||
nonExistedID := []byte("test-non-existed")
|
||||
deletedID, err := service.DeleteMacaroonID(ctxb, nonExistedID)
|
||||
require.NoError(t, err, "deleting macaroon ID got an error")
|
||||
require.Nil(t, deletedID, "deleting non-existent ID should return nil")
|
||||
|
||||
// Seventh, make 3 new macaroons with different root key IDs, and delete
|
||||
// one.
|
||||
expectedIDs := [][]byte{{1}, {2}, {3}}
|
||||
for _, v := range expectedIDs {
|
||||
_, err := service.NewMacaroon(ctxb, v, testOperation)
|
||||
require.NoError(t, err, "Error creating macaroon from service")
|
||||
}
|
||||
deletedID, err = service.DeleteMacaroonID(ctxb, expectedIDs[0])
|
||||
require.NoError(t, err, "deleting macaroon ID got an error")
|
||||
|
||||
// Finally, check that the ID is deleted.
|
||||
require.Equal(t, expectedIDs[0], deletedID, "expected ID to be removed")
|
||||
ids, _ := service.ListMacaroonIDs(ctxb)
|
||||
require.Equal(t, expectedIDs[1:], ids, "root key IDs mismatch")
|
||||
}
|
||||
|
||||
// TestCloneMacaroons tests that macaroons can be cloned correctly and that
|
||||
// modifications to the copy don't affect the original.
|
||||
func TestCloneMacaroons(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Get a configured version of the constraint function.
|
||||
constraintFunc := macaroons.TimeoutConstraint(3)
|
||||
|
||||
// Now we need a dummy macaroon that we can apply the constraint
|
||||
// function to.
|
||||
testMacaroon := createDummyMacaroon(t)
|
||||
err := constraintFunc(testMacaroon)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check that the caveat has an empty location.
|
||||
require.Equal(
|
||||
t, "", testMacaroon.Caveats()[0].Location,
|
||||
"expected caveat location to be empty, found: %s",
|
||||
testMacaroon.Caveats()[0].Location,
|
||||
)
|
||||
|
||||
// Make a copy of the macaroon.
|
||||
newMacCred, err := macaroons.NewMacaroonCredential(testMacaroon)
|
||||
require.NoError(t, err)
|
||||
|
||||
newMac := newMacCred.Macaroon
|
||||
require.Equal(
|
||||
t, "", newMac.Caveats()[0].Location,
|
||||
"expected new caveat location to be empty, found: %s",
|
||||
newMac.Caveats()[0].Location,
|
||||
)
|
||||
|
||||
// They should be deep equal as well.
|
||||
testMacaroonBytes, err := testMacaroon.MarshalBinary()
|
||||
require.NoError(t, err)
|
||||
newMacBytes, err := newMac.MarshalBinary()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, testMacaroonBytes, newMacBytes)
|
||||
|
||||
// Modify the caveat location on the old macaroon.
|
||||
testMacaroon.Caveats()[0].Location = "mars"
|
||||
|
||||
// The old macaroon's caveat location should be changed.
|
||||
require.Equal(
|
||||
t, "mars", testMacaroon.Caveats()[0].Location,
|
||||
"expected caveat location to be empty, found: %s",
|
||||
testMacaroon.Caveats()[0].Location,
|
||||
)
|
||||
|
||||
// The new macaroon's caveat location should stay untouched.
|
||||
require.Equal(
|
||||
t, "", newMac.Caveats()[0].Location,
|
||||
"expected new caveat location to be empty, found: %s",
|
||||
newMac.Caveats()[0].Location,
|
||||
)
|
||||
}
|
||||
|
||||
// TestMacaroonVersionDecode tests that we'll reject macaroons with an unknown
|
||||
// version.
|
||||
func TestMacaroonVersionDecode(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctxb := context.Background()
|
||||
|
||||
// First, initialize a dummy DB file with a store that the service
|
||||
// can read from. Make sure the file is removed in the end.
|
||||
db := setupTestRootKeyStorage(t)
|
||||
|
||||
// Second, create the new service instance, unlock it and pass in a
|
||||
// checker that we expect it to add to the bakery.
|
||||
rootKeyStore, err := macaroons.NewRootKeyStorage(db)
|
||||
require.NoError(t, err)
|
||||
|
||||
service, err := macaroons.NewService(
|
||||
rootKeyStore, "lnd", false, macaroons.IPLockChecker,
|
||||
)
|
||||
require.NoError(t, err, "Error creating new service")
|
||||
|
||||
defer service.Close()
|
||||
|
||||
t.Run("invalid_version", func(t *testing.T) {
|
||||
// Now that we have our sample service, we'll make a new custom
|
||||
// macaroon with an unknown version.
|
||||
testMac, err := macaroon.New(
|
||||
testRootKey, testID, testLocation, 1,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Next, we'll serialize the macaroon to its binary form. Since it was
// created with version 1, the service should reject it as unknown.
|
||||
testMacBytes, err := testMac.MarshalBinary()
|
||||
require.NoError(t, err)
|
||||
|
||||
// If we attempt to check the mac auth, then we should get a
|
||||
// failure that the version is unknown.
|
||||
err = service.CheckMacAuth(ctxb, testMacBytes, nil, "")
|
||||
require.ErrorIs(t, err, macaroons.ErrUnknownVersion)
|
||||
})
|
||||
|
||||
t.Run("invalid_id", func(t *testing.T) {
|
||||
// We'll now make a macaroon with a valid version, but modify
|
||||
// the ID to be rejected.
|
||||
badID := []byte{}
|
||||
testMac, err := macaroon.New(
|
||||
testRootKey, badID, testLocation, testVersion,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
testMacBytes, err := testMac.MarshalBinary()
|
||||
require.NoError(t, err)
|
||||
|
||||
// If we attempt to check the mac auth, then we should get a
|
||||
// failure that the ID is bad.
|
||||
err = service.CheckMacAuth(ctxb, testMacBytes, nil, "")
|
||||
require.ErrorIs(t, err, macaroons.ErrInvalidID)
|
||||
})
|
||||
}
|
||||
532
server/pkg/macaroons/store.go
Normal file
@@ -0,0 +1,532 @@
|
||||
package macaroons
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
"github.com/ark-network/tools/kvdb"
|
||||
"github.com/btcsuite/btcwallet/snacl"
|
||||
"github.com/btcsuite/btcwallet/walletdb"
|
||||
)
|
||||
|
||||
const (
|
||||
// RootKeyLen is the length of a root key.
|
||||
RootKeyLen = 32
|
||||
)
|
||||
|
||||
var (
|
||||
// rootKeyBucketName is the name of the root key store bucket.
|
||||
rootKeyBucketName = []byte("macrootkeys")
|
||||
|
||||
// DefaultRootKeyID is the ID of the default root key. The first is
|
||||
// just 0, to emulate the memory storage that comes with bakery.
|
||||
DefaultRootKeyID = []byte("0")
|
||||
|
||||
// encryptionKeyID is the name of the database key that stores the
|
||||
// encryption key, encrypted with a salted + hashed password. The
|
||||
// format is 32 bytes of salt, and the rest is encrypted key.
|
||||
encryptionKeyID = []byte("enckey")
|
||||
|
||||
// ErrAlreadyUnlocked specifies that the store has already been
|
||||
// unlocked.
|
||||
ErrAlreadyUnlocked = fmt.Errorf("macaroon store already unlocked")
|
||||
|
||||
// ErrStoreLocked specifies that the store needs to be unlocked with
|
||||
// a password.
|
||||
ErrStoreLocked = fmt.Errorf("macaroon store is locked")
|
||||
|
||||
// ErrPasswordRequired specifies that a nil password has been passed.
|
||||
ErrPasswordRequired = fmt.Errorf("a non-nil password is required")
|
||||
|
||||
// ErrKeyValueForbidden is used when the root key ID uses encryptedKeyID as
|
||||
// its value.
|
||||
ErrKeyValueForbidden = fmt.Errorf("root key ID value is not allowed")
|
||||
|
||||
// ErrRootKeyBucketNotFound specifies that there is no macaroon root key
|
||||
// bucket yet which can/should only happen if the store has been
|
||||
// corrupted or was initialized incorrectly.
|
||||
ErrRootKeyBucketNotFound = fmt.Errorf("root key bucket not found")
|
||||
|
||||
// ErrEncKeyNotFound specifies that there was no encryption key found
|
||||
// even if one was expected to be generated.
|
||||
ErrEncKeyNotFound = fmt.Errorf("macaroon encryption key not found")
|
||||
|
||||
// ErrDefaultRootKeyNotFound is returned when the default root key is
|
||||
// not found in the DB when it is expected to be.
|
||||
ErrDefaultRootKeyNotFound = fmt.Errorf("default root key not found")
|
||||
)
|
||||
|
||||
// RootKeyStorage implements the bakery.RootKeyStorage interface.
|
||||
type RootKeyStorage struct {
|
||||
kvdb.Backend
|
||||
|
||||
encKeyMtx sync.RWMutex
|
||||
encKey *snacl.SecretKey
|
||||
}
|
||||
|
||||
// NewRootKeyStorage creates a RootKeyStorage instance.
|
||||
func NewRootKeyStorage(db kvdb.Backend) (*RootKeyStorage, error) {
|
||||
// If the store's bucket doesn't exist, create it.
|
||||
err := kvdb.Update(db, func(tx kvdb.RwTx) error {
|
||||
_, err := tx.CreateTopLevelBucket(rootKeyBucketName)
|
||||
return err
|
||||
}, func() {})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Return the DB wrapped in a RootKeyStorage object.
|
||||
return &RootKeyStorage{
|
||||
Backend: db,
|
||||
encKey: nil,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// CreateUnlock sets an encryption key if one is not already set, otherwise it
|
||||
// checks if the password is correct for the stored encryption key.
|
||||
func (r *RootKeyStorage) CreateUnlock(password *[]byte) error {
|
||||
r.encKeyMtx.Lock()
|
||||
defer r.encKeyMtx.Unlock()
|
||||
|
||||
// Check if we've already unlocked the store; return an error if so.
|
||||
if r.encKey != nil {
|
||||
return ErrAlreadyUnlocked
|
||||
}
|
||||
|
||||
// Check if a nil password has been passed; return an error if so.
|
||||
if password == nil {
|
||||
return ErrPasswordRequired
|
||||
}
|
||||
|
||||
return kvdb.Update(r.Backend, func(tx kvdb.RwTx) error {
|
||||
bucket := tx.ReadWriteBucket(rootKeyBucketName)
|
||||
if bucket == nil {
|
||||
return ErrRootKeyBucketNotFound
|
||||
}
|
||||
dbKey := bucket.Get(encryptionKeyID)
|
||||
if len(dbKey) > 0 {
|
||||
// We've already stored a key, so try to unlock with
|
||||
// the password.
|
||||
encKey := &snacl.SecretKey{}
|
||||
err := encKey.Unmarshal(dbKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = encKey.DeriveKey(password)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.encKey = encKey
|
||||
return nil
|
||||
}
|
||||
|
||||
// We haven't yet stored a key, so create a new one.
|
||||
encKey, err := snacl.NewSecretKey(
|
||||
password, scryptN, scryptR, scryptP,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = bucket.Put(encryptionKeyID, encKey.Marshal())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.encKey = encKey
|
||||
return nil
|
||||
}, func() {})
|
||||
}
|
||||
|
||||
// ChangePassword decrypts all the macaroon root keys with the old password and
|
||||
// then encrypts them again with the new password.
|
||||
func (r *RootKeyStorage) ChangePassword(oldPw, newPw []byte) error {
|
||||
// We need the store to already be unlocked. With this we can make sure
|
||||
// that there already is a key in the DB.
|
||||
if r.encKey == nil {
|
||||
return ErrStoreLocked
|
||||
}
|
||||
|
||||
// Check if a nil password has been passed; return an error if so.
|
||||
if oldPw == nil || newPw == nil {
|
||||
return ErrPasswordRequired
|
||||
}
|
||||
|
||||
return kvdb.Update(r.Backend, func(tx kvdb.RwTx) error {
|
||||
bucket := tx.ReadWriteBucket(rootKeyBucketName)
|
||||
if bucket == nil {
|
||||
return ErrRootKeyBucketNotFound
|
||||
}
|
||||
|
||||
// The encryption key must be present, otherwise we are in the
|
||||
// wrong state to change the password.
|
||||
encKeyDB := bucket.Get(encryptionKeyID)
|
||||
if len(encKeyDB) == 0 {
|
||||
return ErrEncKeyNotFound
|
||||
}
|
||||
|
||||
// Unmarshal parameters for old encryption key and derive the
|
||||
// old key with them.
|
||||
encKeyOld := &snacl.SecretKey{}
|
||||
err := encKeyOld.Unmarshal(encKeyDB)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = encKeyOld.DeriveKey(&oldPw)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Create a new encryption key from the new password.
|
||||
encKeyNew, err := snacl.NewSecretKey(
|
||||
&newPw, scryptN, scryptR, scryptP,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// foundDefaultRootKey is used to keep track of if we have
|
||||
// found and re-encrypted the default root key so that we can
|
||||
// return an error if it is not found.
|
||||
var foundDefaultRootKey bool
|
||||
err = bucket.ForEach(func(k, v []byte) error {
|
||||
// Skip the key if it is the encryption key ID since
|
||||
// we do not want to re-encrypt this.
|
||||
if bytes.Equal(k, encryptionKeyID) {
|
||||
return nil
|
||||
}
|
||||
|
||||
if bytes.Equal(k, DefaultRootKeyID) {
|
||||
foundDefaultRootKey = true
|
||||
}
|
||||
|
||||
// Now try to decrypt the root key with the old
|
||||
// encryption key, encrypt it with the new one and then
|
||||
// store it in the DB.
|
||||
decryptedKey, err := encKeyOld.Decrypt(v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
encryptedKey, err := encKeyNew.Encrypt(decryptedKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return bucket.Put(k, encryptedKey)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !foundDefaultRootKey {
|
||||
return ErrDefaultRootKeyNotFound
|
||||
}
|
||||
|
||||
// Finally, store the new encryption key parameters in the DB
|
||||
// as well.
|
||||
err = bucket.Put(encryptionKeyID, encKeyNew.Marshal())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.encKey = encKeyNew
|
||||
return nil
|
||||
}, func() {})
|
||||
}
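A hedged sketch of the intended password-rotation flow (not part of this diff): the store has to be unlocked with the old password before ChangePassword can re-encrypt the root keys with the new one.

// rotateMacaroonPassword is an assumed helper showing the expected call order.
func rotateMacaroonPassword(store *macaroons.RootKeyStorage,
	oldPw, newPw []byte) error {

	if err := store.CreateUnlock(&oldPw); err != nil {
		return err
	}

	return store.ChangePassword(oldPw, newPw)
}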
|
||||
|
||||
// Get implements the Get method for the bakery.RootKeyStorage interface.
|
||||
func (r *RootKeyStorage) Get(_ context.Context, id []byte) ([]byte, error) {
|
||||
r.encKeyMtx.RLock()
|
||||
defer r.encKeyMtx.RUnlock()
|
||||
|
||||
if r.encKey == nil {
|
||||
return nil, ErrStoreLocked
|
||||
}
|
||||
var rootKey []byte
|
||||
err := kvdb.View(r.Backend, func(tx kvdb.RTx) error {
|
||||
bucket := tx.ReadBucket(rootKeyBucketName)
|
||||
if bucket == nil {
|
||||
return ErrRootKeyBucketNotFound
|
||||
}
|
||||
dbKey := bucket.Get(id)
|
||||
if len(dbKey) == 0 {
|
||||
return fmt.Errorf("root key with id %s doesn't exist",
|
||||
string(id))
|
||||
}
|
||||
|
||||
decKey, err := r.encKey.Decrypt(dbKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rootKey = make([]byte, len(decKey))
|
||||
copy(rootKey[:], decKey)
|
||||
return nil
|
||||
}, func() {
|
||||
rootKey = nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return rootKey, nil
|
||||
}
|
||||
|
||||
// RootKey implements the RootKey method for the bakery.RootKeyStorage
|
||||
// interface.
|
||||
func (r *RootKeyStorage) RootKey(ctx context.Context) ([]byte, []byte, error) {
|
||||
r.encKeyMtx.RLock()
|
||||
defer r.encKeyMtx.RUnlock()
|
||||
|
||||
if r.encKey == nil {
|
||||
return nil, nil, ErrStoreLocked
|
||||
}
|
||||
var rootKey []byte
|
||||
|
||||
// Read the root key ID from the context. If no key is specified in the
|
||||
// context, an error will be returned.
|
||||
id, err := RootKeyIDFromContext(ctx)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if bytes.Equal(id, encryptionKeyID) {
|
||||
return nil, nil, ErrKeyValueForbidden
|
||||
}
|
||||
|
||||
err = kvdb.Update(r.Backend, func(tx kvdb.RwTx) error {
|
||||
bucket := tx.ReadWriteBucket(rootKeyBucketName)
|
||||
if bucket == nil {
|
||||
return ErrRootKeyBucketNotFound
|
||||
}
|
||||
dbKey := bucket.Get(id)
|
||||
|
||||
// If there's a root key stored in the bucket, decrypt it and
|
||||
// return it.
|
||||
if len(dbKey) != 0 {
|
||||
decKey, err := r.encKey.Decrypt(dbKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rootKey = make([]byte, len(decKey))
|
||||
copy(rootKey[:], decKey[:])
|
||||
return nil
|
||||
}
|
||||
|
||||
// Otherwise, create a new root key, encrypt it,
|
||||
// and store it in the bucket.
|
||||
newKey, err := generateAndStoreNewRootKey(bucket, id, r.encKey)
|
||||
rootKey = newKey
|
||||
return err
|
||||
}, func() {
|
||||
rootKey = nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return rootKey, id, nil
|
||||
}
|
||||
|
||||
// GenerateNewRootKey generates a new macaroon root key, replacing the previous
|
||||
// root key if it existed.
|
||||
func (r *RootKeyStorage) GenerateNewRootKey() error {
|
||||
// We need the store to already be unlocked. With this we can make sure
|
||||
// that there already is a key in the DB that can be replaced.
|
||||
if r.encKey == nil {
|
||||
return ErrStoreLocked
|
||||
}
|
||||
return kvdb.Update(r.Backend, func(tx kvdb.RwTx) error {
|
||||
bucket := tx.ReadWriteBucket(rootKeyBucketName)
|
||||
if bucket == nil {
|
||||
return ErrRootKeyBucketNotFound
|
||||
}
|
||||
|
||||
// The default root key should be created even if it does not
|
||||
// yet exist, so we do this separately from the rest of the
|
||||
// root keys.
|
||||
_, err := generateAndStoreNewRootKey(
|
||||
bucket, DefaultRootKeyID, r.encKey,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Now iterate over all the other root keys that may exist
|
||||
// and re-generate each of them.
|
||||
return bucket.ForEach(func(k, v []byte) error {
|
||||
if bytes.Equal(k, encryptionKeyID) {
|
||||
return nil
|
||||
}
|
||||
|
||||
if bytes.Equal(k, DefaultRootKeyID) {
|
||||
return nil
|
||||
}
|
||||
|
||||
_, err := generateAndStoreNewRootKey(
|
||||
bucket, k, r.encKey,
|
||||
)
|
||||
|
||||
return err
|
||||
})
|
||||
}, func() {})
|
||||
}

// SetRootKey sets the default macaroon root key, replacing the previous root
// key if it existed.
func (r *RootKeyStorage) SetRootKey(rootKey []byte) error {
    if r.encKey == nil {
        return ErrStoreLocked
    }
    if len(rootKey) != RootKeyLen {
        return fmt.Errorf("root key must be %v bytes",
            RootKeyLen)
    }

    encryptedKey, err := r.encKey.Encrypt(rootKey)
    if err != nil {
        return err
    }

    return kvdb.Update(r.Backend, func(tx kvdb.RwTx) error {
        bucket := tx.ReadWriteBucket(rootKeyBucketName)
        if bucket == nil {
            return ErrRootKeyBucketNotFound
        }

        return bucket.Put(DefaultRootKeyID, encryptedKey)
    }, func() {})
}
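A hedged sketch (not part of this commit) of importing a caller-supplied default root key, e.g. when migrating key material from another store. It relies only on SetRootKey and RootKeyLen as defined above; the helper name and error text are assumptions.

// Hypothetical import helper, not in this diff.
func importDefaultRootKey(store *macaroons.RootKeyStorage, key []byte) error {
    // SetRootKey enforces the length itself, but checking up front gives
    // the caller a clearer error before touching the store.
    if len(key) != macaroons.RootKeyLen {
        return fmt.Errorf("root key must be %d bytes, got %d",
            macaroons.RootKeyLen, len(key))
    }
    return store.SetRootKey(key)
}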

// Close zeroes the encryption key stored in memory. It does not close the
// underlying database; that remains the caller's responsibility.
func (r *RootKeyStorage) Close() error {
    r.encKeyMtx.Lock()
    defer r.encKeyMtx.Unlock()

    if r.encKey != nil {
        r.encKey.Zero()
        r.encKey = nil
    }

    // Since we're not responsible for _creating_ the connection to our DB
    // backend, we also shouldn't close it. This should be handled
    // externally so as not to interfere with remote DB connections in case
    // we need to open/close the store twice, as happens in the password
    // change case.
    return nil
}
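An illustrative shutdown sequence (not part of this commit): because Close only zeroes the in-memory key, the caller also closes the backend, mirroring what the tests below do with store.Backend.Close(); the helper name is hypothetical.

// Hypothetical shutdown helper, not in this diff.
func shutdownStore(store *macaroons.RootKeyStorage) error {
    // Zero the in-memory encryption key first.
    if err := store.Close(); err != nil {
        return err
    }
    // Then close the kvdb backend the store was built on.
    return store.Backend.Close()
}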

// generateAndStoreNewRootKey creates a new random RootKeyLen-byte root key,
// encrypts it with the given encryption key and stores it in the bucket.
// Any previously set key will be overwritten.
func generateAndStoreNewRootKey(bucket walletdb.ReadWriteBucket, id []byte,
    key *snacl.SecretKey) ([]byte, error) {

    rootKey := make([]byte, RootKeyLen)
    if _, err := io.ReadFull(rand.Reader, rootKey); err != nil {
        return nil, err
    }

    encryptedKey, err := key.Encrypt(rootKey)
    if err != nil {
        return nil, err
    }
    return rootKey, bucket.Put(id, encryptedKey)
}

// ListMacaroonIDs returns all the root key ID values except the value of
// encryptionKeyID.
func (r *RootKeyStorage) ListMacaroonIDs(_ context.Context) ([][]byte, error) {
    r.encKeyMtx.RLock()
    defer r.encKeyMtx.RUnlock()

    // Check it's unlocked.
    if r.encKey == nil {
        return nil, ErrStoreLocked
    }

    var rootKeySlice [][]byte

    // Read all the items in the bucket and append the keys, which are the
    // root key IDs we want.
    err := kvdb.View(r.Backend, func(tx kvdb.RTx) error {
        // appendRootKey is a function closure that appends each root
        // key ID to rootKeySlice.
        appendRootKey := func(k, _ []byte) error {
            // Only append when the key value is not encryptionKeyID.
            if !bytes.Equal(k, encryptionKeyID) {
                rootKeySlice = append(rootKeySlice, k)
            }
            return nil
        }

        return tx.ReadBucket(rootKeyBucketName).ForEach(appendRootKey)
    }, func() {
        rootKeySlice = nil
    })
    if err != nil {
        return nil, err
    }

    return rootKeySlice, nil
}
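For illustration (not part of this commit), listing the stored IDs could back a CLI listing command; the function name and fmt output format are assumptions, the store call is the one defined above.

// Hypothetical listing helper, not in this diff.
func printMacaroonIDs(ctx context.Context, store *macaroons.RootKeyStorage) error {
    ids, err := store.ListMacaroonIDs(ctx)
    if err != nil {
        return err
    }
    for _, id := range ids {
        // IDs are arbitrary bytes, so print them hex-encoded.
        fmt.Printf("root key ID: %x\n", id)
    }
    return nil
}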

// DeleteMacaroonID removes one specific root key ID. If the root key ID is
// found and deleted, it will be returned.
func (r *RootKeyStorage) DeleteMacaroonID(
    _ context.Context, rootKeyID []byte) ([]byte, error) {

    r.encKeyMtx.RLock()
    defer r.encKeyMtx.RUnlock()

    // Check it's unlocked.
    if r.encKey == nil {
        return nil, ErrStoreLocked
    }

    // Check the rootKeyID is not empty.
    if len(rootKeyID) == 0 {
        return nil, ErrMissingRootKeyID
    }

    // Deleting encryptionKeyID or DefaultRootKeyID is not allowed.
    if bytes.Equal(rootKeyID, encryptionKeyID) ||
        bytes.Equal(rootKeyID, DefaultRootKeyID) {

        return nil, ErrDeletionForbidden
    }

    var rootKeyIDDeleted []byte
    err := kvdb.Update(r.Backend, func(tx kvdb.RwTx) error {
        bucket := tx.ReadWriteBucket(rootKeyBucketName)

        // Check the key can be found. If not, return nil.
        if bucket.Get(rootKeyID) == nil {
            return nil
        }

        // Once the key is found, we do the deletion.
        if err := bucket.Delete(rootKeyID); err != nil {
            return err
        }
        rootKeyIDDeleted = rootKeyID

        return nil
    }, func() {
        rootKeyIDDeleted = nil
    })
    if err != nil {
        return nil, err
    }

    return rootKeyIDDeleted, nil
}
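A hedged revocation sketch (not part of this commit): deleting a non-default root key ID invalidates every macaroon baked from it, while the default and encryption-key IDs are refused by DeleteMacaroonID itself. The helper name and error text are assumptions; the API call is the one defined above.

// Hypothetical revocation helper, not in this diff.
func revokeMacaroonID(ctx context.Context, store *macaroons.RootKeyStorage,
    id []byte) error {

    deleted, err := store.DeleteMacaroonID(ctx, id)
    if err != nil {
        return err
    }
    if deleted == nil {
        // Nothing matched the given ID, so nothing was deleted.
        return fmt.Errorf("root key ID %x not found", id)
    }
    return nil
}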
273
server/pkg/macaroons/store_test.go
Normal file
273
server/pkg/macaroons/store_test.go
Normal file
@@ -0,0 +1,273 @@
package macaroons_test

import (
    "context"
    "crypto/rand"
    "path"
    "testing"

    "github.com/ark-network/tools/kvdb"
    "github.com/ark-network/tools/macaroons"
    "github.com/btcsuite/btcwallet/snacl"
    "github.com/stretchr/testify/require"
)

var (
    defaultRootKeyIDContext = macaroons.ContextWithRootKeyID(
        context.Background(), macaroons.DefaultRootKeyID,
    )

    nonDefaultRootKeyIDContext = macaroons.ContextWithRootKeyID(
        context.Background(), []byte{1},
    )
)

// newTestStore creates a new bolt DB in a temporary directory and then
// initializes a root key storage for that DB.
func newTestStore(t *testing.T) (string, *macaroons.RootKeyStorage) {
    tempDir := t.TempDir()

    store := openTestStore(t, tempDir)

    return tempDir, store
}

// openTestStore opens an existing bolt DB and then initializes a root key
// storage for that DB.
func openTestStore(t *testing.T, tempDir string) *macaroons.RootKeyStorage {
    db, err := kvdb.Create(
        kvdb.BoltBackendName, path.Join(tempDir, "weks.db"), true,
        kvdb.DefaultDBTimeout,
    )
    require.NoError(t, err)

    store, err := macaroons.NewRootKeyStorage(db)
    if err != nil {
        _ = db.Close()
        t.Fatalf("Error creating root key store: %v", err)
    }

    t.Cleanup(func() {
        _ = store.Close()
        _ = db.Close()
    })

    return store
}

// TestStore tests the normal use cases of the store like creating, unlocking,
// reading keys and closing it.
func TestStore(t *testing.T) {
    tempDir, store := newTestStore(t)

    _, _, err := store.RootKey(context.TODO())
    require.Equal(t, macaroons.ErrStoreLocked, err)

    _, err = store.Get(context.TODO(), nil)
    require.Equal(t, macaroons.ErrStoreLocked, err)

    pw := []byte("weks")
    err = store.CreateUnlock(&pw)
    require.NoError(t, err)

    // Check ErrContextRootKeyID is returned when no root key ID is found
    // in the context.
    _, _, err = store.RootKey(context.TODO())
    require.Equal(t, macaroons.ErrContextRootKeyID, err)

    // Check ErrMissingRootKeyID is returned when an empty root key ID is used.
    emptyKeyID := make([]byte, 0)
    badCtx := macaroons.ContextWithRootKeyID(context.TODO(), emptyKeyID)
    _, _, err = store.RootKey(badCtx)
    require.Equal(t, macaroons.ErrMissingRootKeyID, err)

    // Create a context with an illegal root key ID value.
    encryptedKeyID := []byte("enckey")
    badCtx = macaroons.ContextWithRootKeyID(context.TODO(), encryptedKeyID)
    _, _, err = store.RootKey(badCtx)
    require.Equal(t, macaroons.ErrKeyValueForbidden, err)

    // Use a context with the default root key ID value.
    key, id, err := store.RootKey(defaultRootKeyIDContext)
    require.NoError(t, err)

    rootID := id
    require.Equal(t, macaroons.DefaultRootKeyID, rootID)

    key2, err := store.Get(defaultRootKeyIDContext, id)
    require.NoError(t, err)
    require.Equal(t, key, key2)

    badpw := []byte("badweks")
    err = store.CreateUnlock(&badpw)
    require.Equal(t, macaroons.ErrAlreadyUnlocked, err)

    _ = store.Close()
    _ = store.Backend.Close()

    // Between here and the re-opening of the store, it's possible to get
    // a double-close, but that's not such a big deal since the tests will
    // fail anyway in that case.
    store = openTestStore(t, tempDir)

    err = store.CreateUnlock(&badpw)
    require.Equal(t, snacl.ErrInvalidPassword, err)

    err = store.CreateUnlock(nil)
    require.Equal(t, macaroons.ErrPasswordRequired, err)

    _, _, err = store.RootKey(defaultRootKeyIDContext)
    require.Equal(t, macaroons.ErrStoreLocked, err)

    _, err = store.Get(defaultRootKeyIDContext, nil)
    require.Equal(t, macaroons.ErrStoreLocked, err)

    err = store.CreateUnlock(&pw)
    require.NoError(t, err)

    key, err = store.Get(defaultRootKeyIDContext, rootID)
    require.NoError(t, err)
    require.Equal(t, key, key2)

    key, id, err = store.RootKey(defaultRootKeyIDContext)
    require.NoError(t, err)
    require.Equal(t, key, key2)
    require.Equal(t, rootID, id)
}

// TestStoreGenerateNewRootKey tests that root keys can be replaced with new
// ones in the store without changing the password.
func TestStoreGenerateNewRootKey(t *testing.T) {
    _, store := newTestStore(t)

    // The store must be unlocked to replace the root key.
    err := store.GenerateNewRootKey()
    require.Equal(t, macaroons.ErrStoreLocked, err)

    // Unlock the store.
    pw := []byte("weks")
    err = store.CreateUnlock(&pw)
    require.NoError(t, err)

    // Read the default root key.
    oldRootKey1, _, err := store.RootKey(defaultRootKeyIDContext)
    require.NoError(t, err)

    // Read the non-default root key.
    oldRootKey2, _, err := store.RootKey(nonDefaultRootKeyIDContext)
    require.NoError(t, err)

    // Replace the root keys with new random keys.
    err = store.GenerateNewRootKey()
    require.NoError(t, err)

    // Finally, read both root keys from the DB and compare them to the ones
    // we got returned earlier. This makes sure that the encryption/
    // decryption of the key in the DB worked as expected too.
    newRootKey1, _, err := store.RootKey(defaultRootKeyIDContext)
    require.NoError(t, err)
    require.NotEqual(t, oldRootKey1, newRootKey1)

    newRootKey2, _, err := store.RootKey(nonDefaultRootKeyIDContext)
    require.NoError(t, err)
    require.NotEqual(t, oldRootKey2, newRootKey2)
}

// TestStoreSetRootKey tests that a root key can be set to a specified value.
func TestStoreSetRootKey(t *testing.T) {
    _, store := newTestStore(t)

    // Create a new random key.
    rootKey := make([]byte, 32)
    _, err := rand.Read(rootKey)
    require.NoError(t, err)

    // The store must be unlocked to set the root key.
    err = store.SetRootKey(rootKey)
    require.Equal(t, macaroons.ErrStoreLocked, err)

    // Unlock the store and read the current key.
    pw := []byte("weks")
    err = store.CreateUnlock(&pw)
    require.NoError(t, err)
    oldRootKey, _, err := store.RootKey(defaultRootKeyIDContext)
    require.NoError(t, err)

    // Ensure the new key is different from the old key.
    require.NotEqual(t, oldRootKey, rootKey)

    // Replace the root key with the new key.
    err = store.SetRootKey(rootKey)
    require.NoError(t, err)

    // Finally, read the root key from the DB and compare it to the one
    // we created earlier. This makes sure that the encryption/
    // decryption of the key in the DB worked as expected too.
    newRootKey, _, err := store.RootKey(defaultRootKeyIDContext)
    require.NoError(t, err)
    require.Equal(t, rootKey, newRootKey)
}

// TestStoreChangePassword tests that the password for the store can be changed
// without changing the root keys.
func TestStoreChangePassword(t *testing.T) {
    tempDir, store := newTestStore(t)

    // The store must be unlocked to replace the root keys.
    err := store.ChangePassword(nil, nil)
    require.Equal(t, macaroons.ErrStoreLocked, err)

    // Unlock the DB and read the current default root key and one other
    // non-default root key. Both of these should stay the same after
    // changing the password for the test to succeed.
    pw := []byte("weks")
    err = store.CreateUnlock(&pw)
    require.NoError(t, err)

    rootKey1, _, err := store.RootKey(defaultRootKeyIDContext)
    require.NoError(t, err)

    rootKey2, _, err := store.RootKey(nonDefaultRootKeyIDContext)
    require.NoError(t, err)

    // Both passwords must be set.
    err = store.ChangePassword(nil, nil)
    require.Equal(t, macaroons.ErrPasswordRequired, err)

    // Make sure that an error is returned if we try to change the password
    // without the correct old password.
    wrongPw := []byte("wrong")
    newPw := []byte("newpassword")
    err = store.ChangePassword(wrongPw, newPw)
    require.Equal(t, snacl.ErrInvalidPassword, err)

    // Now really do change the password.
    err = store.ChangePassword(pw, newPw)
    require.NoError(t, err)

    // Close the store and the underlying DB, so we need to create a new
    // store instance afterwards. Let's make sure we can't use it again
    // after closing.
    err = store.Close()
    require.NoError(t, err)
    err = store.Backend.Close()
    require.NoError(t, err)

    err = store.CreateUnlock(&newPw)
    require.Error(t, err)

    // Let's open it again and try unlocking with the new password.
    store = openTestStore(t, tempDir)
    err = store.CreateUnlock(&newPw)
    require.NoError(t, err)

    // Finally, read the root keys from the DB using the new password and
    // make sure that both root keys stayed the same.
    rootKeyDB1, _, err := store.RootKey(defaultRootKeyIDContext)
    require.NoError(t, err)
    require.Equal(t, rootKey1, rootKeyDB1)

    rootKeyDB2, _, err := store.RootKey(nonDefaultRootKeyIDContext)
    require.NoError(t, err)
    require.Equal(t, rootKey2, rootKeyDB2)
}