Merged

Changes from 31 of 33 commits:
1a123f3
stub out reusable storage test
mjarmy May 9, 2020
b0deb8d
implement reusable inmem test
mjarmy May 9, 2020
d1478b4
work on reusable raft test
mjarmy May 9, 2020
78564b4
stub out simple raft test
mjarmy May 10, 2020
a471e20
switch to reusable raft storage
mjarmy May 10, 2020
19df1dd
cleanup tests
mjarmy May 10, 2020
16c1c76
cleanup tests
mjarmy May 10, 2020
bbc4fd8
refactor tests
mjarmy May 10, 2020
8bfa728
verify raft configuration
mjarmy May 10, 2020
ee70e7a
cleanup tests
mjarmy May 10, 2020
81bf813
stub out reuseStorage
mjarmy May 10, 2020
1945ca3
use common base address across clusters
mjarmy May 10, 2020
bd24198
attempt to reuse raft cluster
mjarmy May 10, 2020
1d4d630
tinker with test
mjarmy May 11, 2020
a2b00cd
fix typo
mjarmy May 11, 2020
1383331
start debugging
mjarmy May 11, 2020
dac71b6
debug raft configuration
mjarmy May 11, 2020
4d71213
add BaseClusterListenPort to TestCluster options
mjarmy May 11, 2020
41be768
use BaseClusterListenPort in test
mjarmy May 11, 2020
0d21616
raft join works now
mjarmy May 12, 2020
b4bbbf8
misc cleanup of raft tests
mjarmy May 12, 2020
03843f1
use configurable base port for raft test
mjarmy May 12, 2020
5092aa7
clean up raft tests
mjarmy May 12, 2020
7957339
add parallelized tests for all backends
mjarmy May 12, 2020
a8b1c79
clean up reusable storage tests
mjarmy May 12, 2020
46809ce
remove debugging code from startClusterListener()
mjarmy May 12, 2020
c76e1fe
improve comments in testhelpers
mjarmy May 12, 2020
af12c62
improve comments in teststorage
mjarmy May 12, 2020
b7343d1
improve comments and test logging
mjarmy May 12, 2020
c67cf50
fix typo in vault/testing
mjarmy May 12, 2020
7cabc82
fix typo in comments
mjarmy May 12, 2020
c7cdbbf
remove debugging code
mjarmy May 13, 2020
e66fc83
make number of cores parameterizable in test
mjarmy May 13, 2020
54 changes: 53 additions & 1 deletion helper/testhelpers/testhelpers.go
@@ -277,7 +277,7 @@ func WaitForNCoresUnsealed(t testing.T, cluster *vault.TestCluster, n int) {
time.Sleep(time.Second)
}

t.Fatalf("%d cores were not sealed", n)
t.Fatalf("%d cores were not unsealed", n)
}

func WaitForNCoresSealed(t testing.T, cluster *vault.TestCluster, n int) {
@@ -386,6 +386,12 @@ func RekeyCluster(t testing.T, cluster *vault.TestCluster, recovery bool) [][]byte
return newKeys
}

// TestRaftServerAddressProvider is a ServerAddressProvider that uses the
// ClusterAddr() of each node to provide raft addresses.
//
// Note that TestRaftServerAddressProvider should only be used in cases where
// cores that are part of a raft configuration have already had
// startClusterListener() called (via either unsealing or raft joining).
type TestRaftServerAddressProvider struct {
Cluster *vault.TestCluster
}
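
The body of ServerAddr for this provider is collapsed in the diff view above. For orientation, here is a minimal sketch of what a ClusterAddr()-based lookup can look like; it assumes each core exposes a NodeID matching the raft server ID and a URL-formatted ClusterAddr(), and it is illustrative rather than a copy of the collapsed code:

func (p *TestRaftServerAddressProvider) ServerAddr(id raftlib.ServerID) (raftlib.ServerAddress, error) {
	// Illustrative sketch only -- the committed method body is collapsed
	// in this diff. Requires "net/url" and "errors".
	for _, core := range p.Cluster.Cores {
		if core.NodeID == string(id) {
			parsed, err := url.Parse(core.ClusterAddr())
			if err != nil {
				return "", err
			}
			// Raft expects "host:port", not the full cluster URL.
			return raftlib.ServerAddress(parsed.Host), nil
		}
	}
	return "", errors.New("could not find cluster addr")
}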
@@ -457,3 +463,49 @@ func RaftClusterJoinNodes(t testing.T, cluster *vault.TestCluster) {

WaitForNCoresUnsealed(t, cluster, 3)
}

// HardcodedServerAddressProvider is a ServerAddressProvider that uses
// a hardcoded map of raft node addresses.
//
// It is useful in cases where the raft configuration is known ahead of time,
// but some of the cores have not yet had startClusterListener() called (via
// either unsealing or raft joining), and thus do not yet have a ClusterAddr()
// assigned.
type HardcodedServerAddressProvider struct {
Entries map[raftlib.ServerID]raftlib.ServerAddress
}

func (p *HardcodedServerAddressProvider) ServerAddr(id raftlib.ServerID) (raftlib.ServerAddress, error) {
if addr, ok := p.Entries[id]; ok {
return addr, nil
}
return "", errors.New("could not find cluster addr")
}

// NewHardcodedServerAddressProvider is a convenience function that makes a
// ServerAddressProvider from a given cluster address base port.
func NewHardcodedServerAddressProvider(baseClusterPort int) raftlib.ServerAddressProvider {

entries := make(map[raftlib.ServerID]raftlib.ServerAddress)

for i := 0; i < vault.DefaultNumCores; i++ {
id := fmt.Sprintf("core-%d", i)
addr := fmt.Sprintf("127.0.0.1:%d", baseClusterPort+i)
entries[raftlib.ServerID(id)] = raftlib.ServerAddress(addr)
}

return &HardcodedServerAddressProvider{
entries,
}
}

// SetRaftAddressProviders sets a ServerAddressProvider for all the nodes in a
// cluster.
func SetRaftAddressProviders(t testing.T, cluster *vault.TestCluster, provider raftlib.ServerAddressProvider) {

atomic.StoreUint32(&vault.UpdateClusterAddrForTests, 1)

for _, core := range cluster.Cores {
core.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(provider)
}
}
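
Taken together, the two helpers above let a test pin down raft addresses before any cluster listeners exist. A hedged usage sketch, given an already-built (still sealed) *vault.TestCluster; the port value here is hypothetical and must match the BaseClusterListenPort the cluster was configured with:

// baseClusterPort is illustrative; it must agree with the
// BaseClusterListenPort passed in the TestClusterOptions.
baseClusterPort := 51000
provider := testhelpers.NewHardcodedServerAddressProvider(baseClusterPort)

// Install the provider on every core's raft backend before unsealing,
// so raft can resolve peers whose cluster listeners have not started yet.
testhelpers.SetRaftAddressProviders(t, cluster, provider)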
142 changes: 142 additions & 0 deletions helper/testhelpers/teststorage/teststorage_reusable.go
@@ -0,0 +1,142 @@
package teststorage

import (
"fmt"
"io/ioutil"
"os"
"runtime/debug"

"github.com/mitchellh/go-testing-interface"

hclog "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/physical/raft"
"github.com/hashicorp/vault/vault"
)

// ReusableStorage wraps a physical backend so that it can be re-used across
// multiple test clusters in sequence. It is useful for testing things like
// seal migration, wherein a given physical backend must be re-used as several
// test clusters are sequentially created, tested, and discarded.
type ReusableStorage struct {

// IsRaft specifies whether the storage is using a raft backend.
IsRaft bool

// Setup should be called just before a new TestCluster is created.
Setup ClusterSetupMutator

// Cleanup should be called after a TestCluster is no longer
// needed -- generally in a defer, just before the call to
// cluster.Cleanup().
Cleanup func(t testing.T, cluster *vault.TestCluster)
}

// StorageCleanup is a function that should be called once, at the very end
// of a given unit test, after each cluster in the sequence has been
// created, tested, and discarded.
type StorageCleanup func()

// MakeReusableStorage makes a physical backend that can be re-used across
// multiple test clusters in sequence.
func MakeReusableStorage(t testing.T, logger hclog.Logger, bundle *vault.PhysicalBackendBundle) (ReusableStorage, StorageCleanup) {

storage := ReusableStorage{
IsRaft: false,

Setup: func(conf *vault.CoreConfig, opts *vault.TestClusterOptions) {
opts.PhysicalFactory = func(t testing.T, coreIdx int, logger hclog.Logger) *vault.PhysicalBackendBundle {
if coreIdx == 0 {
// We intentionally do not clone the backend's Cleanup func,
// because we don't want it to be run until the entire test has
// been completed.
return &vault.PhysicalBackendBundle{
Backend: bundle.Backend,
HABackend: bundle.HABackend,
}
}
return nil
}
},

// No-op
Cleanup: func(t testing.T, cluster *vault.TestCluster) {
},
}

cleanup := func() {
if bundle.Cleanup != nil {
bundle.Cleanup()
}
}

return storage, cleanup
}

// MakeReusableRaftStorage makes a physical raft backend that can be re-used
// across multiple test clusters in sequence.
func MakeReusableRaftStorage(t testing.T, logger hclog.Logger) (ReusableStorage, StorageCleanup) {

raftDirs := make([]string, vault.DefaultNumCores)
for i := 0; i < vault.DefaultNumCores; i++ {
raftDirs[i] = makeRaftDir(t)
}

storage := ReusableStorage{
IsRaft: true,

Setup: func(conf *vault.CoreConfig, opts *vault.TestClusterOptions) {
conf.DisablePerformanceStandby = true
opts.KeepStandbysSealed = true
opts.PhysicalFactory = func(t testing.T, coreIdx int, logger hclog.Logger) *vault.PhysicalBackendBundle {
return makeReusableRaftBackend(t, coreIdx, logger, raftDirs[coreIdx])
}
},

// Close open files being used by raft.
Cleanup: func(t testing.T, cluster *vault.TestCluster) {
for _, core := range cluster.Cores {
raftStorage := core.UnderlyingRawStorage.(*raft.RaftBackend)
if err := raftStorage.Close(); err != nil {
t.Fatal(err)
}
}
},
}

cleanup := func() {
for _, rd := range raftDirs {
os.RemoveAll(rd)
}
}

return storage, cleanup
}

func makeRaftDir(t testing.T) string {
raftDir, err := ioutil.TempDir("", "vault-raft-")
if err != nil {
t.Fatal(err)
}
t.Logf("raft dir: %s", raftDir)
return raftDir
}

func makeReusableRaftBackend(t testing.T, coreIdx int, logger hclog.Logger, raftDir string) *vault.PhysicalBackendBundle {

nodeID := fmt.Sprintf("core-%d", coreIdx)
conf := map[string]string{
"path": raftDir,
"node_id": nodeID,
"performance_multiplier": "8",
}

backend, err := raft.NewRaftBackend(conf, logger)
if err != nil {
debug.PrintStack()
t.Fatal(err)
}

return &vault.PhysicalBackendBundle{
Backend: backend,
}
}
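
To make the intended lifecycle concrete, here is a hedged sketch of how a seal-migration-style test might drive this file's API. MakeReusableRaftStorage, Setup, and the two cleanup hooks come from the diff above; the cluster construction details are simplified stand-ins, not code from this PR:

// Sketch of the reusable-storage lifecycle; assumes the usual test
// imports (hclog, vault, teststorage) and a testing.T named t.
logger := hclog.NewNullLogger()
storage, cleanup := teststorage.MakeReusableRaftStorage(t, logger)
defer cleanup() // remove the raft dirs once every cluster is done

runOneCluster := func() {
	var conf vault.CoreConfig
	var opts vault.TestClusterOptions

	// Setup must run just before each new TestCluster is created.
	storage.Setup(&conf, &opts)

	cluster := vault.NewTestCluster(t, &conf, &opts)
	cluster.Start()
	defer func() {
		// Close raft's open files before tearing the cluster down.
		storage.Cleanup(t, cluster)
		cluster.Cleanup()
	}()

	// ... exercise the cluster ...
}

// The same raft directories are carried across sequential clusters.
runOneCluster()
runOneCluster()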