mirror of
https://github.com/opentffoundation/opentf.git
synced 2025-12-19 17:59:05 -05:00
This extends statemgr.Persistent, statemgr.Locker and remote.Client to all expect context.Context parameters, and then updates all of the existing implementations of those interfaces to support them. All of the calls to statemgr.Persistent and statemgr.Locker methods outside of tests are consistently context.TODO() for now, because the caller landscape of these interfaces has some complications: 1. statemgr.Locker is also used by the clistate package for its state implementation that was derived from statemgr.Filesystem's predecessor, even though what clistate manages is not actually "state" in the sense of package statemgr. The callers of that are not yet ready to provide real contexts. In a future commit we'll either need to plumb context through to all of the clistate callers, or continue the effort to separate statemgr from clistate by introducing a clistate-specific "locker" API for it to use instead. 2. We call statemgr.Persistent and statemgr.Locker methods in situations where the active context might have already been cancelled, and so we'll need to make sure to ignore cancellation when calling those. This is mainly limited to PersistState and Unlock, since both need to be able to complete after a cancellation, but there are various codepaths that perform a Lock, Refresh, Persist, Unlock sequence and so it isn't yet clear where is the best place to enforce the invariant that Persist and Unlock must not be called with a cancelable context. We'll deal with that more in subsequent commits. Within the various state manager and remote client implementations the contexts _are_ wired together as best as possible with how these subsystems are already laid out, and so once we deal with the problems above and make callers provide suitable contexts they should be able to reach all of the leaf API clients that might want to generate OpenTelemetry traces. Signed-off-by: Martin Atkins <mart@degeneration.co.uk>
508 lines
12 KiB
Go
508 lines
12 KiB
Go
// Copyright (c) The OpenTofu Authors
|
|
// SPDX-License-Identifier: MPL-2.0
|
|
// Copyright (c) 2023 HashiCorp, Inc.
|
|
// SPDX-License-Identifier: MPL-2.0
|
|
|
|
package consul
|
|
|
|
import (
|
|
"bytes"
|
|
"context"
|
|
"encoding/json"
|
|
"fmt"
|
|
"math/rand"
|
|
"net"
|
|
"reflect"
|
|
"strings"
|
|
"sync"
|
|
"testing"
|
|
"time"
|
|
|
|
"github.com/opentofu/opentofu/internal/backend"
|
|
"github.com/opentofu/opentofu/internal/encryption"
|
|
"github.com/opentofu/opentofu/internal/states/remote"
|
|
"github.com/opentofu/opentofu/internal/states/statemgr"
|
|
)
|
|
|
|
func TestRemoteClient_impl(t *testing.T) {
|
|
var _ remote.Client = new(RemoteClient)
|
|
var _ remote.ClientLocker = new(RemoteClient)
|
|
}
|
|
|
|
func TestRemoteClient(t *testing.T) {
|
|
srv := newConsulTestServer(t)
|
|
defer func() { _ = srv.Stop() }()
|
|
|
|
testCases := []string{
|
|
fmt.Sprintf("tf-unit/%s", time.Now().String()),
|
|
fmt.Sprintf("tf-unit/%s/", time.Now().String()),
|
|
}
|
|
|
|
for _, path := range testCases {
|
|
t.Run(path, func(*testing.T) {
|
|
// Get the backend
|
|
b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
|
|
"address": srv.HTTPAddr,
|
|
"path": path,
|
|
}))
|
|
|
|
// Grab the client
|
|
state, err := b.StateMgr(t.Context(), backend.DefaultStateName)
|
|
if err != nil {
|
|
t.Fatalf("err: %s", err)
|
|
}
|
|
|
|
// Test
|
|
remote.TestClient(t, state.(*remote.State).Client)
|
|
})
|
|
}
|
|
}
|
|
|
|
// test the gzip functionality of the client
|
|
func TestRemoteClient_gzipUpgrade(t *testing.T) {
|
|
srv := newConsulTestServer(t)
|
|
defer func() { _ = srv.Stop() }()
|
|
|
|
statePath := fmt.Sprintf("tf-unit/%s", time.Now().String())
|
|
|
|
// Get the backend
|
|
b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
|
|
"address": srv.HTTPAddr,
|
|
"path": statePath,
|
|
}))
|
|
|
|
// Grab the client
|
|
state, err := b.StateMgr(t.Context(), backend.DefaultStateName)
|
|
if err != nil {
|
|
t.Fatalf("err: %s", err)
|
|
}
|
|
|
|
// Test
|
|
remote.TestClient(t, state.(*remote.State).Client)
|
|
|
|
// create a new backend with gzip
|
|
b = backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
|
|
"address": srv.HTTPAddr,
|
|
"path": statePath,
|
|
"gzip": true,
|
|
}))
|
|
|
|
// Grab the client
|
|
state, err = b.StateMgr(t.Context(), backend.DefaultStateName)
|
|
if err != nil {
|
|
t.Fatalf("err: %s", err)
|
|
}
|
|
|
|
// Test
|
|
remote.TestClient(t, state.(*remote.State).Client)
|
|
}
|
|
|
|
// TestConsul_largeState tries to write a large payload using the Consul state
|
|
// manager, as there is a limit to the size of the values in the KV store it
|
|
// will need to be split up before being saved and put back together when read.
|
|
func TestConsul_largeState(t *testing.T) {
|
|
srv := newConsulTestServer(t)
|
|
defer func() { _ = srv.Stop() }()
|
|
|
|
path := "tf-unit/test-large-state"
|
|
|
|
b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
|
|
"address": srv.HTTPAddr,
|
|
"path": path,
|
|
}))
|
|
|
|
s, err := b.StateMgr(t.Context(), backend.DefaultStateName)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
c := s.(*remote.State).Client.(*RemoteClient)
|
|
c.Path = path
|
|
|
|
// testPaths fails the test if the keys found at the prefix don't match
|
|
// what is expected
|
|
testPaths := func(t *testing.T, expected []string) {
|
|
kv := c.Client.KV()
|
|
pairs, _, err := kv.List(c.Path, nil)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
res := make([]string, 0)
|
|
for _, p := range pairs {
|
|
res = append(res, p.Key)
|
|
}
|
|
if !reflect.DeepEqual(res, expected) {
|
|
t.Fatalf("Wrong keys: %#v", res)
|
|
}
|
|
}
|
|
|
|
testPayload := func(t *testing.T, data map[string]string, keys []string) {
|
|
payload, err := json.Marshal(data)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
err = c.Put(t.Context(), payload)
|
|
if err != nil {
|
|
t.Fatal("could not put payload", err)
|
|
}
|
|
|
|
remote, err := c.Get(t.Context())
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
if !bytes.Equal(payload, remote.Data) {
|
|
t.Fatal("the data do not match")
|
|
}
|
|
|
|
testPaths(t, keys)
|
|
}
|
|
|
|
// The default limit for the size of the value in Consul is 524288 bytes
|
|
testPayload(
|
|
t,
|
|
map[string]string{
|
|
"foo": strings.Repeat("a", 524288+2),
|
|
},
|
|
[]string{
|
|
"tf-unit/test-large-state",
|
|
"tf-unit/test-large-state/tfstate.2cb96f52c9fff8e0b56cb786ec4d2bed/0",
|
|
"tf-unit/test-large-state/tfstate.2cb96f52c9fff8e0b56cb786ec4d2bed/1",
|
|
},
|
|
)
|
|
|
|
// This payload is just short enough to be stored but will be bigger when
|
|
// going through the Transaction API as it will be base64 encoded
|
|
testPayload(
|
|
t,
|
|
map[string]string{
|
|
"foo": strings.Repeat("a", 524288-10),
|
|
},
|
|
[]string{
|
|
"tf-unit/test-large-state",
|
|
"tf-unit/test-large-state/tfstate.4f407ace136a86521fd0d366972fe5c7/0",
|
|
},
|
|
)
|
|
|
|
// We try to replace the payload with a small one, the old chunks should be removed
|
|
testPayload(
|
|
t,
|
|
map[string]string{"var": "a"},
|
|
[]string{"tf-unit/test-large-state"},
|
|
)
|
|
|
|
// Test with gzip and chunks
|
|
b = backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
|
|
"address": srv.HTTPAddr,
|
|
"path": path,
|
|
"gzip": true,
|
|
}))
|
|
|
|
s, err = b.StateMgr(t.Context(), backend.DefaultStateName)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
c = s.(*remote.State).Client.(*RemoteClient)
|
|
c.Path = path
|
|
|
|
// We need a long random string so it results in multiple chunks even after
|
|
// being gzipped
|
|
|
|
// We use a fixed seed so the test can be reproducible
|
|
randomizer := rand.New(rand.NewSource(1234))
|
|
RandStringRunes := func(n int) string {
|
|
var letterRunes = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
|
|
b := make([]rune, n)
|
|
for i := range b {
|
|
b[i] = letterRunes[randomizer.Intn(len(letterRunes))]
|
|
}
|
|
return string(b)
|
|
}
|
|
|
|
testPayload(
|
|
t,
|
|
map[string]string{
|
|
"bar": RandStringRunes(5 * (524288 + 2)),
|
|
},
|
|
[]string{
|
|
"tf-unit/test-large-state",
|
|
"tf-unit/test-large-state/tfstate.58e8160335864b520b1cc7f2222a4019/0",
|
|
"tf-unit/test-large-state/tfstate.58e8160335864b520b1cc7f2222a4019/1",
|
|
"tf-unit/test-large-state/tfstate.58e8160335864b520b1cc7f2222a4019/2",
|
|
"tf-unit/test-large-state/tfstate.58e8160335864b520b1cc7f2222a4019/3",
|
|
},
|
|
)
|
|
|
|
// Deleting the state should remove all chunks
|
|
err = c.Delete(t.Context())
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
testPaths(t, []string{})
|
|
}
|
|
|
|
func TestConsul_stateLock(t *testing.T) {
|
|
srv := newConsulTestServer(t)
|
|
defer func() { _ = srv.Stop() }()
|
|
|
|
testCases := []string{
|
|
fmt.Sprintf("tf-unit/%s", time.Now().String()),
|
|
fmt.Sprintf("tf-unit/%s/", time.Now().String()),
|
|
}
|
|
|
|
for _, path := range testCases {
|
|
t.Run(path, func(*testing.T) {
|
|
// create 2 instances to get 2 remote.Clients
|
|
sA, err := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
|
|
"address": srv.HTTPAddr,
|
|
"path": path,
|
|
})).StateMgr(t.Context(), backend.DefaultStateName)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
sB, err := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
|
|
"address": srv.HTTPAddr,
|
|
"path": path,
|
|
})).StateMgr(t.Context(), backend.DefaultStateName)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
remote.TestRemoteLocks(t, sA.(*remote.State).Client, sB.(*remote.State).Client)
|
|
})
|
|
}
|
|
}
|
|
|
|
func TestConsul_destroyLock(t *testing.T) {
|
|
srv := newConsulTestServer(t)
|
|
defer func() { _ = srv.Stop() }()
|
|
|
|
testCases := []string{
|
|
fmt.Sprintf("tf-unit/%s", time.Now().String()),
|
|
fmt.Sprintf("tf-unit/%s/", time.Now().String()),
|
|
}
|
|
|
|
testLock := func(client *RemoteClient, lockPath string) {
|
|
// get the lock val
|
|
pair, _, err := client.Client.KV().Get(lockPath, nil)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if pair != nil {
|
|
t.Fatalf("lock key not cleaned up at: %s", pair.Key)
|
|
}
|
|
}
|
|
|
|
for _, path := range testCases {
|
|
t.Run(path, func(*testing.T) {
|
|
// Get the backend
|
|
b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
|
|
"address": srv.HTTPAddr,
|
|
"path": path,
|
|
}))
|
|
|
|
// Grab the client
|
|
s, err := b.StateMgr(t.Context(), backend.DefaultStateName)
|
|
if err != nil {
|
|
t.Fatalf("err: %s", err)
|
|
}
|
|
|
|
clientA := s.(*remote.State).Client.(*RemoteClient)
|
|
|
|
info := statemgr.NewLockInfo()
|
|
id, err := clientA.Lock(t.Context(), info)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
lockPath := clientA.Path + lockSuffix
|
|
|
|
if err := clientA.Unlock(t.Context(), id); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
testLock(clientA, lockPath)
|
|
|
|
// The release the lock from a second client to test the
|
|
// `tofu force-unlock <lock_id>` functionality
|
|
s, err = b.StateMgr(t.Context(), backend.DefaultStateName)
|
|
if err != nil {
|
|
t.Fatalf("err: %s", err)
|
|
}
|
|
|
|
clientB := s.(*remote.State).Client.(*RemoteClient)
|
|
|
|
info = statemgr.NewLockInfo()
|
|
id, err = clientA.Lock(t.Context(), info)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
if err := clientB.Unlock(t.Context(), id); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
testLock(clientA, lockPath)
|
|
|
|
err = clientA.Unlock(t.Context(), id)
|
|
|
|
if err == nil {
|
|
t.Fatal("consul lock should have been lost")
|
|
}
|
|
if err.Error() != "consul lock was lost" {
|
|
t.Fatal("got wrong error", err)
|
|
}
|
|
})
|
|
}
|
|
}
|
|
|
|
func TestConsul_lostLock(t *testing.T) {
|
|
srv := newConsulTestServer(t)
|
|
defer func() { _ = srv.Stop() }()
|
|
|
|
path := fmt.Sprintf("tf-unit/%s", time.Now().String())
|
|
|
|
// create 2 instances to get 2 remote.Clients
|
|
sA, err := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
|
|
"address": srv.HTTPAddr,
|
|
"path": path,
|
|
})).StateMgr(t.Context(), backend.DefaultStateName)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
sB, err := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
|
|
"address": srv.HTTPAddr,
|
|
"path": path + "-not-used",
|
|
})).StateMgr(t.Context(), backend.DefaultStateName)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
info := statemgr.NewLockInfo()
|
|
info.Operation = "test-lost-lock"
|
|
id, err := sA.Lock(t.Context(), info)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
reLocked := make(chan struct{})
|
|
testLockHook = func() {
|
|
close(reLocked)
|
|
testLockHook = nil
|
|
}
|
|
|
|
// now we use the second client to break the lock
|
|
kv := sB.(*remote.State).Client.(*RemoteClient).Client.KV()
|
|
_, err = kv.Delete(path+lockSuffix, nil)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
<-reLocked
|
|
|
|
if err := sA.Unlock(t.Context(), id); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
}
|
|
|
|
func TestConsul_lostLockConnection(t *testing.T) {
|
|
srv := newConsulTestServer(t)
|
|
defer func() { _ = srv.Stop() }()
|
|
|
|
// create an "unreliable" network by closing all the consul client's
|
|
// network connections
|
|
conns := &unreliableConns{}
|
|
origDialFn := dialContext
|
|
defer func() {
|
|
dialContext = origDialFn
|
|
}()
|
|
dialContext = conns.DialContext
|
|
|
|
path := fmt.Sprintf("tf-unit/%s", time.Now().String())
|
|
|
|
b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
|
|
"address": srv.HTTPAddr,
|
|
"path": path,
|
|
}))
|
|
|
|
s, err := b.StateMgr(t.Context(), backend.DefaultStateName)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
info := statemgr.NewLockInfo()
|
|
info.Operation = "test-lost-lock-connection"
|
|
id, err := s.Lock(t.Context(), info)
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
|
|
// kill the connection a few times
|
|
for i := 0; i < 3; i++ {
|
|
dialed := conns.dialedDone()
|
|
// kill any open connections
|
|
conns.Kill(t)
|
|
// wait for a new connection to be dialed, and kill it again
|
|
<-dialed
|
|
}
|
|
|
|
if err := s.Unlock(t.Context(), id); err != nil {
|
|
t.Fatal("unlock error:", err)
|
|
}
|
|
}
|
|
|
|
// unreliableConns intercepts dialing so a test can deliberately destroy every
// connection it has handed out, simulating an unreliable network.
type unreliableConns struct {
	sync.Mutex              // guards conns and dialCallback
	conns        []net.Conn // every connection dialed so far, cleared by Kill
	dialCallback func()     // if non-nil, invoked (under the lock) after each successful dial
}
|
|
|
|
func (u *unreliableConns) DialContext(ctx context.Context, netw, addr string) (net.Conn, error) {
|
|
u.Lock()
|
|
defer u.Unlock()
|
|
|
|
dialer := &net.Dialer{}
|
|
conn, err := dialer.DialContext(ctx, netw, addr)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
u.conns = append(u.conns, conn)
|
|
|
|
if u.dialCallback != nil {
|
|
u.dialCallback()
|
|
}
|
|
|
|
return conn, nil
|
|
}
|
|
|
|
func (u *unreliableConns) dialedDone() chan struct{} {
|
|
u.Lock()
|
|
defer u.Unlock()
|
|
dialed := make(chan struct{})
|
|
u.dialCallback = func() {
|
|
defer close(dialed)
|
|
u.dialCallback = nil
|
|
}
|
|
|
|
return dialed
|
|
}
|
|
|
|
// Kill these with a deadline, just to make sure we don't end up with any EOFs
|
|
// that get ignored.
|
|
func (u *unreliableConns) Kill(t *testing.T) {
|
|
u.Lock()
|
|
defer u.Unlock()
|
|
|
|
for _, conn := range u.conns {
|
|
err := conn.(*net.TCPConn).SetDeadline(time.Now())
|
|
if err != nil {
|
|
t.Fatal("failed to kill connection:", err)
|
|
}
|
|
}
|
|
u.conns = nil
|
|
}
|