// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package oss

import (
	"bytes"
	"crypto/md5"
	"fmt"
	"strings"
	"testing"
	"time"

	"github.com/opentofu/opentofu/internal/backend"
	"github.com/opentofu/opentofu/internal/encryption"
	"github.com/opentofu/opentofu/internal/states/remote"
	"github.com/opentofu/opentofu/internal/states/statefile"
	"github.com/opentofu/opentofu/internal/states/statemgr"
)

// NOTE: Before running these tests, create an OTS (TableStore) instance named 'tf-oss-remote'.
var RemoteTestUsedOTSEndpoint = "https://tf-oss-remote.cn-hangzhou.ots.aliyuncs.com"

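// These are acceptance tests: each test body calls testACC first which, per
// this package's shared test helpers, is expected to skip the test unless
// acceptance-test mode and Alibaba Cloud credentials are configured in the
// environment.
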
func TestRemoteClient_impl(t *testing.T) {
	var _ remote.Client = new(RemoteClient)
	var _ remote.ClientLocker = new(RemoteClient)
}

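// remote.TestClient (a shared helper from the states/remote package) is used
// below; roughly, it round-trips state payloads through the client's Get, Put,
// and Delete methods to check basic remote.Client behaviour.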
func TestRemoteClient(t *testing.T) {
	testACC(t)
	bucketName := fmt.Sprintf("tf-remote-oss-test-%x", time.Now().Unix())
	path := "testState"

	b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
		"bucket":  bucketName,
		"prefix":  path,
		"encrypt": true,
	})).(*Backend)

	createOSSBucket(t, b.ossClient, bucketName)
	defer deleteOSSBucket(t, b.ossClient, bucketName)

	state, err := b.StateMgr(t.Context(), backend.DefaultStateName)
	if err != nil {
		t.Fatal(err)
	}

	remote.TestClient(t, state.(*remote.State).Client)
}

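// remote.TestRemoteLocks (also a shared helper from the states/remote package)
// is used below; roughly, it acquires a lock through one client, verifies that
// the second client cannot take the same lock, and then releases it and checks
// again.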
func TestRemoteClientLocks(t *testing.T) {
	testACC(t)
	bucketName := fmt.Sprintf("tf-remote-oss-test-%x", time.Now().Unix())
	tableName := fmt.Sprintf("tfRemoteTestForce%x", time.Now().Unix())
	path := "testState"

	b1 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
		"bucket":              bucketName,
		"prefix":              path,
		"encrypt":             true,
		"tablestore_table":    tableName,
		"tablestore_endpoint": RemoteTestUsedOTSEndpoint,
	})).(*Backend)

	b2 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
		"bucket":              bucketName,
		"prefix":              path,
		"encrypt":             true,
		"tablestore_table":    tableName,
		"tablestore_endpoint": RemoteTestUsedOTSEndpoint,
	})).(*Backend)

	createOSSBucket(t, b1.ossClient, bucketName)
	defer deleteOSSBucket(t, b1.ossClient, bucketName)
	createTablestoreTable(t, b1.otsClient, tableName)
	defer deleteTablestoreTable(t, b1.otsClient, tableName)

	s1, err := b1.StateMgr(t.Context(), backend.DefaultStateName)
	if err != nil {
		t.Fatal(err)
	}

	s2, err := b2.StateMgr(t.Context(), backend.DefaultStateName)
	if err != nil {
		t.Fatal(err)
	}

	remote.TestRemoteLocks(t, s1.(*remote.State).Client, s2.(*remote.State).Client)
}

// verify that the backend can handle more than one state in the same table
func TestRemoteClientLocks_multipleStates(t *testing.T) {
	testACC(t)
	bucketName := fmt.Sprintf("tf-remote-oss-test-force-%x", time.Now().Unix())
	tableName := fmt.Sprintf("tfRemoteTestForce%x", time.Now().Unix())
	path := "testState"

	b1 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
		"bucket":              bucketName,
		"prefix":              path,
		"encrypt":             true,
		"tablestore_table":    tableName,
		"tablestore_endpoint": RemoteTestUsedOTSEndpoint,
	})).(*Backend)

	b2 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
		"bucket":              bucketName,
		"prefix":              path,
		"encrypt":             true,
		"tablestore_table":    tableName,
		"tablestore_endpoint": RemoteTestUsedOTSEndpoint,
	})).(*Backend)

	createOSSBucket(t, b1.ossClient, bucketName)
	defer deleteOSSBucket(t, b1.ossClient, bucketName)
	createTablestoreTable(t, b1.otsClient, tableName)
	defer deleteTablestoreTable(t, b1.otsClient, tableName)

	s1, err := b1.StateMgr(t.Context(), "s1")
	if err != nil {
		t.Fatal(err)
	}
	if _, err := s1.Lock(t.Context(), statemgr.NewLockInfo()); err != nil {
		t.Fatal("failed to get lock for s1:", err)
	}

	// s1 is now locked, s2 should not be locked as it's a different state file
	s2, err := b2.StateMgr(t.Context(), "s2")
	if err != nil {
		t.Fatal(err)
	}
	if _, err := s2.Lock(t.Context(), statemgr.NewLockInfo()); err != nil {
		t.Fatal("failed to get lock for s2:", err)
	}
}

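// The force-unlock flow below mirrors what `tofu force-unlock` relies on: the
// lock is taken through one state manager, and a separate state manager for
// the same state then releases it using only the lock ID.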
// verify that we can unlock a state with an existing lock
func TestRemoteForceUnlock(t *testing.T) {
	testACC(t)
	bucketName := fmt.Sprintf("tf-remote-oss-test-force-%x", time.Now().Unix())
	tableName := fmt.Sprintf("tfRemoteTestForce%x", time.Now().Unix())
	path := "testState"

	b1 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
		"bucket":              bucketName,
		"prefix":              path,
		"encrypt":             true,
		"tablestore_table":    tableName,
		"tablestore_endpoint": RemoteTestUsedOTSEndpoint,
	})).(*Backend)

	b2 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
		"bucket":              bucketName,
		"prefix":              path,
		"encrypt":             true,
		"tablestore_table":    tableName,
		"tablestore_endpoint": RemoteTestUsedOTSEndpoint,
	})).(*Backend)

	createOSSBucket(t, b1.ossClient, bucketName)
	defer deleteOSSBucket(t, b1.ossClient, bucketName)
	createTablestoreTable(t, b1.otsClient, tableName)
	defer deleteTablestoreTable(t, b1.otsClient, tableName)

	// first test with default
	s1, err := b1.StateMgr(t.Context(), backend.DefaultStateName)
	if err != nil {
		t.Fatal(err)
	}

	info := statemgr.NewLockInfo()
	info.Operation = "test"
	info.Who = "clientA"

	lockID, err := s1.Lock(t.Context(), info)
	if err != nil {
		t.Fatal("unable to get initial lock:", err)
	}

	// s1 is now locked, get the same state through s2 and unlock it
	s2, err := b2.StateMgr(t.Context(), backend.DefaultStateName)
	if err != nil {
		t.Fatal("failed to get default state to force unlock:", err)
	}

	if err := s2.Unlock(t.Context(), lockID); err != nil {
		t.Fatal("failed to force-unlock default state")
	}

	// now try the same thing with a named state
	s1, err = b1.StateMgr(t.Context(), "test")
	if err != nil {
		t.Fatal(err)
	}

	info = statemgr.NewLockInfo()
	info.Operation = "test"
	info.Who = "clientA"

	lockID, err = s1.Lock(t.Context(), info)
	if err != nil {
		t.Fatal("unable to get initial lock:", err)
	}

	// s1 is now locked, get the same state through s2 and unlock it
	s2, err = b2.StateMgr(t.Context(), "test")
	if err != nil {
		t.Fatal("failed to get named state to force unlock:", err)
	}

	if err = s2.Unlock(t.Context(), lockID); err != nil {
		t.Fatal("failed to force-unlock named state")
	}
}

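// putMD5, getMD5, and deleteMD5, exercised below, manage the expected state
// checksum; given that this test provisions a TableStore table, the checksum
// record is presumably stored there alongside the lock entries so that a later
// Get can detect a stale object read from OSS.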
func TestRemoteClient_clientMD5(t *testing.T) {
	testACC(t)

	bucketName := fmt.Sprintf("tf-remote-oss-test-%x", time.Now().Unix())
	tableName := fmt.Sprintf("tfRemoteTestForce%x", time.Now().Unix())
	path := "testState"

	b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
		"bucket":              bucketName,
		"prefix":              path,
		"tablestore_table":    tableName,
		"tablestore_endpoint": RemoteTestUsedOTSEndpoint,
	})).(*Backend)

	createOSSBucket(t, b.ossClient, bucketName)
	defer deleteOSSBucket(t, b.ossClient, bucketName)
	createTablestoreTable(t, b.otsClient, tableName)
	defer deleteTablestoreTable(t, b.otsClient, tableName)

	s, err := b.StateMgr(t.Context(), backend.DefaultStateName)
	if err != nil {
		t.Fatal(err)
	}
	client := s.(*remote.State).Client.(*RemoteClient)

	sum := md5.Sum([]byte("test"))

	if err := client.putMD5(sum[:]); err != nil {
		t.Fatal(err)
	}

	getSum, err := client.getMD5()
	if err != nil {
		t.Fatal(err)
	}

	if !bytes.Equal(getSum, sum[:]) {
		t.Fatalf("getMD5 returned the wrong checksum: expected %x, got %x", sum[:], getSum)
	}

	if err := client.deleteMD5(); err != nil {
		t.Fatal(err)
	}

	if getSum, err := client.getMD5(); err == nil {
		t.Fatalf("expected getMD5 error, got none. checksum: %x", getSum)
	}
}

// verify that a client won't return a state with an incorrect checksum.
func TestRemoteClient_stateChecksum(t *testing.T) {
	testACC(t)

	bucketName := fmt.Sprintf("tf-remote-oss-test-%x", time.Now().Unix())
	tableName := fmt.Sprintf("tfRemoteTestForce%x", time.Now().Unix())
	path := "testState"

	b1 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
		"bucket":              bucketName,
		"prefix":              path,
		"tablestore_table":    tableName,
		"tablestore_endpoint": RemoteTestUsedOTSEndpoint,
	})).(*Backend)

	createOSSBucket(t, b1.ossClient, bucketName)
	defer deleteOSSBucket(t, b1.ossClient, bucketName)
	createTablestoreTable(t, b1.otsClient, tableName)
	defer deleteTablestoreTable(t, b1.otsClient, tableName)

	s1, err := b1.StateMgr(t.Context(), backend.DefaultStateName)
	if err != nil {
		t.Fatal(err)
	}
	client1 := s1.(*remote.State).Client

	// create an old and new state version to persist
	s := statemgr.TestFullInitialState()
	sf := &statefile.File{State: s}
	var oldState bytes.Buffer
	if err := statefile.Write(sf, &oldState, encryption.StateEncryptionDisabled()); err != nil {
		t.Fatal(err)
	}
	sf.Serial++
	var newState bytes.Buffer
	if err := statefile.Write(sf, &newState, encryption.StateEncryptionDisabled()); err != nil {
		t.Fatal(err)
	}

	// Use b2, configured without a tablestore_table, to bypass the lock table and write the state directly.
	// client2 will write the "incorrect" state, simulating OSS eventual-consistency delays.
	b2 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
		"bucket": bucketName,
		"prefix": path,
	})).(*Backend)
	s2, err := b2.StateMgr(t.Context(), backend.DefaultStateName)
	if err != nil {
		t.Fatal(err)
	}
	client2 := s2.(*remote.State).Client

	// write the new state through client2 so that there is no checksum yet
	if err := client2.Put(t.Context(), newState.Bytes()); err != nil {
		t.Fatal(err)
	}

	// verify that we can pull a state without a checksum
	if _, err := client1.Get(t.Context()); err != nil {
		t.Fatal(err)
	}

	// write the new state back with its checksum
	if err := client1.Put(t.Context(), newState.Bytes()); err != nil {
		t.Fatal(err)
	}

	// put an empty state in place to check for panics during get
	if err := client2.Put(t.Context(), []byte{}); err != nil {
		t.Fatal(err)
	}

	// remove the timeouts so we can fail immediately
	origTimeout := consistencyRetryTimeout
	origInterval := consistencyRetryPollInterval
	defer func() {
		consistencyRetryTimeout = origTimeout
		consistencyRetryPollInterval = origInterval
	}()
	consistencyRetryTimeout = 0
	consistencyRetryPollInterval = 0

	// fetching an empty state through client1 should now error out due to a
	// mismatched checksum.
	if _, err := client1.Get(t.Context()); !strings.HasPrefix(err.Error(), errBadChecksumFmt[:80]) {
		t.Fatalf("expected state checksum error: got %s", err)
	}

	// put the old state in place of the new, without updating the checksum
	if err := client2.Put(t.Context(), oldState.Bytes()); err != nil {
		t.Fatal(err)
	}

	// fetching the wrong state through client1 should now error out due to a
	// mismatched checksum.
	if _, err := client1.Get(t.Context()); !strings.HasPrefix(err.Error(), errBadChecksumFmt[:80]) {
		t.Fatalf("expected state checksum error: got %s", err)
	}

	// register a hook that swaps the correct state back in while the next Get is retrying
	testChecksumHook = func() {
		if err := client2.Put(t.Context(), newState.Bytes()); err != nil {
			t.Fatal(err)
		}
		testChecksumHook = nil
	}

	consistencyRetryTimeout = origTimeout

	// this final Get will initially fail the checksum verification; the above
	// callback then updates the state to the correct version, and Get should
	// retry automatically and succeed.
	if _, err := client1.Get(t.Context()); err != nil {
		t.Fatal(err)
	}
}

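// As exercised above, Get appears to compare the MD5 of the fetched payload
// against the stored checksum and, on mismatch, keeps polling until
// consistencyRetryTimeout elapses (sleeping consistencyRetryPollInterval
// between attempts), calling testChecksumHook between retries as a test seam;
// errBadChecksumFmt is the error reported once the timeout is exhausted.
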
// Tests the IsLockingEnabled method for the OSS remote client.
// It checks if locking is enabled based on the otsTable field.
func TestRemoteClient_IsLockingEnabled(t *testing.T) {
	tests := []struct {
		name       string
		otsTable   string
		wantResult bool
	}{
		{
			name:       "Locking enabled when otsTable is set",
			otsTable:   "my-lock-table",
			wantResult: true,
		},
		{
			name:       "Locking disabled when otsTable is empty",
			otsTable:   "",
			wantResult: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			client := &RemoteClient{
				otsTable: tt.otsTable,
			}

			gotResult := client.IsLockingEnabled()
			if gotResult != tt.wantResult {
				t.Errorf("IsLockingEnabled() = %v; want %v", gotResult, tt.wantResult)
			}
		})
	}
}