Files
opentf/internal/backend/remote-state/kubernetes/backend_test.go
Martin Atkins 67a5cd0911 statemgr+remote: context.Context parameters
This extends statemgr.Persistent, statemgr.Locker and remote.Client so that
they all expect context.Context parameters, and then updates all of the
existing implementations of those interfaces to support them.
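
For orientation, the Locker surface exercised by the test file below now
takes a context on both methods. The following is only a sketch; the
authoritative definitions live in internal/states/statemgr, and
statemgr.Persistent's methods gain a context parameter in the same way:

    // Sketch of the post-change shape; LockInfo details omitted.
    type Locker interface {
        Lock(ctx context.Context, info *LockInfo) (string, error)
        Unlock(ctx context.Context, id string) error
    }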

All of the calls to statemgr.Persistent and statemgr.Locker methods outside
of tests consistently pass context.TODO() for now, because the caller
landscape of these interfaces has some complications:

1. statemgr.Locker is also used by the clistate package for its state
   implementation that was derived from statemgr.Filesystem's predecessor,
   even though what clistate manages is not actually "state" in the sense
   of package statemgr. The callers of that are not yet ready to provide
   real contexts.

   In a future commit we'll either need to plumb context through to all of
   the clistate callers, or continue the effort to separate statemgr from
   clistate by introducing a clistate-specific "locker" API for it
   to use instead.

2. We call statemgr.Persistent and statemgr.Locker methods in situations
   where the active context might have already been cancelled, and so we'll
   need to make sure to ignore cancellation when calling those.

   This is mainly limited to PersistState and Unlock, since both need to
   be able to complete after a cancellation, but there are various
   codepaths that perform a Lock, Refresh, Persist, Unlock sequence and so
   it isn't yet clear where the best place is to enforce the invariant that
   Persist and Unlock must not be called with a cancelable context. We'll
   deal with that more in subsequent commits.
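
   For illustration only (the commit deliberately leaves this decision to
   later work, and unlockIgnoringCancel is a hypothetical helper): one way
   to enforce that invariant for Unlock is to derive a context that
   survives cancellation, e.g. with context.WithoutCancel (Go 1.21+):

       func unlockIgnoringCancel(ctx context.Context, l statemgr.Locker, id string) error {
           // WithoutCancel keeps the parent's values (including any trace
           // span) but is never cancelled, so Unlock can still complete
           // after the surrounding operation has been cancelled.
           return l.Unlock(context.WithoutCancel(ctx), id)
       }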

Within the various state manager and remote client implementations the
contexts _are_ wired together as well as possible given how these subsystems
are already laid out, and so once we deal with the problems above and make
callers provide suitable contexts, those contexts should be able to reach all
of the leaf API clients that might want to generate OpenTelemetry traces.
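
As a purely hypothetical illustration (exampleClient is not a real type in
this codebase), a remote.Client implementation can now hand the caller's
context straight to its HTTP layer, so cancellation and any OpenTelemetry
spans carried on the context reach the leaf request:

    type exampleClient struct {
        httpClient *http.Client
        stateURL   string
    }

    func (c *exampleClient) Get(ctx context.Context) (*remote.Payload, error) {
        req, err := http.NewRequestWithContext(ctx, http.MethodGet, c.stateURL, nil)
        if err != nil {
            return nil, err
        }
        resp, err := c.httpClient.Do(req)
        if err != nil {
            return nil, err
        }
        defer resp.Body.Close()
        data, err := io.ReadAll(resp.Body)
        if err != nil {
            return nil, err
        }
        return &remote.Payload{Data: data}, nil
    }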

Signed-off-by: Martin Atkins <mart@degeneration.co.uk>
2025-07-10 08:11:39 -07:00

203 lines
4.3 KiB
Go

// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package kubernetes

import (
	"context"
	"fmt"
	"math/rand"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/opentofu/opentofu/internal/backend"
	"github.com/opentofu/opentofu/internal/encryption"
	"github.com/opentofu/opentofu/internal/states/statemgr"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const (
	secretSuffix = "test-state"
)

var namespace string

// verify that we are doing ACC tests or the k8s tests specifically
func testACC(t *testing.T) {
	skip := os.Getenv("TF_ACC") == "" && os.Getenv("TF_K8S_TEST") == ""
	if skip {
		t.Log("k8s backend tests require setting TF_ACC or TF_K8S_TEST")
		t.Skip()
	}

	ns := os.Getenv("KUBE_NAMESPACE")
	if ns != "" {
		namespace = ns
	} else {
		namespace = "default"
	}
	cleanupK8sResources(t)
}

func TestBackend_impl(t *testing.T) {
	var _ backend.Backend = new(Backend)
}

func TestBackend(t *testing.T) {
	testACC(t)
	defer cleanupK8sResources(t)

	b1 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
		"secret_suffix": secretSuffix,
	}))

	// Test
	backend.TestBackendStates(t, b1)
}

func TestBackendLocks(t *testing.T) {
	testACC(t)
	defer cleanupK8sResources(t)

	// Get the backend. We need two to test locking.
	b1 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
		"secret_suffix": secretSuffix,
	}))

	b2 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
		"secret_suffix": secretSuffix,
	}))

	// Test
	backend.TestBackendStateLocks(t, b1, b2)
	backend.TestBackendStateForceUnlock(t, b1, b2)
}

func TestBackendLocksSoak(t *testing.T) {
	testACC(t)
	defer cleanupK8sResources(t)

	clientCount := 100
	lockAttempts := 100

	lockers := []statemgr.Locker{}
	for i := 0; i < clientCount; i++ {
		b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
			"secret_suffix": secretSuffix,
		}))

		s, err := b.StateMgr(t.Context(), backend.DefaultStateName)
		if err != nil {
			t.Fatalf("Error creating state manager: %v", err)
		}

		lockers = append(lockers, s.(statemgr.Locker))
	}

	wg := sync.WaitGroup{}
	for i, l := range lockers {
		wg.Add(1)
		go func(locker statemgr.Locker, n int) {
			defer wg.Done()

			li := statemgr.NewLockInfo()
			li.Operation = "test"
			li.Who = fmt.Sprintf("client-%v", n)

			for i := 0; i < lockAttempts; i++ {
				id, err := locker.Lock(t.Context(), li)
				if err != nil {
					continue
				}

				// hold onto the lock for a little bit
				time.Sleep(time.Duration(rand.Intn(10)) * time.Microsecond)

				err = locker.Unlock(t.Context(), id)
				if err != nil {
					t.Errorf("failed to unlock: %v", err)
				}
			}
		}(l, i)
	}

	wg.Wait()
}

func cleanupK8sResources(t *testing.T) {
	ctx := context.Background()

	// Get a backend to use the k8s client
	b1 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
		"secret_suffix": secretSuffix,
	}))

	b := b1.(*Backend)

	sClient, err := b.getKubernetesSecretClient()
	if err != nil {
		t.Fatal(err)
	}

	// Delete secrets
	opts := metav1.ListOptions{LabelSelector: tfstateKey + "=true"}
	secrets, err := sClient.List(ctx, opts)
	if err != nil {
		t.Fatal(err)
	}

	delProp := metav1.DeletePropagationBackground
	delOps := metav1.DeleteOptions{PropagationPolicy: &delProp}
	var errs []error

	for _, secret := range secrets.Items {
		labels := secret.GetLabels()
		key, ok := labels[tfstateSecretSuffixKey]
		if !ok {
			continue
		}

		if key == secretSuffix {
			err = sClient.Delete(ctx, secret.GetName(), delOps)
			if err != nil {
				errs = append(errs, err)
			}
		}
	}

	leaseClient, err := b.getKubernetesLeaseClient()
	if err != nil {
		t.Fatal(err)
	}

	// Delete leases
	leases, err := leaseClient.List(ctx, opts)
	if err != nil {
		t.Fatal(err)
	}

	for _, lease := range leases.Items {
		labels := lease.GetLabels()
		key, ok := labels[tfstateSecretSuffixKey]
		if !ok {
			continue
		}

		if key == secretSuffix {
			err = leaseClient.Delete(ctx, lease.GetName(), delOps)
			if err != nil {
				errs = append(errs, err)
			}
		}
	}

	if len(errs) > 0 {
		t.Fatal(errs)
	}
}