opentf/internal/backend/remote-state/pg/client_test.go
Martin Atkins 67a5cd0911 statemgr+remote: context.Context parameters
This extends statemgr.Persistent, statemgr.Locker, and remote.Client so
that their methods all expect context.Context parameters, and then updates
all of the existing implementations of those interfaces to support them.
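
For illustration, a minimal sketch of the shape of the change; the method
sets here are simplified assumptions, not the real interface definitions,
which are larger:

    // Sketch only: simplified stand-ins for the statemgr interfaces.
    // The point is the new leading context.Context parameter on each
    // method, not the exact method sets.
    package sketch

    import "context"

    // LockInfo stands in for the real statemgr.LockInfo.
    type LockInfo struct{ ID string }

    type Locker interface {
        Lock(ctx context.Context, info *LockInfo) (string, error)
        Unlock(ctx context.Context, id string) error
    }

    type Persistent interface {
        RefreshState(ctx context.Context) error
        PersistState(ctx context.Context) error
    }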

All of the context arguments passed to statemgr.Persistent and
statemgr.Locker methods outside of tests are consistently context.TODO()
for now, because the caller landscape of these interfaces has some
complications:

1. statemgr.Locker is also used by the clistate package for its state
   implementation that was derived from statemgr.Filesystem's predecessor,
   even though what clistate manages is not actually "state" in the sense
   of package statemgr. The callers of that are not yet ready to provide
   real contexts.

   In a future commit we'll either need to plumb context through to all of
   the clistate callers, or continue the effort to separate statemgr from
   clistate by introducing a clistate-specific "locker" API for it
   to use instead.

2. We call statemgr.Persistent and statemgr.Locker methods in situations
   where the active context might have already been cancelled, and so we'll
   need to make sure to ignore cancellation when calling those.

   This is mainly limited to PersistState and Unlock, since both need to
   be able to complete after a cancellation, but there are various
   codepaths that perform a Lock, Refresh, Persist, Unlock sequence, so
   it isn't yet clear where the best place is to enforce the invariant
   that Persist and Unlock must not be called with a cancelable context
   (see the sketch after this list). We'll deal with that more in
   subsequent commits.
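
One plausible shape for enforcing that invariant, assuming Go 1.21+'s
context.WithoutCancel and the sketch interfaces above; where such a guard
should actually live is exactly the open question:

    // Sketch: run the cleanup sequence with a context that keeps the
    // values of ctx (useful for tracing) but ignores its cancellation,
    // so that PersistState and Unlock can still complete even after the
    // operation's context has been cancelled.
    func persistAndUnlock(ctx context.Context, mgr Persistent, locker Locker, lockID string) error {
        ctx = context.WithoutCancel(ctx)
        if err := mgr.PersistState(ctx); err != nil {
            return err
        }
        return locker.Unlock(ctx, lockID)
    }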

Within the various state manager and remote client implementations the
contexts _are_ wired together as well as possible given how these
subsystems are already laid out, so once we deal with the problems above
and make callers provide suitable contexts they should be able to reach
all of the leaf API clients that might want to generate OpenTelemetry
traces.
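
As a hypothetical illustration of the payoff (fetchState and its
parameters are invented names, not part of this change): once a real
context reaches a leaf client, the requests it issues can join the
caller's trace:

    // Sketch: a hypothetical leaf API call (imports: context, io, net/http).
    // Because ctx flows all the way down here, an OpenTelemetry-instrumented
    // http.Client, or a span started at this point, can attach the request
    // to the caller's trace.
    func fetchState(ctx context.Context, client *http.Client, url string) ([]byte, error) {
        req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
        if err != nil {
            return nil, err
        }
        resp, err := client.Do(req)
        if err != nil {
            return nil, err
        }
        defer resp.Body.Close()
        return io.ReadAll(resp.Body)
    }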

Signed-off-by: Martin Atkins <mart@degeneration.co.uk>
2025-07-10 08:11:39 -07:00

// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package pg

// Create the test database: createdb terraform_backend_pg_test
// TF_ACC=1 GO111MODULE=on go test -v -mod=vendor -timeout=2m -parallel=4 github.com/opentofu/opentofu/internal/backend/remote-state/pg

import (
	"database/sql"
	"fmt"
	"testing"
	"time"

	"github.com/opentofu/opentofu/internal/backend"
	"github.com/opentofu/opentofu/internal/encryption"
	"github.com/opentofu/opentofu/internal/states/remote"
	"github.com/opentofu/opentofu/internal/states/statemgr"
)

func TestRemoteClient_impl(t *testing.T) {
	var _ remote.Client = new(RemoteClient)
	var _ remote.ClientLocker = new(RemoteClient)
}

func TestRemoteClient(t *testing.T) {
	testACC(t)
	connStr := getDatabaseUrl()
	schemaName := fmt.Sprintf("terraform_%s", t.Name())
	tableName := fmt.Sprintf("terraform_%s", t.Name())
	indexName := fmt.Sprintf("terraform_%s", t.Name())

	dbCleaner, err := sql.Open("postgres", connStr)
	if err != nil {
		t.Fatal(err)
	}
	defer dropSchema(t, dbCleaner, schemaName)

	config := backend.TestWrapConfig(map[string]interface{}{
		"conn_str":    connStr,
		"schema_name": schemaName,
		"table_name":  tableName,
		"index_name":  indexName,
	})
	b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), config).(*Backend)
	if b == nil {
		t.Fatal("Backend could not be configured")
	}

	s, err := b.StateMgr(t.Context(), backend.DefaultStateName)
	if err != nil {
		t.Fatal(err)
	}

	remote.TestClient(t, s.(*remote.State).Client)
}

func TestRemoteLocks(t *testing.T) {
	testACC(t)
	connStr := getDatabaseUrl()
	schemaName := fmt.Sprintf("terraform_%s", t.Name())
	tableName := fmt.Sprintf("terraform_%s", t.Name())
	indexName := fmt.Sprintf("terraform_%s", t.Name())

	dbCleaner, err := sql.Open("postgres", connStr)
	if err != nil {
		t.Fatal(err)
	}
	defer dropSchema(t, dbCleaner, schemaName)

	config := backend.TestWrapConfig(map[string]interface{}{
		"conn_str":    connStr,
		"schema_name": schemaName,
		"table_name":  tableName,
		"index_name":  indexName,
	})

	b1 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), config).(*Backend)
	s1, err := b1.StateMgr(t.Context(), backend.DefaultStateName)
	if err != nil {
		t.Fatal(err)
	}

	b2 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), config).(*Backend)
	s2, err := b2.StateMgr(t.Context(), backend.DefaultStateName)
	if err != nil {
		t.Fatal(err)
	}

	remote.TestRemoteLocks(t, s1.(*remote.State).Client, s2.(*remote.State).Client)
}

// TestConcurrentCreationLocksInDifferentSchemas tests whether backends with different schemas
// affect each other while taking global workspace creation locks.
func TestConcurrentCreationLocksInDifferentSchemas(t *testing.T) {
	testACC(t)
	connStr := getDatabaseUrl()

	dbCleaner, err := sql.Open("postgres", connStr)
	if err != nil {
		t.Fatal(err)
	}

	firstSchema := fmt.Sprintf("terraform_%s_1", t.Name())
	firstTable := fmt.Sprintf("terraform_%s_1", t.Name())
	firstIndex := fmt.Sprintf("terraform_%s_1", t.Name())
	secondSchema := fmt.Sprintf("terraform_%s_2", t.Name())
	secondTable := fmt.Sprintf("terraform_%s_2", t.Name())
	secondIndex := fmt.Sprintf("terraform_%s_2", t.Name())
	defer dropSchema(t, dbCleaner, firstSchema)
	defer dropSchema(t, dbCleaner, secondSchema)

	firstConfig := backend.TestWrapConfig(map[string]interface{}{
		"conn_str":    connStr,
		"schema_name": firstSchema,
		"table_name":  firstTable,
		"index_name":  firstIndex,
	})
	secondConfig := backend.TestWrapConfig(map[string]interface{}{
		"conn_str":    connStr,
		"schema_name": secondSchema,
		"table_name":  secondTable,
		"index_name":  secondIndex,
	})

	//nolint:errcheck // this is a test, I am fine with panic here
	firstBackend := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), firstConfig).(*Backend)
	//nolint:errcheck // this is a test, I am fine with panic here
	secondBackend := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), secondConfig).(*Backend)
	//nolint:errcheck // this is a test, I am fine with panic here
	thirdBackend := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), secondConfig).(*Backend)

	// We operate on remote clients instead of state managers to simulate the
	// first call to backend.StateMgr(), which creates an empty state in the
	// default workspace.
	firstClient := &RemoteClient{
		Client:     firstBackend.db,
		Name:       backend.DefaultStateName,
		SchemaName: firstBackend.schemaName,
		TableName:  firstBackend.tableName,
		IndexName:  firstBackend.indexName,
	}
	secondClient := &RemoteClient{
		Client:     secondBackend.db,
		Name:       backend.DefaultStateName,
		SchemaName: secondBackend.schemaName,
		TableName:  secondBackend.tableName,
		IndexName:  secondBackend.indexName,
	}
	thirdClient := &RemoteClient{
		Client:     thirdBackend.db,
		Name:       backend.DefaultStateName,
		SchemaName: thirdBackend.schemaName,
		TableName:  thirdBackend.tableName,
		IndexName:  thirdBackend.indexName,
	}

	// It doesn't matter what lock info we supply for workspace creation.
	lock := &statemgr.LockInfo{
		ID:        "1",
		Operation: "test",
		Info:      "This needs to lock for workspace creation",
		Who:       "me",
		Version:   "1",
		Created:   time.Date(1999, 8, 19, 0, 0, 0, 0, time.UTC),
	}

	// These calls against an empty database must take the lock for workspace
	// creation; both must succeed since they operate on different schemas.
	if _, err = firstClient.Lock(t.Context(), lock); err != nil {
		t.Fatal(err)
	}
	if _, err = secondClient.Lock(t.Context(), lock); err != nil {
		t.Fatal(err)
	}

	// This call must fail since we are trying to acquire a creation lock that
	// is already held. We need to make this call from a separate session,
	// since advisory locks can be re-acquired within the same session.
	if _, err = thirdClient.Lock(t.Context(), lock); err == nil {
		t.Fatal("Expected an error to be thrown on a second lock attempt")
	} else if lockErr := err.(*statemgr.LockError); lockErr.Info != lock && //nolint:errcheck,errorlint // this is a test, I am fine with panic here
		lockErr.Err.Error() != "Already locked for workspace creation: default" {
		t.Fatalf("Unexpected error thrown on a second lock attempt: %v", err)
	}
}

// TestConcurrentCreationLocksInDifferentTables tests whether backends with different tables
// affect each other while taking global workspace creation locks.
func TestConcurrentCreationLocksInDifferentTables(t *testing.T) {
	testACC(t)
	connStr := getDatabaseUrl()

	dbCleaner, err := sql.Open("postgres", connStr)
	if err != nil {
		t.Fatal(err)
	}

	schema := fmt.Sprintf("terraform_%s", t.Name())
	firstTable := fmt.Sprintf("terraform_%s_1", t.Name())
	firstIndex := fmt.Sprintf("terraform_%s_1", t.Name())
	secondTable := fmt.Sprintf("terraform_%s_2", t.Name())
	secondIndex := fmt.Sprintf("terraform_%s_2", t.Name())
	defer dropSchema(t, dbCleaner, schema)

	firstConfig := backend.TestWrapConfig(map[string]interface{}{
		"conn_str":    connStr,
		"schema_name": schema,
		"table_name":  firstTable,
		"index_name":  firstIndex,
	})
	secondConfig := backend.TestWrapConfig(map[string]interface{}{
		"conn_str":    connStr,
		"schema_name": schema,
		"table_name":  secondTable,
		"index_name":  secondIndex,
	})

	//nolint:errcheck // this is a test, I am fine with panic here
	firstBackend := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), firstConfig).(*Backend)
	//nolint:errcheck // this is a test, I am fine with panic here
	secondBackend := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), secondConfig).(*Backend)
	//nolint:errcheck // this is a test, I am fine with panic here
	thirdBackend := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), secondConfig).(*Backend)

	// We operate on remote clients instead of state managers to simulate the
	// first call to backend.StateMgr(), which creates an empty state in the
	// default workspace.
	firstClient := &RemoteClient{
		Client:     firstBackend.db,
		Name:       backend.DefaultStateName,
		SchemaName: firstBackend.schemaName,
		TableName:  firstBackend.tableName,
		IndexName:  firstBackend.indexName,
	}
	secondClient := &RemoteClient{
		Client:     secondBackend.db,
		Name:       backend.DefaultStateName,
		SchemaName: secondBackend.schemaName,
		TableName:  secondBackend.tableName,
		IndexName:  secondBackend.indexName,
	}
	thirdClient := &RemoteClient{
		Client:     thirdBackend.db,
		Name:       backend.DefaultStateName,
		SchemaName: thirdBackend.schemaName,
		TableName:  thirdBackend.tableName,
		IndexName:  thirdBackend.indexName,
	}

	// It doesn't matter what lock info we supply for workspace creation.
	lock := &statemgr.LockInfo{
		ID:        "1",
		Operation: "test",
		Info:      "This needs to lock for workspace creation",
		Who:       "me",
		Version:   "1",
		Created:   time.Date(1999, 8, 19, 0, 0, 0, 0, time.UTC),
	}

	// These calls against an empty database must take the lock for workspace
	// creation; both must succeed since they operate on different tables of
	// the same schema.
	if _, err = firstClient.Lock(t.Context(), lock); err != nil {
		t.Fatal(err)
	}
	if _, err = secondClient.Lock(t.Context(), lock); err != nil {
		t.Fatal(err)
	}

	// This call must fail since we are trying to acquire a creation lock that
	// is already held. We need to make this call from a separate session,
	// since advisory locks can be re-acquired within the same session.
	if _, err = thirdClient.Lock(t.Context(), lock); err == nil {
		t.Fatal("Expected an error to be thrown on a second lock attempt")
	} else if lockErr := err.(*statemgr.LockError); lockErr.Info != lock && //nolint:errcheck // this is a test, I am fine with panic here
		lockErr.Err.Error() != "Already locked for workspace creation: default" {
		t.Fatalf("Unexpected error thrown on a second lock attempt: %v", err)
	}
}