Mirror of https://github.com/opentffoundation/opentf.git (synced 2026-05-13 16:00:47 -04:00)
tofu: Plumb context.Context through the resource handling functions
The graph node types related to resources and resource instances use a bunch of helper functions in different combinations, rather than calling directly into the provider API. This commit plumbs context.Context through to the functions that _do_ eventually call methods directly on the provider object, leaving us just one more step away from plumbing the context through to the actual gRPC calls.

The next step (in a future commit) will be to update the providers.Interface methods to take context.Context arguments, and then have the gRPC-based implementations of that interface pass the context through to the gRPC client stub calls; then we should be pretty close to being able to turn on OTel tracing instrumentation for our gRPC client requests.

Signed-off-by: Martin Atkins <mart@degeneration.co.uk>
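The mechanical pattern repeated throughout the diff below is the conventional Go one: each helper gains a context.Context as its first parameter and forwards it onward, instead of synthesizing a placeholder with context.TODO(). A minimal, self-contained sketch of that before/after shape (the names here are illustrative only, not OpenTofu's actual types):

package main

import (
	"context"
	"fmt"
)

// fakeProvider stands in for a gRPC-backed provider client; in the real
// codebase the context would eventually reach the gRPC stub calls.
type fakeProvider struct{}

func (p *fakeProvider) Schema(ctx context.Context) (string, error) {
	if err := ctx.Err(); err != nil {
		return "", err // honor cancellation requested by the caller
	}
	return "example-schema", nil
}

// Before: the helper received no context, so it had to invent one.
func getSchemaBefore(p *fakeProvider) (string, error) {
	return p.Schema(context.TODO())
}

// After: the caller's context is threaded through as the first parameter,
// so cancellation, deadlines, and (eventually) OTel trace spans propagate.
func getSchemaAfter(ctx context.Context, p *fakeProvider) (string, error) {
	return p.Schema(ctx)
}

func main() {
	s, err := getSchemaAfter(context.Background(), &fakeProvider{})
	fmt.Println(s, err)
}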
@@ -130,18 +130,18 @@ func resolveProviderInstance(keyExpr hcl.Expression, keyScope *lang.Scope, sourc
 }

 // getProvider returns the providers.Interface and schema for a given provider.
-func getProvider(ctx EvalContext, addr addrs.AbsProviderConfig, providerKey addrs.InstanceKey) (providers.Interface, providers.ProviderSchema, error) {
+func getProvider(ctx context.Context, evalCtx EvalContext, addr addrs.AbsProviderConfig, providerKey addrs.InstanceKey) (providers.Interface, providers.ProviderSchema, error) {
 	if addr.Provider.Type == "" {
 		// Should never happen
 		panic("GetProvider used with uninitialized provider configuration address")
 	}
-	provider := ctx.Provider(addr, providerKey)
+	provider := evalCtx.Provider(addr, providerKey)
 	if provider == nil {
 		return nil, providers.ProviderSchema{}, fmt.Errorf("provider %s not initialized", addr.InstanceString(providerKey))
 	}
 	// Not all callers require a schema, so we will leave checking for a nil
 	// schema to the callers.
-	schema, err := ctx.ProviderSchema(context.TODO(), addr)
+	schema, err := evalCtx.ProviderSchema(ctx, addr)
 	if err != nil {
 		return nil, providers.ProviderSchema{}, fmt.Errorf("failed to read schema for provider %s: %w", addr, err)
 	}
@@ -66,7 +66,7 @@ func (n *NodeApplyableProvider) Execute(ctx context.Context, evalCtx EvalContext

 	return diags
 }
-func (n *NodeApplyableProvider) initInstances(_ context.Context, evalCtx EvalContext, op walkOperation) (map[addrs.InstanceKey]providers.Interface, tfdiags.Diagnostics) {
+func (n *NodeApplyableProvider) initInstances(ctx context.Context, evalCtx EvalContext, op walkOperation) (map[addrs.InstanceKey]providers.Interface, tfdiags.Diagnostics) {
 	var diags tfdiags.Diagnostics

 	var initKeys []addrs.InstanceKey
@@ -99,7 +99,7 @@ func (n *NodeApplyableProvider) initInstances(_ context.Context, evalCtx EvalCon

 	instances := make(map[addrs.InstanceKey]providers.Interface)
 	for configKey, initKey := range instanceKeys {
-		provider, _, err := getProvider(evalCtx, n.Addr, initKey)
+		provider, _, err := getProvider(ctx, evalCtx, n.Addr, initKey)
 		diags = diags.Append(err)
 		instances[configKey] = provider
 	}
@@ -6,6 +6,7 @@
 package tofu

 import (
+	"context"
 	"fmt"
 	"log"
@@ -540,9 +541,9 @@ func isResourceMovedToDifferentType(newAddr, oldAddr addrs.AbsResourceInstance)

 // readResourceInstanceState reads the current object for a specific instance in
 // the state.
-func (n *NodeAbstractResourceInstance) readResourceInstanceState(evalCtx EvalContext, addr addrs.AbsResourceInstance) (*states.ResourceInstanceObject, tfdiags.Diagnostics) {
+func (n *NodeAbstractResourceInstance) readResourceInstanceState(ctx context.Context, evalCtx EvalContext, addr addrs.AbsResourceInstance) (*states.ResourceInstanceObject, tfdiags.Diagnostics) {
 	var diags tfdiags.Diagnostics
-	provider, providerSchema, err := getProvider(evalCtx, n.ResolvedProvider.ProviderConfig, n.ResolvedProviderKey)
+	provider, providerSchema, err := getProvider(ctx, evalCtx, n.ResolvedProvider.ProviderConfig, n.ResolvedProviderKey)
 	if err != nil {
 		return nil, diags.Append(err)
 	}
@@ -595,9 +596,9 @@ func (n *NodeAbstractResourceInstance) readResourceInstanceState(evalCon

 // readResourceInstanceStateDeposed reads the deposed object for a specific
 // instance in the state.
-func (n *NodeAbstractResourceInstance) readResourceInstanceStateDeposed(evalCtx EvalContext, addr addrs.AbsResourceInstance, key states.DeposedKey) (*states.ResourceInstanceObject, tfdiags.Diagnostics) {
+func (n *NodeAbstractResourceInstance) readResourceInstanceStateDeposed(ctx context.Context, evalCtx EvalContext, addr addrs.AbsResourceInstance, key states.DeposedKey) (*states.ResourceInstanceObject, tfdiags.Diagnostics) {
 	var diags tfdiags.Diagnostics
-	provider, providerSchema, err := getProvider(evalCtx, n.ResolvedProvider.ProviderConfig, n.ResolvedProviderKey)
+	provider, providerSchema, err := getProvider(ctx, evalCtx, n.ResolvedProvider.ProviderConfig, n.ResolvedProviderKey)
 	if err != nil {
 		diags = diags.Append(err)
 		return nil, diags
@@ -6,6 +6,7 @@
 package tofu

 import (
+	"context"
 	"fmt"
 	"log"
 	"strings"
@@ -458,26 +459,26 @@ const (
 //
 // targetState determines which context state we're writing to during plan. The
 // default is the global working state.
-func (n *NodeAbstractResourceInstance) writeResourceInstanceState(evalCtx EvalContext, obj *states.ResourceInstanceObject, targetState phaseState) error {
-	return n.writeResourceInstanceStateImpl(evalCtx, states.NotDeposed, obj, targetState)
+func (n *NodeAbstractResourceInstance) writeResourceInstanceState(ctx context.Context, evalCtx EvalContext, obj *states.ResourceInstanceObject, targetState phaseState) error {
+	return n.writeResourceInstanceStateImpl(ctx, evalCtx, states.NotDeposed, obj, targetState)
 }

-func (n *NodeAbstractResourceInstance) writeResourceInstanceStateDeposed(evalCtx EvalContext, deposedKey states.DeposedKey, obj *states.ResourceInstanceObject, targetState phaseState) error {
+func (n *NodeAbstractResourceInstance) writeResourceInstanceStateDeposed(ctx context.Context, evalCtx EvalContext, deposedKey states.DeposedKey, obj *states.ResourceInstanceObject, targetState phaseState) error {
 	if deposedKey == states.NotDeposed {
 		// Bail out to avoid silently doing something other than what the
 		// caller seems to have intended.
 		panic("trying to write current state object using writeResourceInstanceStateDeposed")
 	}
-	return n.writeResourceInstanceStateImpl(evalCtx, deposedKey, obj, targetState)
+	return n.writeResourceInstanceStateImpl(ctx, evalCtx, deposedKey, obj, targetState)
 }

 // (this is the private common body of both writeResourceInstanceState and
 // writeResourceInstanceStateDeposed. Don't call it directly; instead, use
 // one of the two wrappers to be explicit about which of the instance's
 // objects you are intending to write.
-func (n *NodeAbstractResourceInstance) writeResourceInstanceStateImpl(evalCtx EvalContext, deposedKey states.DeposedKey, obj *states.ResourceInstanceObject, targetState phaseState) error {
+func (n *NodeAbstractResourceInstance) writeResourceInstanceStateImpl(ctx context.Context, evalCtx EvalContext, deposedKey states.DeposedKey, obj *states.ResourceInstanceObject, targetState phaseState) error {
 	absAddr := n.Addr
-	_, providerSchema, err := n.getProvider(evalCtx)
+	_, providerSchema, err := n.getProvider(ctx, evalCtx)
 	if err != nil {
 		return err
 	}
@@ -550,7 +551,7 @@ func (n *NodeAbstractResourceInstance) writeResourceInstanceStateImpl(evalCtx Ev
 }

 // planForget returns a removed from state diff.
-func (n *NodeAbstractResourceInstance) planForget(evalCtx EvalContext, currentState *states.ResourceInstanceObject, deposedKey states.DeposedKey) *plans.ResourceInstanceChange {
+func (n *NodeAbstractResourceInstance) planForget(_ context.Context, evalCtx EvalContext, currentState *states.ResourceInstanceObject, deposedKey states.DeposedKey) *plans.ResourceInstanceChange {
 	var plan *plans.ResourceInstanceChange

 	unmarkedPriorVal, _ := currentState.Value.UnmarkDeep()
@@ -575,7 +576,7 @@ func (n *NodeAbstractResourceInstance) planForget(evalCtx EvalContext, currentSt
 }

 // planDestroy returns a plain destroy diff.
-func (n *NodeAbstractResourceInstance) planDestroy(evalCtx EvalContext, currentState *states.ResourceInstanceObject, deposedKey states.DeposedKey) (*plans.ResourceInstanceChange, tfdiags.Diagnostics) {
+func (n *NodeAbstractResourceInstance) planDestroy(ctx context.Context, evalCtx EvalContext, currentState *states.ResourceInstanceObject, deposedKey states.DeposedKey) (*plans.ResourceInstanceChange, tfdiags.Diagnostics) {
 	var diags tfdiags.Diagnostics
 	var plan *plans.ResourceInstanceChange

@@ -616,12 +617,12 @@ func (n *NodeAbstractResourceInstance) planDestroy(evalCtx EvalContext, currentS
 	// operation.
 	nullVal := cty.NullVal(unmarkedPriorVal.Type())

-	provider, _, err := n.getProvider(evalCtx)
+	provider, _, err := n.getProvider(ctx, evalCtx)
 	if err != nil {
 		return plan, diags.Append(err)
 	}

-	metaConfigVal, metaDiags := n.providerMetas(evalCtx)
+	metaConfigVal, metaDiags := n.providerMetas(ctx, evalCtx)
 	diags = diags.Append(metaDiags)
 	if diags.HasErrors() {
 		return plan, diags
@@ -681,7 +682,7 @@ func (n *NodeAbstractResourceInstance) planDestroy(evalCtx EvalContext, currentS

 // writeChange saves a planned change for an instance object into the set of
 // global planned changes.
-func (n *NodeAbstractResourceInstance) writeChange(evalCtx EvalContext, change *plans.ResourceInstanceChange, deposedKey states.DeposedKey) error {
+func (n *NodeAbstractResourceInstance) writeChange(ctx context.Context, evalCtx EvalContext, change *plans.ResourceInstanceChange, deposedKey states.DeposedKey) error {
 	changes := evalCtx.Changes()

 	if change == nil {
@@ -695,7 +696,7 @@ func (n *NodeAbstractResourceInstance) writeChange(evalCtx EvalContext, change *
 		return nil
 	}

-	_, providerSchema, err := n.getProvider(evalCtx)
+	_, providerSchema, err := n.getProvider(ctx, evalCtx)
 	if err != nil {
 		return err
 	}
@@ -738,7 +739,7 @@ func (n *NodeAbstractResourceInstance) writeChange(evalCtx EvalContext, change *
 }

 // refresh does a refresh for a resource
-func (n *NodeAbstractResourceInstance) refresh(evalCtx EvalContext, deposedKey states.DeposedKey, state *states.ResourceInstanceObject) (*states.ResourceInstanceObject, tfdiags.Diagnostics) {
+func (n *NodeAbstractResourceInstance) refresh(ctx context.Context, evalCtx EvalContext, deposedKey states.DeposedKey, state *states.ResourceInstanceObject) (*states.ResourceInstanceObject, tfdiags.Diagnostics) {
 	var diags tfdiags.Diagnostics
 	absAddr := n.Addr
 	if deposedKey == states.NotDeposed {
@@ -746,7 +747,7 @@ func (n *NodeAbstractResourceInstance) refresh(evalCtx EvalContext, deposedKey s
 	} else {
 		log.Printf("[TRACE] NodeAbstractResourceInstance.refresh for %s (deposed object %s)", absAddr, deposedKey)
 	}
-	provider, providerSchema, err := n.getProvider(evalCtx)
+	provider, providerSchema, err := n.getProvider(ctx, evalCtx)
 	if err != nil {
 		return state, diags.Append(err)
 	}
@@ -763,7 +764,7 @@ func (n *NodeAbstractResourceInstance) refresh(evalCtx EvalContext, deposedKey s
 		return state, diags
 	}

-	metaConfigVal, metaDiags := n.providerMetas(evalCtx)
+	metaConfigVal, metaDiags := n.providerMetas(ctx, evalCtx)
 	diags = diags.Append(metaDiags)
 	if diags.HasErrors() {
 		return state, diags
@@ -876,6 +877,7 @@ func (n *NodeAbstractResourceInstance) refresh(evalCtx EvalContext, deposedKey s
 }

 func (n *NodeAbstractResourceInstance) plan(
+	ctx context.Context,
 	evalCtx EvalContext,
 	plannedChange *plans.ResourceInstanceChange,
 	currentState *states.ResourceInstanceObject,
@@ -886,7 +888,7 @@ func (n *NodeAbstractResourceInstance) plan(
 	var keyData instances.RepetitionData

 	resource := n.Addr.Resource.Resource
-	provider, providerSchema, err := n.getProvider(evalCtx)
+	provider, providerSchema, err := n.getProvider(ctx, evalCtx)
 	if err != nil {
 		return nil, nil, keyData, diags.Append(err)
 	}
@@ -953,7 +955,7 @@ func (n *NodeAbstractResourceInstance) plan(
 		return nil, nil, keyData, diags
 	}

-	metaConfigVal, metaDiags := n.providerMetas(evalCtx)
+	metaConfigVal, metaDiags := n.providerMetas(ctx, evalCtx)
 	diags = diags.Append(metaDiags)
 	if diags.HasErrors() {
 		return nil, nil, keyData, diags
@@ -1640,13 +1642,13 @@ type ProviderWithEncryption interface {
 // readDataSource handles everything needed to call ReadDataSource on the provider.
 // A previously evaluated configVal can be passed in, or a new one is generated
 // from the resource configuration.
-func (n *NodeAbstractResourceInstance) readDataSource(evalCtx EvalContext, configVal cty.Value) (cty.Value, tfdiags.Diagnostics) {
+func (n *NodeAbstractResourceInstance) readDataSource(ctx context.Context, evalCtx EvalContext, configVal cty.Value) (cty.Value, tfdiags.Diagnostics) {
 	var diags tfdiags.Diagnostics
 	var newVal cty.Value

 	config := *n.Config

-	provider, providerSchema, err := n.getProvider(evalCtx)
+	provider, providerSchema, err := n.getProvider(ctx, evalCtx)
 	diags = diags.Append(err)
 	if diags.HasErrors() {
 		return newVal, diags
@@ -1658,7 +1660,7 @@ func (n *NodeAbstractResourceInstance) readDataSource(evalCtx EvalContext, confi
 		return newVal, diags
 	}

-	metaConfigVal, metaDiags := n.providerMetas(evalCtx)
+	metaConfigVal, metaDiags := n.providerMetas(ctx, evalCtx)
 	diags = diags.Append(metaDiags)
 	if diags.HasErrors() {
 		return newVal, diags
@@ -1769,11 +1771,11 @@ func (n *NodeAbstractResourceInstance) readDataSource(evalCtx EvalContext, confi
 	return newVal, diags
 }

-func (n *NodeAbstractResourceInstance) providerMetas(evalCtx EvalContext) (cty.Value, tfdiags.Diagnostics) {
+func (n *NodeAbstractResourceInstance) providerMetas(ctx context.Context, evalCtx EvalContext) (cty.Value, tfdiags.Diagnostics) {
 	var diags tfdiags.Diagnostics
 	metaConfigVal := cty.NullVal(cty.DynamicPseudoType)

-	_, providerSchema, err := n.getProvider(evalCtx)
+	_, providerSchema, err := n.getProvider(ctx, evalCtx)
 	if err != nil {
 		return metaConfigVal, diags.Append(err)
 	}
@@ -1806,12 +1808,12 @@ func (n *NodeAbstractResourceInstance) providerMetas(evalCtx EvalContext) (cty.V
 // value, but it still matches the previous state, then we can record a NoNop
 // change. If the states don't match then we record a Read change so that the
 // new value is applied to the state.
-func (n *NodeAbstractResourceInstance) planDataSource(evalCtx EvalContext, checkRuleSeverity tfdiags.Severity, skipPlanChanges bool) (*plans.ResourceInstanceChange, *states.ResourceInstanceObject, instances.RepetitionData, tfdiags.Diagnostics) {
+func (n *NodeAbstractResourceInstance) planDataSource(ctx context.Context, evalCtx EvalContext, checkRuleSeverity tfdiags.Severity, skipPlanChanges bool) (*plans.ResourceInstanceChange, *states.ResourceInstanceObject, instances.RepetitionData, tfdiags.Diagnostics) {
 	var diags tfdiags.Diagnostics
 	var keyData instances.RepetitionData
 	var configVal cty.Value

-	_, providerSchema, err := n.getProvider(evalCtx)
+	_, providerSchema, err := n.getProvider(ctx, evalCtx)
 	if err != nil {
 		return nil, nil, keyData, diags.Append(err)
 	}
@@ -1937,7 +1939,7 @@ func (n *NodeAbstractResourceInstance) planDataSource(evalCtx EvalContext, check

 		// We have a complete configuration with no dependencies to wait on, so we
 		// can read the data source into the state.
-		newVal, readDiags := n.readDataSource(evalCtx, configVal)
+		newVal, readDiags := n.readDataSource(ctx, evalCtx, configVal)

 		// Now we've loaded the data, and diags tells us whether we were successful
 		// or not, we are going to create our plannedChange and our
@@ -2080,11 +2082,11 @@ func (n *NodeAbstractResourceInstance) dependenciesHavePendingChanges(evalCtx Ev

 // apply deals with the main part of the data resource lifecycle: either
 // actually reading from the data source or generating a plan to do so.
-func (n *NodeAbstractResourceInstance) applyDataSource(evalCtx EvalContext, planned *plans.ResourceInstanceChange) (*states.ResourceInstanceObject, instances.RepetitionData, tfdiags.Diagnostics) {
+func (n *NodeAbstractResourceInstance) applyDataSource(ctx context.Context, evalCtx EvalContext, planned *plans.ResourceInstanceChange) (*states.ResourceInstanceObject, instances.RepetitionData, tfdiags.Diagnostics) {
 	var diags tfdiags.Diagnostics
 	var keyData instances.RepetitionData

-	_, providerSchema, err := n.getProvider(evalCtx)
+	_, providerSchema, err := n.getProvider(ctx, evalCtx)
 	if err != nil {
 		return nil, keyData, diags.Append(err)
 	}
@@ -2136,7 +2138,7 @@ func (n *NodeAbstractResourceInstance) applyDataSource(evalCtx EvalContext, plan
 		return nil, keyData, diags
 	}

-	newVal, readDiags := n.readDataSource(evalCtx, configVal)
+	newVal, readDiags := n.readDataSource(ctx, evalCtx, configVal)
 	if check, nested := n.nestedInCheckBlock(); nested {
 		addr := check.Addr().Absolute(n.Addr.Module)

@@ -2181,7 +2183,7 @@ func (n *NodeAbstractResourceInstance) applyDataSource(evalCtx EvalContext, plan
 // evalApplyProvisioners determines if provisioners need to be run, and if so
 // executes the provisioners for a resource and returns an updated error if
 // provisioning fails.
-func (n *NodeAbstractResourceInstance) evalApplyProvisioners(evalCtx EvalContext, state *states.ResourceInstanceObject, createNew bool, when configs.ProvisionerWhen) tfdiags.Diagnostics {
+func (n *NodeAbstractResourceInstance) evalApplyProvisioners(ctx context.Context, evalCtx EvalContext, state *states.ResourceInstanceObject, createNew bool, when configs.ProvisionerWhen) tfdiags.Diagnostics {
 	var diags tfdiags.Diagnostics

 	if state == nil {
@@ -2216,7 +2218,7 @@ func (n *NodeAbstractResourceInstance) evalApplyProvisioners(evalCtx EvalContext

 	// If there are no errors, then we append it to our output error
 	// if we have one, otherwise we just output it.
-	diags = diags.Append(n.applyProvisioners(evalCtx, state, when, provs))
+	diags = diags.Append(n.applyProvisioners(ctx, evalCtx, state, when, provs))
 	if diags.HasErrors() {
 		log.Printf("[TRACE] evalApplyProvisioners: %s provisioning failed, but we will continue anyway at the caller's request", n.Addr)
 		return diags
@@ -2261,7 +2263,7 @@ func filterProvisioners(provisioners []*configs.Provisioner, when configs.Provis
 }

 // applyProvisioners executes the provisioners for a resource.
-func (n *NodeAbstractResourceInstance) applyProvisioners(evalCtx EvalContext, state *states.ResourceInstanceObject, when configs.ProvisionerWhen, provs []*configs.Provisioner) tfdiags.Diagnostics {
+func (n *NodeAbstractResourceInstance) applyProvisioners(_ context.Context, evalCtx EvalContext, state *states.ResourceInstanceObject, when configs.ProvisionerWhen, provs []*configs.Provisioner) tfdiags.Diagnostics {
 	var diags tfdiags.Diagnostics

 	// this self is only used for destroy provisioner evaluation, and must
@@ -2454,12 +2456,14 @@ func (n *NodeAbstractResourceInstance) evalDestroyProvisionerConfig(evalCtx Eval
 // send a nil config. The keyData information can be empty if the config is
 // nil, since it is only used to evaluate the configuration.
 func (n *NodeAbstractResourceInstance) apply(
+	ctx context.Context,
 	evalCtx EvalContext,
 	state *states.ResourceInstanceObject,
 	change *plans.ResourceInstanceChange,
 	applyConfig *configs.Resource,
 	keyData instances.RepetitionData,
-	createBeforeDestroy bool) (*states.ResourceInstanceObject, tfdiags.Diagnostics) {
+	createBeforeDestroy bool,
+) (*states.ResourceInstanceObject, tfdiags.Diagnostics) {

 	var diags tfdiags.Diagnostics
 	if state == nil {
@@ -2474,7 +2478,7 @@ func (n *NodeAbstractResourceInstance) apply(
 		return state, diags
 	}

-	provider, providerSchema, err := n.getProvider(evalCtx)
+	provider, providerSchema, err := n.getProvider(ctx, evalCtx)
 	if err != nil {
 		return nil, diags.Append(err)
 	}
@@ -2522,7 +2526,7 @@ func (n *NodeAbstractResourceInstance) apply(
 		return nil, diags
 	}

-	metaConfigVal, metaDiags := n.providerMetas(evalCtx)
+	metaConfigVal, metaDiags := n.providerMetas(ctx, evalCtx)
 	diags = diags.Append(metaDiags)
 	if diags.HasErrors() {
 		return nil, diags
@@ -2796,8 +2800,8 @@ func resourceInstancePrevRunAddr(evalCtx EvalContext, currentAddr addrs.AbsResou
 	return table.OldAddr(currentAddr)
 }

-func (n *NodeAbstractResourceInstance) getProvider(evalCtx EvalContext) (providers.Interface, providers.ProviderSchema, error) {
-	underlyingProvider, schema, err := getProvider(evalCtx, n.ResolvedProvider.ProviderConfig, n.ResolvedProviderKey)
+func (n *NodeAbstractResourceInstance) getProvider(ctx context.Context, evalCtx EvalContext) (providers.Interface, providers.ProviderSchema, error) {
+	underlyingProvider, schema, err := getProvider(ctx, evalCtx, n.ResolvedProvider.ProviderConfig, n.ResolvedProviderKey)
 	if err != nil {
 		return nil, providers.ProviderSchema{}, err
 	}
@@ -149,9 +149,9 @@ func TestNodeAbstractResourceInstanceProvider(t *testing.T) {

 func TestNodeAbstractResourceInstance_WriteResourceInstanceState(t *testing.T) {
 	state := states.NewState()
-	ctx := new(MockEvalContext)
-	ctx.StateState = state.SyncWrapper()
-	ctx.PathPath = addrs.RootModuleInstance
+	evalCtx := new(MockEvalContext)
+	evalCtx.StateState = state.SyncWrapper()
+	evalCtx.PathPath = addrs.RootModuleInstance

 	mockProvider := mockProviderWithResourceTypeSchema("aws_instance", &configschema.Block{
 		Attributes: map[string]*configschema.Attribute{
@@ -176,10 +176,10 @@ func TestNodeAbstractResourceInstance_WriteResourceInstanceState(t *testing.T) {
 			ResolvedProvider: ResolvedProvider{ProviderConfig: mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`)},
 		},
 	}
-	ctx.ProviderProvider = mockProvider
-	ctx.ProviderSchemaSchema = mockProvider.GetProviderSchema()
+	evalCtx.ProviderProvider = mockProvider
+	evalCtx.ProviderSchemaSchema = mockProvider.GetProviderSchema()

-	err := node.writeResourceInstanceState(ctx, obj, workingState)
+	err := node.writeResourceInstanceState(t.Context(), evalCtx, obj, workingState)
 	if err != nil {
 		t.Fatalf("unexpected error: %s", err.Error())
 	}
@@ -7,9 +7,10 @@ package tofu

 import (
 	"fmt"
+	"testing"
+
 	"github.com/opentofu/opentofu/internal/refactoring"
 	"github.com/opentofu/opentofu/internal/tfdiags"
-	"testing"

 	"github.com/opentofu/opentofu/internal/addrs"
 	"github.com/opentofu/opentofu/internal/configs"
@@ -330,14 +331,14 @@ func TestNodeAbstractResource_ReadResourceInstanceState(t *testing.T) {
 	})
 	for _, test := range tests {
 		t.Run("ReadState "+test.Name, func(t *testing.T) {
-			ctx := new(MockEvalContext)
-			ctx.StateState = test.State.SyncWrapper()
-			ctx.PathPath = addrs.RootModuleInstance
-			ctx.ProviderSchemaSchema = test.Provider.GetProviderSchema()
-			ctx.MoveResultsResults = test.MoveResults
-			ctx.ProviderProvider = providers.Interface(test.Provider)
+			evalCtx := new(MockEvalContext)
+			evalCtx.StateState = test.State.SyncWrapper()
+			evalCtx.PathPath = addrs.RootModuleInstance
+			evalCtx.ProviderSchemaSchema = test.Provider.GetProviderSchema()
+			evalCtx.MoveResultsResults = test.MoveResults
+			evalCtx.ProviderProvider = providers.Interface(test.Provider)

-			got, readDiags := test.Node.readResourceInstanceState(ctx, test.Node.Addr)
+			got, readDiags := test.Node.readResourceInstanceState(t.Context(), evalCtx, test.Node.Addr)
 			if test.WantErrorStr != "" {
 				if !readDiags.HasErrors() {
 					t.Fatalf("[%s] Expected error, got none", test.Name)
@@ -377,15 +378,15 @@
 	})
 	for _, test := range deposedTests {
 		t.Run("ReadStateDeposed "+test.Name, func(t *testing.T) {
-			ctx := new(MockEvalContext)
-			ctx.StateState = test.State.SyncWrapper()
-			ctx.PathPath = addrs.RootModuleInstance
-			ctx.ProviderSchemaSchema = test.Provider.GetProviderSchema()
-			ctx.MoveResultsResults = test.MoveResults
-			ctx.ProviderProvider = providers.Interface(test.Provider)
+			evalCtx := new(MockEvalContext)
+			evalCtx.StateState = test.State.SyncWrapper()
+			evalCtx.PathPath = addrs.RootModuleInstance
+			evalCtx.ProviderSchemaSchema = test.Provider.GetProviderSchema()
+			evalCtx.MoveResultsResults = test.MoveResults
+			evalCtx.ProviderProvider = providers.Interface(test.Provider)

 			key := states.DeposedKey("00000001") // shim from legacy state assigns 0th deposed index this key
-			got, readDiags := test.Node.readResourceInstanceStateDeposed(ctx, test.Node.Addr, key)
+			got, readDiags := test.Node.readResourceInstanceStateDeposed(t.Context(), evalCtx, test.Node.Addr, key)
 			if test.WantErrorStr != "" {
 				if !readDiags.HasErrors() {
 					t.Fatalf("[%s] Expected error, got none", test.Name)
@@ -181,8 +181,8 @@ func (n *NodeApplyableResourceInstance) Execute(ctx context.Context, evalCtx Eva
 	return diags
 }

-func (n *NodeApplyableResourceInstance) dataResourceExecute(_ context.Context, evalCtx EvalContext) (diags tfdiags.Diagnostics) {
-	_, providerSchema, err := getProvider(evalCtx, n.ResolvedProvider.ProviderConfig, n.ResolvedProviderKey)
+func (n *NodeApplyableResourceInstance) dataResourceExecute(ctx context.Context, evalCtx EvalContext) (diags tfdiags.Diagnostics) {
+	_, providerSchema, err := getProvider(ctx, evalCtx, n.ResolvedProvider.ProviderConfig, n.ResolvedProviderKey)
 	diags = diags.Append(err)
 	if diags.HasErrors() {
 		return diags
@@ -204,7 +204,7 @@ func (n *NodeApplyableResourceInstance) dataResourceExecute(_ context.Context, e
 		// In this particular call to applyDataSource we include our planned
 		// change, which signals that we expect this read to complete fully
 		// with no unknown values; it'll produce an error if not.
-		state, repeatData, applyDiags := n.applyDataSource(evalCtx, change)
+		state, repeatData, applyDiags := n.applyDataSource(ctx, evalCtx, change)
 		diags = diags.Append(applyDiags)
 		if diags.HasErrors() {
 			return diags
@@ -216,13 +216,13 @@ func (n *NodeApplyableResourceInstance) dataResourceExecute(_ context.Context, e
 		// actually reading the data (e.g. because it was already read during
 		// the plan phase) and so we're only running through here to get the
 		// extra details like precondition/postcondition checks.
-		diags = diags.Append(n.writeResourceInstanceState(evalCtx, state, workingState))
+		diags = diags.Append(n.writeResourceInstanceState(ctx, evalCtx, state, workingState))
 		if diags.HasErrors() {
 			return diags
 		}
 	}

-	diags = diags.Append(n.writeChange(evalCtx, nil, ""))
+	diags = diags.Append(n.writeChange(ctx, evalCtx, nil, ""))

 	diags = diags.Append(updateStateHook(evalCtx))

@@ -250,7 +250,7 @@ func (n *NodeApplyableResourceInstance) managedResourceExecute(ctx context.Conte
 	var deposedKey states.DeposedKey

 	addr := n.ResourceInstanceAddr().Resource
-	_, providerSchema, err := getProvider(evalCtx, n.ResolvedProvider.ProviderConfig, n.ResolvedProviderKey)
+	_, providerSchema, err := getProvider(ctx, evalCtx, n.ResolvedProvider.ProviderConfig, n.ResolvedProviderKey)
 	diags = diags.Append(err)
 	if diags.HasErrors() {
 		return diags
@@ -291,7 +291,7 @@ func (n *NodeApplyableResourceInstance) managedResourceExecute(ctx context.Conte
 		log.Printf("[TRACE] managedResourceExecute: prior object for %s now deposed with key %s", n.Addr, deposedKey)
 	}

-	state, readDiags := n.readResourceInstanceState(evalCtx, n.ResourceInstanceAddr())
+	state, readDiags := n.readResourceInstanceState(ctx, evalCtx, n.ResourceInstanceAddr())
 	diags = diags.Append(readDiags)
 	if diags.HasErrors() {
 		return diags
@@ -306,7 +306,7 @@ func (n *NodeApplyableResourceInstance) managedResourceExecute(ctx context.Conte

 	// Make a new diff, in case we've learned new values in the state
 	// during apply which we can now incorporate.
-	diffApply, _, repeatData, planDiags := n.plan(evalCtx, diff, state, false, n.forceReplace)
+	diffApply, _, repeatData, planDiags := n.plan(ctx, evalCtx, diff, state, false, n.forceReplace)
 	diags = diags.Append(planDiags)
 	if diags.HasErrors() {
 		return diags
@@ -337,12 +337,12 @@ func (n *NodeApplyableResourceInstance) managedResourceExecute(ctx context.Conte
 		return diags.Append(n.managedResourcePostconditions(evalCtx, repeatData))
 	}

-	state, applyDiags := n.apply(evalCtx, state, diffApply, n.Config, repeatData, n.CreateBeforeDestroy())
+	state, applyDiags := n.apply(ctx, evalCtx, state, diffApply, n.Config, repeatData, n.CreateBeforeDestroy())
 	diags = diags.Append(applyDiags)

 	// We clear the change out here so that future nodes don't see a change
 	// that is already complete.
-	err = n.writeChange(evalCtx, nil, "")
+	err = n.writeChange(ctx, evalCtx, nil, "")
 	if err != nil {
 		return diags.Append(err)
 	}
@@ -353,20 +353,20 @@ func (n *NodeApplyableResourceInstance) managedResourceExecute(ctx context.Conte
 		// dependencies are always updated to match the configuration during apply
 		state.Dependencies = n.Dependencies
 	}
-	err = n.writeResourceInstanceState(evalCtx, state, workingState)
+	err = n.writeResourceInstanceState(ctx, evalCtx, state, workingState)
 	if err != nil {
 		return diags.Append(err)
 	}

 	// Run Provisioners
 	createNew := (diffApply.Action == plans.Create || diffApply.Action.IsReplace())
-	applyProvisionersDiags := n.evalApplyProvisioners(evalCtx, state, createNew, configs.ProvisionerWhenCreate)
+	applyProvisionersDiags := n.evalApplyProvisioners(ctx, evalCtx, state, createNew, configs.ProvisionerWhenCreate)
 	// the provisioner errors count as port of the apply error, so we can bundle the diags
 	diags = diags.Append(applyProvisionersDiags)

 	state = maybeTainted(addr.Absolute(evalCtx.Path()), state, diffApply, diags.Err())

-	err = n.writeResourceInstanceState(evalCtx, state, workingState)
+	err = n.writeResourceInstanceState(ctx, evalCtx, state, workingState)
 	if err != nil {
 		return diags.Append(err)
 	}
@@ -112,7 +112,7 @@ func (n *NodePlanDeposedResourceInstanceObject) Execute(ctx context.Context, eva
 	)

 	// Read the state for the deposed resource instance
-	state, err := n.readResourceInstanceStateDeposed(evalCtx, n.Addr, n.DeposedKey)
+	state, err := n.readResourceInstanceStateDeposed(ctx, evalCtx, n.Addr, n.DeposedKey)
 	diags = diags.Append(err)
 	if diags.HasErrors() {
 		return diags
@@ -120,13 +120,13 @@ func (n *NodePlanDeposedResourceInstanceObject) Execute(ctx context.Context, eva

 	// Note any upgrades that readResourceInstanceState might've done in the
 	// prevRunState, so that it'll conform to current schema.
-	diags = diags.Append(n.writeResourceInstanceStateDeposed(evalCtx, n.DeposedKey, state, prevRunState))
+	diags = diags.Append(n.writeResourceInstanceStateDeposed(ctx, evalCtx, n.DeposedKey, state, prevRunState))
 	if diags.HasErrors() {
 		return diags
 	}
 	// Also the refreshState, because that should still reflect schema upgrades
 	// even if not refreshing.
-	diags = diags.Append(n.writeResourceInstanceStateDeposed(evalCtx, n.DeposedKey, state, refreshState))
+	diags = diags.Append(n.writeResourceInstanceStateDeposed(ctx, evalCtx, n.DeposedKey, state, refreshState))
 	if diags.HasErrors() {
 		return diags
 	}
@@ -144,13 +144,13 @@ func (n *NodePlanDeposedResourceInstanceObject) Execute(ctx context.Context, eva
 		// resource during Delete correctly. If this is a simple refresh,
 		// OpenTofu is expected to remove the missing resource from the state
 		// entirely
-		refreshedState, refreshDiags := n.refresh(evalCtx, n.DeposedKey, state)
+		refreshedState, refreshDiags := n.refresh(ctx, evalCtx, n.DeposedKey, state)
 		diags = diags.Append(refreshDiags)
 		if diags.HasErrors() {
 			return diags
 		}

-		diags = diags.Append(n.writeResourceInstanceStateDeposed(evalCtx, n.DeposedKey, refreshedState, refreshState))
+		diags = diags.Append(n.writeResourceInstanceStateDeposed(ctx, evalCtx, n.DeposedKey, refreshedState, refreshState))
 		if diags.HasErrors() {
 			return diags
 		}
@@ -176,17 +176,17 @@ func (n *NodePlanDeposedResourceInstanceObject) Execute(ctx context.Context, eva

 	if shouldForget {
 		if shouldDestroy {
-			change, planDiags = n.planDestroy(evalCtx, state, n.DeposedKey)
+			change, planDiags = n.planDestroy(ctx, evalCtx, state, n.DeposedKey)
 		} else {
 			diags = diags.Append(&hcl.Diagnostic{
 				Severity: hcl.DiagWarning,
 				Summary:  "Resource going to be removed from the state",
 				Detail:   fmt.Sprintf("After this plan gets applied, the resource %s will not be managed anymore by OpenTofu.\n\nIn case you want to manage the resource again, you will have to import it.", n.Addr),
 			})
-			change = n.planForget(evalCtx, state, n.DeposedKey)
+			change = n.planForget(ctx, evalCtx, state, n.DeposedKey)
 		}
 	} else {
-		change, planDiags = n.planDestroy(evalCtx, state, n.DeposedKey)
+		change, planDiags = n.planDestroy(ctx, evalCtx, state, n.DeposedKey)
 	}

 	diags = diags.Append(planDiags)
@@ -202,16 +202,16 @@ func (n *NodePlanDeposedResourceInstanceObject) Execute(ctx context.Context, eva
 		// now just need to get the deposed object destroyed, because there
 		// should be a new object already serving as its replacement.

-		diags = diags.Append(n.writeChange(evalCtx, change, n.DeposedKey))
+		diags = diags.Append(n.writeChange(ctx, evalCtx, change, n.DeposedKey))
 		if diags.HasErrors() {
 			return diags
 		}

-		diags = diags.Append(n.writeResourceInstanceStateDeposed(evalCtx, n.DeposedKey, nil, workingState))
+		diags = diags.Append(n.writeResourceInstanceStateDeposed(ctx, evalCtx, n.DeposedKey, nil, workingState))
 	} else {
 		// The working state should at least be updated with the result
 		// of upgrading and refreshing from above.
-		diags = diags.Append(n.writeResourceInstanceStateDeposed(evalCtx, n.DeposedKey, state, workingState))
+		diags = diags.Append(n.writeResourceInstanceStateDeposed(ctx, evalCtx, n.DeposedKey, state, workingState))
 	}

 	return diags
@@ -284,7 +284,7 @@ func (n *NodeDestroyDeposedResourceInstanceObject) ModifyCreateBeforeDestroy(v b
 }

 // GraphNodeExecutable impl.
-func (n *NodeDestroyDeposedResourceInstanceObject) Execute(_ context.Context, evalCtx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) {
+func (n *NodeDestroyDeposedResourceInstanceObject) Execute(ctx context.Context, evalCtx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) {
 	var change *plans.ResourceInstanceChange

 	diags = n.resolveProvider(evalCtx, false, n.DeposedKey)
@@ -293,7 +293,7 @@ func (n *NodeDestroyDeposedResourceInstanceObject) Execute(_ context.Context, ev
 	}

 	// Read the state for the deposed resource instance
-	state, err := n.readResourceInstanceStateDeposed(evalCtx, n.Addr, n.DeposedKey)
+	state, err := n.readResourceInstanceStateDeposed(ctx, evalCtx, n.Addr, n.DeposedKey)
 	if err != nil {
 		return diags.Append(err)
 	}
@@ -303,7 +303,7 @@ func (n *NodeDestroyDeposedResourceInstanceObject) Execute(_ context.Context, ev
 		return diags
 	}

-	change, destroyPlanDiags := n.planDestroy(evalCtx, state, n.DeposedKey)
+	change, destroyPlanDiags := n.planDestroy(ctx, evalCtx, state, n.DeposedKey)
 	diags = diags.Append(destroyPlanDiags)
 	if diags.HasErrors() {
 		return diags
@@ -316,14 +316,14 @@ func (n *NodeDestroyDeposedResourceInstanceObject) Execute(_ context.Context, ev
 	}

 	// we pass a nil configuration to apply because we are destroying
-	state, applyDiags := n.apply(evalCtx, state, change, nil, instances.RepetitionData{}, false)
+	state, applyDiags := n.apply(ctx, evalCtx, state, change, nil, instances.RepetitionData{}, false)
 	diags = diags.Append(applyDiags)
 	// don't return immediately on errors, we need to handle the state

 	// Always write the resource back to the state deposed. If it
 	// was successfully destroyed it will be pruned. If it was not, it will
 	// be caught on the next run.
-	writeDiags := n.writeResourceInstanceState(evalCtx, state)
+	writeDiags := n.writeResourceInstanceState(ctx, evalCtx, state)
 	diags.Append(writeDiags)
 	if diags.HasErrors() {
 		return diags
@@ -356,7 +356,7 @@ func (n *graphNodeDeposer) SetPreallocatedDeposedKey(key states.DeposedKey) {
 	n.PreallocatedDeposedKey = key
 }

-func (n *NodeDestroyDeposedResourceInstanceObject) writeResourceInstanceState(evalCtx EvalContext, obj *states.ResourceInstanceObject) error {
+func (n *NodeDestroyDeposedResourceInstanceObject) writeResourceInstanceState(ctx context.Context, evalCtx EvalContext, obj *states.ResourceInstanceObject) error {
 	absAddr := n.Addr
 	key := n.DeposedKey
 	state := evalCtx.State()
@@ -373,7 +373,7 @@ func (n *NodeDestroyDeposedResourceInstanceObject) writeResourceInstanceState(ev
 		return nil
 	}

-	_, providerSchema, err := getProvider(evalCtx, n.ResolvedProvider.ProviderConfig, n.ResolvedProviderKey)
+	_, providerSchema, err := getProvider(ctx, evalCtx, n.ResolvedProvider.ProviderConfig, n.ResolvedProviderKey)
 	if err != nil {
 		return err
 	}
@@ -438,14 +438,14 @@ func (n *NodeForgetDeposedResourceInstanceObject) References() []*addrs.Referenc
 }

 // GraphNodeExecutable impl.
-func (n *NodeForgetDeposedResourceInstanceObject) Execute(_ context.Context, evalCtx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) {
+func (n *NodeForgetDeposedResourceInstanceObject) Execute(ctx context.Context, evalCtx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) {
 	diags = n.resolveProvider(evalCtx, false, n.DeposedKey)
 	if diags.HasErrors() {
 		return diags
 	}

 	// Read the state for the deposed resource instance
-	state, err := n.readResourceInstanceStateDeposed(evalCtx, n.Addr, n.DeposedKey)
+	state, err := n.readResourceInstanceStateDeposed(ctx, evalCtx, n.Addr, n.DeposedKey)
 	if err != nil {
 		return diags.Append(err)
 	}
@@ -255,9 +255,9 @@ func TestNodeDestroyDeposedResourceInstanceObject_Execute(t *testing.T) {

 func TestNodeDestroyDeposedResourceInstanceObject_WriteResourceInstanceState(t *testing.T) {
 	state := states.NewState()
-	ctx := new(MockEvalContext)
-	ctx.StateState = state.SyncWrapper()
-	ctx.PathPath = addrs.RootModuleInstance
+	evalCtx := new(MockEvalContext)
+	evalCtx.StateState = state.SyncWrapper()
+	evalCtx.PathPath = addrs.RootModuleInstance
 	mockProvider := mockProviderWithResourceTypeSchema("aws_instance", &configschema.Block{
 		Attributes: map[string]*configschema.Attribute{
 			"id": {
@@ -266,8 +266,8 @@ func TestNodeDestroyDeposedResourceInstanceObject_WriteResourceInstanceState(t *
 			},
 		},
 	})
-	ctx.ProviderProvider = mockProvider
-	ctx.ProviderSchemaSchema = mockProvider.GetProviderSchema()
+	evalCtx.ProviderProvider = mockProvider
+	evalCtx.ProviderSchemaSchema = mockProvider.GetProviderSchema()

 	obj := &states.ResourceInstanceObject{
 		Value: cty.ObjectVal(map[string]cty.Value{
@@ -284,7 +284,7 @@ func TestNodeDestroyDeposedResourceInstanceObject_WriteResourceInstanceState(t *
 		},
 		DeposedKey: states.NewDeposedKey(),
 	}
-	err := node.writeResourceInstanceState(ctx, obj)
+	err := node.writeResourceInstanceState(t.Context(), evalCtx, obj)
 	if err != nil {
 		t.Fatalf("unexpected error: %s", err.Error())
 	}
@@ -177,7 +177,7 @@ func (n *NodeDestroyResourceInstance) Execute(ctx context.Context, evalCtx EvalC
 	return diags
 }

-func (n *NodeDestroyResourceInstance) managedResourceExecute(_ context.Context, evalCtx EvalContext) (diags tfdiags.Diagnostics) {
+func (n *NodeDestroyResourceInstance) managedResourceExecute(ctx context.Context, evalCtx EvalContext) (diags tfdiags.Diagnostics) {
 	addr := n.ResourceInstanceAddr()

 	// Get our state
@@ -190,7 +190,7 @@ func (n *NodeDestroyResourceInstance) managedResourceExecute(_ context.Context,
 	var changeApply *plans.ResourceInstanceChange
 	var state *states.ResourceInstanceObject

-	_, providerSchema, err := getProvider(evalCtx, n.ResolvedProvider.ProviderConfig, n.ResolvedProviderKey)
+	_, providerSchema, err := getProvider(ctx, evalCtx, n.ResolvedProvider.ProviderConfig, n.ResolvedProviderKey)
 	diags = diags.Append(err)
 	if diags.HasErrors() {
 		return diags
@@ -209,7 +209,7 @@ func (n *NodeDestroyResourceInstance) managedResourceExecute(_ context.Context,
 		return diags
 	}

-	state, readDiags := n.readResourceInstanceState(evalCtx, addr)
+	state, readDiags := n.readResourceInstanceState(ctx, evalCtx, addr)
 	diags = diags.Append(readDiags)
 	if diags.HasErrors() {
 		return diags
@@ -227,7 +227,7 @@ func (n *NodeDestroyResourceInstance) managedResourceExecute(_ context.Context,

 	// Run destroy provisioners if not tainted
 	if state.Status != states.ObjectTainted {
-		applyProvisionersDiags := n.evalApplyProvisioners(evalCtx, state, false, configs.ProvisionerWhenDestroy)
+		applyProvisionersDiags := n.evalApplyProvisioners(ctx, evalCtx, state, false, configs.ProvisionerWhenDestroy)
 		diags = diags.Append(applyProvisionersDiags)
 		// keep the diags separate from the main set until we handle the cleanup

@@ -242,12 +242,12 @@ func (n *NodeDestroyResourceInstance) managedResourceExecute(_ context.Context,
 	// Managed resources need to be destroyed, while data sources
 	// are only removed from state.
 	// we pass a nil configuration to apply because we are destroying
-	s, d := n.apply(evalCtx, state, changeApply, nil, instances.RepetitionData{}, false)
+	s, d := n.apply(ctx, evalCtx, state, changeApply, nil, instances.RepetitionData{}, false)
 	state, diags = s, diags.Append(d)
 	// we don't return immediately here on error, so that the state can be
 	// finalized

-	err = n.writeResourceInstanceState(evalCtx, state, workingState)
+	err = n.writeResourceInstanceState(ctx, evalCtx, state, workingState)
 	if err != nil {
 		return diags.Append(err)
 	}
@@ -45,7 +45,7 @@ func (n *NodeForgetResourceInstance) Name() string {
 }

 // GraphNodeExecutable
-func (n *NodeForgetResourceInstance) Execute(_ context.Context, evalCtx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) {
+func (n *NodeForgetResourceInstance) Execute(ctx context.Context, evalCtx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) {
 	addr := n.ResourceInstanceAddr()

 	// Get our state
@@ -61,7 +61,7 @@ func (n *NodeForgetResourceInstance) Execute(_ context.Context, evalCont

 	var state *states.ResourceInstanceObject

-	state, readDiags := n.readResourceInstanceState(evalCtx, addr)
+	state, readDiags := n.readResourceInstanceState(ctx, evalCtx, addr)
 	diags = diags.Append(readDiags)
 	if diags.HasErrors() {
 		return diags
@@ -76,7 +76,7 @@ func (n *graphNodeImportState) ModulePath() addrs.Module {
 }

 // GraphNodeExecutable impl.
-func (n *graphNodeImportState) Execute(_ context.Context, evalCtx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) {
+func (n *graphNodeImportState) Execute(ctx context.Context, evalCtx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) {
 	// Reset our states
 	n.states = nil

@@ -100,7 +100,7 @@ func (n *graphNodeImportState) Execute(_ context.Context, evalCtx EvalContext, o
 	n.ResolvedProviderKey = asAbsNode.ResolvedProviderKey
 	log.Printf("[TRACE] graphNodeImportState: importing using %s", n.ResolvedProvider.ProviderConfig.InstanceString(n.ResolvedProviderKey))

-	provider, _, err := getProvider(evalCtx, n.ResolvedProvider.ProviderConfig, n.ResolvedProviderKey)
+	provider, _, err := getProvider(ctx, evalCtx, n.ResolvedProvider.ProviderConfig, n.ResolvedProviderKey)
 	diags = diags.Append(err)
 	if diags.HasErrors() {
 		return diags
@@ -244,7 +244,7 @@ func (n *graphNodeImportStateSub) Path() addrs.ModuleInstance {
 }

 // GraphNodeExecutable impl.
-func (n *graphNodeImportStateSub) Execute(_ context.Context, evalCtx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) {
+func (n *graphNodeImportStateSub) Execute(ctx context.Context, evalCtx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) {
 	// If the Ephemeral type isn't set, then it is an error
 	if n.State.TypeName == "" {
 		diags = diags.Append(fmt.Errorf("import of %s didn't set type", n.TargetAddr.String()))
@@ -261,7 +261,7 @@ func (n *graphNodeImportStateSub) Execute(_ context.Context, evalCtx EvalContext
 		},
 		ResolvedProviderKey: n.ResolvedProviderKey,
 	}
-	state, refreshDiags := riNode.refresh(evalCtx, states.NotDeposed, state)
+	state, refreshDiags := riNode.refresh(ctx, evalCtx, states.NotDeposed, state)
 	diags = diags.Append(refreshDiags)
 	if diags.HasErrors() {
 		return diags
@@ -297,6 +297,6 @@ func (n *graphNodeImportStateSub) Execute(_ context.Context, evalCtx EvalContext
 		state.Value = state.Value.MarkWithPaths(combined)
 	}

-	diags = diags.Append(riNode.writeResourceInstanceState(evalCtx, state, workingState))
+	diags = diags.Append(riNode.writeResourceInstanceState(ctx, evalCtx, state, workingState))
 	return diags
 }
@@ -178,7 +178,7 @@ func (n *nodeExpandPlannableResource) DynamicExpand(evalCtx EvalContext) (*Graph
 	instAddrs := addrs.MakeSet[addrs.Checkable]()
 	for _, module := range moduleInstances {
 		resAddr := n.Addr.Resource.Absolute(module)
-		err := n.expandResourceInstances(evalCtx, resAddr, &g, instAddrs)
+		err := n.expandResourceInstances(context.TODO(), evalCtx, resAddr, &g, instAddrs)
 		diags = diags.Append(err)
 	}
 	if diags.HasErrors() {
@@ -211,7 +211,7 @@ func (n *nodeExpandPlannableResource) DynamicExpand(evalCtx EvalContext) (*Graph
 // within, the caller must register the final superset instAddrs with the
 // checks subsystem so that it knows the fully expanded set of checkable
 // object instances for this resource instance.
-func (n *nodeExpandPlannableResource) expandResourceInstances(globalCtx EvalContext, resAddr addrs.AbsResource, g *Graph, instAddrs addrs.Set[addrs.Checkable]) error {
+func (n *nodeExpandPlannableResource) expandResourceInstances(ctx context.Context, globalCtx EvalContext, resAddr addrs.AbsResource, g *Graph, instAddrs addrs.Set[addrs.Checkable]) error {
 	var diags tfdiags.Diagnostics

 	// The rest of our work here needs to know which module instance it's
@@ -306,7 +306,7 @@ func (n *nodeExpandPlannableResource) expandResourceInstances(globalCtx EvalCont
 	// construct a subgraph just for this individual modules's instances and
 	// then we'll steal all of its nodes and edges to incorporate into our
 	// main graph which contains all of the resource instances together.
-	instG, err := n.resourceInstanceSubgraph(moduleCtx, resAddr, instanceAddrs)
+	instG, err := n.resourceInstanceSubgraph(ctx, moduleCtx, resAddr, instanceAddrs)
 	if err != nil {
 		diags = diags.Append(err)
 		return diags.ErrWithWarnings()
@@ -316,7 +316,7 @@ func (n *nodeExpandPlannableResource) expandResourceInstances(globalCtx EvalCont
 	return diags.ErrWithWarnings()
 }

-func (n *nodeExpandPlannableResource) resourceInstanceSubgraph(evalCtx EvalContext, addr addrs.AbsResource, instanceAddrs []addrs.AbsResourceInstance) (*Graph, error) {
+func (n *nodeExpandPlannableResource) resourceInstanceSubgraph(ctx context.Context, evalCtx EvalContext, addr addrs.AbsResource, instanceAddrs []addrs.AbsResourceInstance) (*Graph, error) {
 	var diags tfdiags.Diagnostics

 	var commandLineImportTargets []CommandLineImportTarget
@@ -86,7 +86,7 @@ func (n *NodePlanDestroyableResourceInstance) Execute(ctx context.Context, evalC
 	return diags
 }

-func (n *NodePlanDestroyableResourceInstance) managedResourceExecute(_ context.Context, evalCtx EvalContext, _ walkOperation) (diags tfdiags.Diagnostics) {
+func (n *NodePlanDestroyableResourceInstance) managedResourceExecute(ctx context.Context, evalCtx EvalContext, _ walkOperation) (diags tfdiags.Diagnostics) {
 	addr := n.ResourceInstanceAddr()

 	// Declare a bunch of variables that are used for state during
@@ -95,7 +95,7 @@ func (n *NodePlanDestroyableResourceInstance) managedResourceExecute(_ context.C
 	var change *plans.ResourceInstanceChange
 	var state *states.ResourceInstanceObject

-	state, err := n.readResourceInstanceState(evalCtx, addr)
+	state, err := n.readResourceInstanceState(ctx, evalCtx, addr)
 	diags = diags.Append(err)
 	if diags.HasErrors() {
 		return diags
@@ -111,23 +111,23 @@ func (n *NodePlanDestroyableResourceInstance) managedResourceExecute(_ context.C
 	// conditionals must agree (be exactly opposite) in order to get the
 	// correct behavior in both cases.
 	if n.skipRefresh {
-		diags = diags.Append(n.writeResourceInstanceState(evalCtx, state, prevRunState))
+		diags = diags.Append(n.writeResourceInstanceState(ctx, evalCtx, state, prevRunState))
 		if diags.HasErrors() {
 			return diags
 		}
-		diags = diags.Append(n.writeResourceInstanceState(evalCtx, state, refreshState))
+		diags = diags.Append(n.writeResourceInstanceState(ctx, evalCtx, state, refreshState))
 		if diags.HasErrors() {
 			return diags
 		}
 	}

-	change, destroyPlanDiags := n.planDestroy(evalCtx, state, "")
+	change, destroyPlanDiags := n.planDestroy(ctx, evalCtx, state, "")
 	diags = diags.Append(destroyPlanDiags)
 	if diags.HasErrors() {
 		return diags
 	}

-	diags = diags.Append(n.writeChange(evalCtx, change, ""))
+	diags = diags.Append(n.writeChange(ctx, evalCtx, change, ""))
 	if diags.HasErrors() {
 		return diags
 	}
@@ -136,7 +136,7 @@ func (n *NodePlanDestroyableResourceInstance) managedResourceExecute(_ context.C
 	return diags
 }

-func (n *NodePlanDestroyableResourceInstance) dataResourceExecute(_ context.Context, evalCtx EvalContext, _ walkOperation) (diags tfdiags.Diagnostics) {
+func (n *NodePlanDestroyableResourceInstance) dataResourceExecute(ctx context.Context, evalCtx EvalContext, _ walkOperation) (diags tfdiags.Diagnostics) {

 	// We may not be able to read a prior data source from the state if the
 	// schema was upgraded and we are destroying before ever refreshing that
@@ -152,5 +152,5 @@ func (n *NodePlanDestroyableResourceInstance) dataResourceExecute(_ context.Cont
 		},
 		ProviderAddr: n.ResolvedProvider.ProviderConfig,
 	}
-	return diags.Append(n.writeChange(evalCtx, change, ""))
+	return diags.Append(n.writeChange(ctx, evalCtx, change, ""))
 }
@@ -126,13 +126,13 @@ func (n *NodePlannableResourceInstance) Execute(ctx context.Context, evalCtx Eva
 	return diags
 }

-func (n *NodePlannableResourceInstance) dataResourceExecute(_ context.Context, evalCtx EvalContext) (diags tfdiags.Diagnostics) {
+func (n *NodePlannableResourceInstance) dataResourceExecute(ctx context.Context, evalCtx EvalContext) (diags tfdiags.Diagnostics) {
 	config := n.Config
 	addr := n.ResourceInstanceAddr()

 	var change *plans.ResourceInstanceChange

-	_, providerSchema, err := getProvider(evalCtx, n.ResolvedProvider.ProviderConfig, n.ResolvedProviderKey)
+	_, providerSchema, err := getProvider(ctx, evalCtx, n.ResolvedProvider.ProviderConfig, n.ResolvedProviderKey)
 	diags = diags.Append(err)
 	if diags.HasErrors() {
 		return diags
@@ -148,7 +148,7 @@ func (n *NodePlannableResourceInstance) dataResourceExecute(_ context.Context, e
 		checkRuleSeverity = tfdiags.Warning
 	}

-	change, state, repeatData, planDiags := n.planDataSource(evalCtx, checkRuleSeverity, n.skipPlanChanges)
+	change, state, repeatData, planDiags := n.planDataSource(ctx, evalCtx, checkRuleSeverity, n.skipPlanChanges)
 	diags = diags.Append(planDiags)
 	if diags.HasErrors() {
 		return diags
@@ -156,16 +156,16 @@ func (n *NodePlannableResourceInstance) dataResourceExecute(_ context.Context, e

 	// write the data source into both the refresh state and the
 	// working state
-	diags = diags.Append(n.writeResourceInstanceState(evalCtx, state, refreshState))
+	diags = diags.Append(n.writeResourceInstanceState(ctx, evalCtx, state, refreshState))
 	if diags.HasErrors() {
 		return diags
 	}
-	diags = diags.Append(n.writeResourceInstanceState(evalCtx, state, workingState))
+	diags = diags.Append(n.writeResourceInstanceState(ctx, evalCtx, state, workingState))
 	if diags.HasErrors() {
 		return diags
 	}

-	diags = diags.Append(n.writeChange(evalCtx, change, ""))
+	diags = diags.Append(n.writeChange(ctx, evalCtx, change, ""))

 	// Post-conditions might block further progress. We intentionally do this
 	// _after_ writing the state/diff because we want to check against
@@ -182,7 +182,7 @@ func (n *NodePlannableResourceInstance) dataResourceExecute(_ context.Context, e
|
||||
return diags
|
||||
}
|
||||
|
||||
func (n *NodePlannableResourceInstance) managedResourceExecute(_ context.Context, evalCtx EvalContext) (diags tfdiags.Diagnostics) {
func (n *NodePlannableResourceInstance) managedResourceExecute(ctx context.Context, evalCtx EvalContext) (diags tfdiags.Diagnostics) {
config := n.Config
addr := n.ResourceInstanceAddr()

@@ -193,7 +193,7 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(_ context.Context
checkRuleSeverity = tfdiags.Warning
}

provider, providerSchema, err := getProvider(evalCtx, n.ResolvedProvider.ProviderConfig, n.ResolvedProviderKey)
provider, providerSchema, err := getProvider(ctx, evalCtx, n.ResolvedProvider.ProviderConfig, n.ResolvedProviderKey)
diags = diags.Append(err)
if diags.HasErrors() {
return diags
@@ -229,10 +229,10 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(_ context.Context
// If the resource is to be imported, we now ask the provider for an Import
// and a Refresh, and save the resulting state to instanceRefreshState.
if importing {
instanceRefreshState, diags = n.importState(evalCtx, addr, n.importTarget.ID, provider, providerSchema)
instanceRefreshState, diags = n.importState(ctx, evalCtx, addr, n.importTarget.ID, provider, providerSchema)
} else {
var readDiags tfdiags.Diagnostics
instanceRefreshState, readDiags = n.readResourceInstanceState(evalCtx, addr)
instanceRefreshState, readDiags = n.readResourceInstanceState(ctx, evalCtx, addr)
diags = diags.Append(readDiags)
if diags.HasErrors() {
return diags
@@ -244,13 +244,13 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(_ context.Context
// result of any schema upgrading that readResourceInstanceState just did,
// but not include any out-of-band changes we might detect in in the
// refresh step below.
diags = diags.Append(n.writeResourceInstanceState(evalCtx, instanceRefreshState, prevRunState))
diags = diags.Append(n.writeResourceInstanceState(ctx, evalCtx, instanceRefreshState, prevRunState))
if diags.HasErrors() {
return diags
}
// Also the refreshState, because that should still reflect schema upgrades
// even if it doesn't reflect upstream changes.
diags = diags.Append(n.writeResourceInstanceState(evalCtx, instanceRefreshState, refreshState))
diags = diags.Append(n.writeResourceInstanceState(ctx, evalCtx, instanceRefreshState, refreshState))
if diags.HasErrors() {
return diags
}
@@ -271,7 +271,7 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(_ context.Context
instanceRefreshState.CreateBeforeDestroy = n.Config.Managed.CreateBeforeDestroy || n.ForceCreateBeforeDestroy

if prevCreateBeforeDestroy != instanceRefreshState.CreateBeforeDestroy && n.skipRefresh {
diags = diags.Append(n.writeResourceInstanceState(evalCtx, instanceRefreshState, refreshState))
diags = diags.Append(n.writeResourceInstanceState(ctx, evalCtx, instanceRefreshState, refreshState))
if diags.HasErrors() {
return diags
}
@@ -282,7 +282,7 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(_ context.Context
// Refresh, maybe
// The import process handles its own refresh
if !n.skipRefresh && !importing {
s, refreshDiags := n.refresh(evalCtx, states.NotDeposed, instanceRefreshState)
s, refreshDiags := n.refresh(ctx, evalCtx, states.NotDeposed, instanceRefreshState)
diags = diags.Append(refreshDiags)
if diags.HasErrors() {
return diags
@@ -299,7 +299,7 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(_ context.Context
instanceRefreshState.Dependencies = mergeDeps(n.Dependencies, instanceRefreshState.Dependencies)
}

diags = diags.Append(n.writeResourceInstanceState(evalCtx, instanceRefreshState, refreshState))
diags = diags.Append(n.writeResourceInstanceState(ctx, evalCtx, instanceRefreshState, refreshState))
if diags.HasErrors() {
return diags
}
@@ -325,7 +325,7 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(_ context.Context
}

change, instancePlanState, repeatData, planDiags := n.plan(
evalCtx, nil, instanceRefreshState, n.ForceCreateBeforeDestroy, n.forceReplace,
ctx, evalCtx, nil, instanceRefreshState, n.ForceCreateBeforeDestroy, n.forceReplace,
)
diags = diags.Append(planDiags)
if diags.HasErrors() {
@@ -346,7 +346,7 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(_ context.Context
GeneratedConfig: n.generatedConfigHCL,
},
}
diags = diags.Append(n.writeChange(evalCtx, change, ""))
diags = diags.Append(n.writeChange(ctx, evalCtx, change, ""))
}

return diags
@@ -377,7 +377,7 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(_ context.Context
// Future work should adjust these APIs such that it is impossible to
// update these two data structures incorrectly through any objects
// reachable via the tofu.EvalContext API.
diags = diags.Append(n.writeChange(evalCtx, change, ""))
diags = diags.Append(n.writeChange(ctx, evalCtx, change, ""))
if diags.HasErrors() {
return diags
}
@@ -386,7 +386,7 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(_ context.Context
return diags
}

diags = diags.Append(n.writeResourceInstanceState(evalCtx, instancePlanState, workingState))
diags = diags.Append(n.writeResourceInstanceState(ctx, evalCtx, instancePlanState, workingState))
if diags.HasErrors() {
return diags
}
@@ -399,7 +399,7 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(_ context.Context
// the refresh state will be the final state for this resource, so
// finalize the dependencies here if they need to be updated.
instanceRefreshState.Dependencies = n.Dependencies
diags = diags.Append(n.writeResourceInstanceState(evalCtx, instanceRefreshState, refreshState))
diags = diags.Append(n.writeResourceInstanceState(ctx, evalCtx, instanceRefreshState, refreshState))
if diags.HasErrors() {
return diags
}
@@ -442,7 +442,7 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(_ context.Context
// any output values referring to this will not react to the drift.
// (Even if we didn't actually refresh above, this will still save
// the result of any schema upgrading we did in readResourceInstanceState.)
diags = diags.Append(n.writeResourceInstanceState(evalCtx, instanceRefreshState, workingState))
diags = diags.Append(n.writeResourceInstanceState(ctx, evalCtx, instanceRefreshState, workingState))
if diags.HasErrors() {
return diags
}
@@ -497,7 +497,7 @@ func (n *NodePlannableResourceInstance) replaceTriggered(evalCtx EvalContext, re
return diags
}

func (n *NodePlannableResourceInstance) importState(evalCtx EvalContext, addr addrs.AbsResourceInstance, importId string, provider providers.Interface, providerSchema providers.ProviderSchema) (*states.ResourceInstanceObject, tfdiags.Diagnostics) {
func (n *NodePlannableResourceInstance) importState(ctx context.Context, evalCtx EvalContext, addr addrs.AbsResourceInstance, importId string, provider providers.Interface, providerSchema providers.ProviderSchema) (*states.ResourceInstanceObject, tfdiags.Diagnostics) {
var diags tfdiags.Diagnostics
absAddr := addr.Resource.Absolute(evalCtx.Path())

@@ -577,7 +577,7 @@ func (n *NodePlannableResourceInstance) importState(evalCtx EvalContext, addr ad
},
ResolvedProviderKey: n.ResolvedProviderKey,
}
instanceRefreshState, refreshDiags := riNode.refresh(evalCtx, states.NotDeposed, importedState)
instanceRefreshState, refreshDiags := riNode.refresh(ctx, evalCtx, states.NotDeposed, importedState)
diags = diags.Append(refreshDiags)
if diags.HasErrors() {
return instanceRefreshState, diags
@@ -680,7 +680,7 @@ func (n *NodePlannableResourceInstance) importState(evalCtx EvalContext, addr ad
}
}

diags = diags.Append(riNode.writeResourceInstanceState(evalCtx, instanceRefreshState, refreshState))
diags = diags.Append(riNode.writeResourceInstanceState(ctx, evalCtx, instanceRefreshState, refreshState))
return instanceRefreshState, diags
}

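A detail worth noting in the hunks above: plan-time writes fan out to three distinct state artifacts, and the comments explain why the ordering matters. prevRunState captures the previous run's object after schema upgrades only, refreshState additionally captures any upstream drift found by refresh, and workingState holds the planned new object. A simplified sketch of that sequencing (the string values are placeholders, not real state types):

package main

import "fmt"

// write stands in for writeResourceInstanceState, which now also takes ctx.
func write(state, obj string) {
	fmt.Printf("write %q to %s\n", obj, state)
}

func main() {
	obj := "object after schema upgrade" // result of readResourceInstanceState

	write("prevRunState", obj) // previous run's object, upgraded shape only
	write("refreshState", obj) // same, so schema upgrades survive -refresh=false

	obj = "object after refresh" // result of n.refresh, when refresh runs
	write("refreshState", obj)   // now also reflects out-of-band drift

	obj = "planned new object" // result of n.plan
	write("workingState", obj) // what downstream expressions evaluate against
}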
@@ -120,10 +120,10 @@ func (n *NodePlannableResourceInstanceOrphan) dataResourceExecute(_ context.Cont
return nil
}

func (n *NodePlannableResourceInstanceOrphan) managedResourceExecute(_ context.Context, evalCtx EvalContext) (diags tfdiags.Diagnostics) {
func (n *NodePlannableResourceInstanceOrphan) managedResourceExecute(ctx context.Context, evalCtx EvalContext) (diags tfdiags.Diagnostics) {
addr := n.ResourceInstanceAddr()

oldState, readDiags := n.readResourceInstanceState(evalCtx, addr)
oldState, readDiags := n.readResourceInstanceState(ctx, evalCtx, addr)
diags = diags.Append(readDiags)
if diags.HasErrors() {
return diags
@@ -131,13 +131,13 @@ func (n *NodePlannableResourceInstanceOrphan) managedResourceExecute(_ context.C

// Note any upgrades that readResourceInstanceState might've done in the
// prevRunState, so that it'll conform to current schema.
diags = diags.Append(n.writeResourceInstanceState(evalCtx, oldState, prevRunState))
diags = diags.Append(n.writeResourceInstanceState(ctx, evalCtx, oldState, prevRunState))
if diags.HasErrors() {
return diags
}
// Also the refreshState, because that should still reflect schema upgrades
// even if not refreshing.
diags = diags.Append(n.writeResourceInstanceState(evalCtx, oldState, refreshState))
diags = diags.Append(n.writeResourceInstanceState(ctx, evalCtx, oldState, refreshState))
if diags.HasErrors() {
return diags
}
@@ -149,13 +149,13 @@ func (n *NodePlannableResourceInstanceOrphan) managedResourceExecute(_ context.C
// plan before apply, and may not handle a missing resource during
// Delete correctly. If this is a simple refresh, OpenTofu is
// expected to remove the missing resource from the state entirely
refreshedState, refreshDiags := n.refresh(evalCtx, states.NotDeposed, oldState)
refreshedState, refreshDiags := n.refresh(ctx, evalCtx, states.NotDeposed, oldState)
diags = diags.Append(refreshDiags)
if diags.HasErrors() {
return diags
}

diags = diags.Append(n.writeResourceInstanceState(evalCtx, refreshedState, refreshState))
diags = diags.Append(n.writeResourceInstanceState(ctx, evalCtx, refreshedState, refreshState))
if diags.HasErrors() {
return diags
}
@@ -170,7 +170,7 @@ func (n *NodePlannableResourceInstanceOrphan) managedResourceExecute(_ context.C
// to plan because there is no longer any state and it doesn't exist in the
// config.
if n.skipPlanChanges || oldState == nil || oldState.Value.IsNull() {
return diags.Append(n.writeResourceInstanceState(evalCtx, oldState, workingState))
return diags.Append(n.writeResourceInstanceState(ctx, evalCtx, oldState, workingState))
}

var change *plans.ResourceInstanceChange
@@ -188,17 +188,17 @@ func (n *NodePlannableResourceInstanceOrphan) managedResourceExecute(_ context.C

if shouldForget {
if shouldDestroy {
change, planDiags = n.planDestroy(evalCtx, oldState, "")
change, planDiags = n.planDestroy(ctx, evalCtx, oldState, "")
} else {
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagWarning,
Summary: "Resource going to be removed from the state",
Detail: fmt.Sprintf("After this plan gets applied, the resource %s will not be managed anymore by OpenTofu.\n\nIn case you want to manage the resource again, you will have to import it.", n.Addr),
})
change = n.planForget(evalCtx, oldState, "")
change = n.planForget(ctx, evalCtx, oldState, "")
}
} else {
change, planDiags = n.planDestroy(evalCtx, oldState, "")
change, planDiags = n.planDestroy(ctx, evalCtx, oldState, "")
}

diags = diags.Append(planDiags)
@@ -211,7 +211,7 @@ func (n *NodePlannableResourceInstanceOrphan) managedResourceExecute(_ context.C
// sometimes not have a reason.)
change.ActionReason = n.deleteActionReason(evalCtx)

diags = diags.Append(n.writeChange(evalCtx, change, ""))
diags = diags.Append(n.writeChange(ctx, evalCtx, change, ""))
if diags.HasErrors() {
return diags
}
@@ -221,7 +221,7 @@ func (n *NodePlannableResourceInstanceOrphan) managedResourceExecute(_ context.C
return diags
}

return diags.Append(n.writeResourceInstanceState(evalCtx, nil, workingState))
return diags.Append(n.writeResourceInstanceState(ctx, evalCtx, nil, workingState))
}

func (n *NodePlannableResourceInstanceOrphan) deleteActionReason(evalCtx EvalContext) plans.ResourceInstanceChangeActionReason {

@@ -63,7 +63,7 @@ func (n *NodeValidatableResource) Execute(ctx context.Context, evalCtx EvalConte
return diags
}

diags = diags.Append(n.validateResource(evalCtx))
diags = diags.Append(n.validateResource(ctx, evalCtx))

diags = diags.Append(n.validateCheckRules(evalCtx, n.Config))

@@ -86,7 +86,7 @@ func (n *NodeValidatableResource) Execute(ctx context.Context, evalCtx EvalConte
}

// Validate Provisioner Config
diags = diags.Append(n.validateProvisioner(evalCtx, &provisioner))
diags = diags.Append(n.validateProvisioner(ctx, evalCtx, &provisioner))
if diags.HasErrors() {
return diags
}
@@ -98,7 +98,7 @@ func (n *NodeValidatableResource) Execute(ctx context.Context, evalCtx EvalConte
// validateProvisioner validates the configuration of a provisioner belonging to
// a resource. The provisioner config is expected to contain the merged
// connection configurations.
func (n *NodeValidatableResource) validateProvisioner(evalCtx EvalContext, p *configs.Provisioner) tfdiags.Diagnostics {
func (n *NodeValidatableResource) validateProvisioner(_ context.Context, evalCtx EvalContext, p *configs.Provisioner) tfdiags.Diagnostics {
var diags tfdiags.Diagnostics

provisioner, err := evalCtx.Provisioner(p.Type)
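Note the asymmetry in this file: Execute and validateResource thread ctx onward, while validateProvisioner accepts the context but binds it to "_" because nothing in its body needs it yet. Keeping the parameter in the signature anyway makes call sites uniform, so no further churn is needed once provisioner validation makes context-aware calls. A tiny sketch of that convention (hypothetical function, not from the codebase):

package main

import (
	"context"
	"fmt"
)

// validateName accepts a context purely for signature consistency with the
// rest of the plumbed call chain; the "_" records that it is intentionally
// unused for now, so adding a real use later changes no call sites.
func validateName(_ context.Context, name string) error {
	if name == "" {
		return fmt.Errorf("name must not be empty")
	}
	return nil
}

func main() {
	fmt.Println(validateName(context.Background(), "example"))
}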
@@ -291,10 +291,10 @@ var connectionBlockSupersetSchema = &configschema.Block{
},
}

func (n *NodeValidatableResource) validateResource(evalCtx EvalContext) tfdiags.Diagnostics {
func (n *NodeValidatableResource) validateResource(ctx context.Context, evalCtx EvalContext) tfdiags.Diagnostics {
var diags tfdiags.Diagnostics

provider, providerSchema, err := getProvider(evalCtx, n.ResolvedProvider.ProviderConfig, addrs.NoKey) // Provider Instance Keys are ignored during validate
provider, providerSchema, err := getProvider(ctx, evalCtx, n.ResolvedProvider.ProviderConfig, addrs.NoKey) // Provider Instance Keys are ignored during validate
diags = diags.Append(err)
if diags.HasErrors() {
return diags

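The test updates that follow supply the new first argument with t.Context(). That method was added to the standard testing package in Go 1.24 and returns a per-test context that is canceled just before the test's cleanup functions run, so code under test observes cancellation if it outlives the test. A minimal sketch of the call shape, with a hypothetical function standing in for validateResource/validateProvisioner:

package example

import (
	"context"
	"testing"
)

// needsContext stands in for the node methods that now take a
// context.Context as their first argument.
func needsContext(ctx context.Context) error {
	return ctx.Err()
}

// TestNeedsContext passes t.Context() (Go 1.24+), which stays live for the
// duration of the test body and is canceled before cleanup runs.
func TestNeedsContext(t *testing.T) {
	if err := needsContext(t.Context()); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}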
@@ -56,7 +56,7 @@ func TestNodeValidatableResource_ValidateProvisioner_valid(t *testing.T) {
},
}

diags := node.validateProvisioner(ctx, pc)
diags := node.validateProvisioner(t.Context(), ctx, pc)
if diags.HasErrors() {
t.Fatalf("node.Eval failed: %s", diags.Err())
}
@@ -101,7 +101,7 @@ func TestNodeValidatableResource_ValidateProvisioner__warning(t *testing.T) {
}
}

diags := node.validateProvisioner(ctx, pc)
diags := node.validateProvisioner(t.Context(), ctx, pc)
if len(diags) != 1 {
t.Fatalf("wrong number of diagnostics in %s; want one warning", diags.ErrWithWarnings())
}
@@ -146,7 +146,7 @@ func TestNodeValidatableResource_ValidateProvisioner__connectionInvalid(t *testi
},
}

diags := node.validateProvisioner(ctx, pc)
diags := node.validateProvisioner(t.Context(), ctx, pc)
if !diags.HasErrors() {
t.Fatalf("node.Eval succeeded; want error")
}
@@ -198,7 +198,7 @@ func TestNodeValidatableResource_ValidateResource_managedResource(t *testing.T)
ctx.ProviderSchemaSchema = mp.GetProviderSchema()
ctx.ProviderProvider = p

err := node.validateResource(ctx)
err := node.validateResource(t.Context(), ctx)
if err != nil {
t.Fatalf("err: %s", err)
}
@@ -261,7 +261,7 @@ func TestNodeValidatableResource_ValidateResource_managedResourceCount(t *testin
},
}

diags := node.validateResource(ctx)
diags := node.validateResource(t.Context(), ctx)
if diags.HasErrors() {
t.Fatalf("err: %s", diags.Err())
}
@@ -312,7 +312,7 @@ func TestNodeValidatableResource_ValidateResource_dataSource(t *testing.T) {
ctx.ProviderSchemaSchema = mp.GetProviderSchema()
ctx.ProviderProvider = p

diags := node.validateResource(ctx)
diags := node.validateResource(t.Context(), ctx)
if diags.HasErrors() {
t.Fatalf("err: %s", diags.Err())
}
@@ -348,7 +348,7 @@ func TestNodeValidatableResource_ValidateResource_valid(t *testing.T) {
ctx.ProviderSchemaSchema = mp.GetProviderSchema()
ctx.ProviderProvider = p

diags := node.validateResource(ctx)
diags := node.validateResource(t.Context(), ctx)
if diags.HasErrors() {
t.Fatalf("err: %s", diags.Err())
}
@@ -385,7 +385,7 @@ func TestNodeValidatableResource_ValidateResource_warningsAndErrorsPassedThrough
ctx.ProviderSchemaSchema = mp.GetProviderSchema()
ctx.ProviderProvider = p

diags := node.validateResource(ctx)
diags := node.validateResource(t.Context(), ctx)
if !diags.HasErrors() {
t.Fatal("unexpected success; want error")
}
@@ -448,7 +448,7 @@ func TestNodeValidatableResource_ValidateResource_invalidDependsOn(t *testing.T)
ctx.ProviderSchemaSchema = mp.GetProviderSchema()
ctx.ProviderProvider = p

diags := node.validateResource(ctx)
diags := node.validateResource(t.Context(), ctx)
if diags.HasErrors() {
t.Fatalf("error for supposedly-valid config: %s", diags.ErrWithWarnings())
}
@@ -469,7 +469,7 @@ func TestNodeValidatableResource_ValidateResource_invalidDependsOn(t *testing.T)
},
})

diags = node.validateResource(ctx)
diags = node.validateResource(t.Context(), ctx)
if !diags.HasErrors() {
t.Fatal("no error for invalid depends_on")
}
@@ -485,7 +485,7 @@ func TestNodeValidatableResource_ValidateResource_invalidDependsOn(t *testing.T)
},
})

diags = node.validateResource(ctx)
diags = node.validateResource(t.Context(), ctx)
if !diags.HasErrors() {
t.Fatal("no error for invalid depends_on")
}
@@ -532,7 +532,7 @@ func TestNodeValidatableResource_ValidateResource_invalidIgnoreChangesNonexisten
ctx.ProviderSchemaSchema = mp.GetProviderSchema()
ctx.ProviderProvider = p

diags := node.validateResource(ctx)
diags := node.validateResource(t.Context(), ctx)
if diags.HasErrors() {
t.Fatalf("error for supposedly-valid config: %s", diags.ErrWithWarnings())
}
@@ -545,7 +545,7 @@ func TestNodeValidatableResource_ValidateResource_invalidIgnoreChangesNonexisten
},
})

diags = node.validateResource(ctx)
diags = node.validateResource(t.Context(), ctx)
if !diags.HasErrors() {
t.Fatal("no error for invalid ignore_changes")
}
@@ -615,7 +615,7 @@ func TestNodeValidatableResource_ValidateResource_invalidIgnoreChangesComputed(t
ctx.ProviderSchemaSchema = mp.GetProviderSchema()
ctx.ProviderProvider = p

diags := node.validateResource(ctx)
diags := node.validateResource(t.Context(), ctx)
if diags.HasErrors() {
t.Fatalf("error for supposedly-valid config: %s", diags.ErrWithWarnings())
}
@@ -628,7 +628,7 @@ func TestNodeValidatableResource_ValidateResource_invalidIgnoreChangesComputed(t
},
})

diags = node.validateResource(ctx)
diags = node.validateResource(t.Context(), ctx)
if diags.HasErrors() {
t.Fatalf("got unexpected error: %s", diags.ErrWithWarnings())
}

@@ -53,7 +53,7 @@ type AttachSchemaTransformer struct {
Config *configs.Config
}

func (t *AttachSchemaTransformer) Transform(_ context.Context, g *Graph) error {
func (t *AttachSchemaTransformer) Transform(ctx context.Context, g *Graph) error {
if t.Plugins == nil {
// Should never happen with a reasonable caller, but we'll return a
// proper error here anyway so that we'll fail gracefully.
@@ -69,7 +69,7 @@ func (t *AttachSchemaTransformer) Transform(_ context.Context, g *Graph) error {
providerFqn := tv.Provider()

// TODO: Plumb a useful context.Context through to here.
schema, version, err := t.Plugins.ResourceTypeSchema(context.TODO(), providerFqn, mode, typeName)
schema, version, err := t.Plugins.ResourceTypeSchema(ctx, providerFqn, mode, typeName)
if err != nil {
return fmt.Errorf("failed to read schema for %s in %s: %w", addr, providerFqn, err)
}
@@ -84,7 +84,7 @@ func (t *AttachSchemaTransformer) Transform(_ context.Context, g *Graph) error {
if tv, ok := v.(GraphNodeAttachProviderConfigSchema); ok {
providerAddr := tv.ProviderAddr()
// TODO: Plumb a useful context.Context through to here.
schema, err := t.Plugins.ProviderConfigSchema(context.TODO(), providerAddr.Provider)
schema, err := t.Plugins.ProviderConfigSchema(ctx, providerAddr.Provider)
if err != nil {
return fmt.Errorf("failed to read provider configuration schema for %s: %w", providerAddr.Provider, err)
}

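The transformer hunks close the loop at the graph-transformation layer: Transform now names its context, and the context.TODO() placeholders that the TODO comments apologized for are replaced with the plumbed ctx, so schema lookups run under the caller's cancellation scope. A minimal sketch of the same before/after, with hypothetical names standing in for the plugin-schema lookup:

package main

import (
	"context"
	"fmt"
)

// schemaSource stands in for the Plugins field; lookup stands in for
// ResourceTypeSchema/ProviderConfigSchema, which accept a context so the
// underlying gRPC schema request can be canceled and, eventually, traced.
type schemaSource struct{}

func (s *schemaSource) lookup(ctx context.Context, name string) (string, error) {
	if err := ctx.Err(); err != nil {
		return "", err // stop promptly if the walk was canceled
	}
	return "schema of " + name, nil
}

// transform mirrors the new shape of AttachSchemaTransformer.Transform:
// each lookup that previously ran under context.TODO() now receives ctx.
func transform(ctx context.Context, s *schemaSource, names []string) error {
	for _, name := range names {
		schema, err := s.lookup(ctx, name)
		if err != nil {
			return fmt.Errorf("failed to read schema for %s: %w", name, err)
		}
		fmt.Println(schema)
	}
	return nil
}

func main() {
	if err := transform(context.Background(), &schemaSource{}, []string{"example_thing"}); err != nil {
		fmt.Println("error:", err)
	}
}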