Files
opentf/internal/engine/planning/plan.go
James Humphries 19af81d28e Implement resource identity support (#3671)
Signed-off-by: James Humphries <james@james-humphries.co.uk>
Signed-off-by: Martin Atkins <mart@degeneration.co.uk>
Co-authored-by: Martin Atkins <mart@degeneration.co.uk>
2026-03-31 16:57:21 +01:00

240 lines
9.6 KiB
Go

// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package planning
import (
"context"
"fmt"
"log"
"strings"
"github.com/opentofu/opentofu/internal/addrs"
"github.com/opentofu/opentofu/internal/engine/plugins"
"github.com/opentofu/opentofu/internal/lang/eval"
"github.com/opentofu/opentofu/internal/lang/grapheval"
"github.com/opentofu/opentofu/internal/logging"
"github.com/opentofu/opentofu/internal/plans"
"github.com/opentofu/opentofu/internal/states"
"github.com/opentofu/opentofu/internal/tfdiags"
)
// PlanChanges is the main entry point, taking a state snapshot from the end
// of the previous plan/apply round and an instantiated configuration (bound
// to some input variable definitions) and returning a plan containing a set of
// proposed actions.
//
// The returned diagnostics may contain errors; in that case the returned plan
// is still non-nil (a best-effort description of what was learned before the
// failure) and has its Errored flag set.
func PlanChanges(ctx context.Context, prevRoundState *states.State, configInst *eval.ConfigInstance, providers plugins.Providers) (*plans.Plan, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	planCtx := newPlanContext(configInst.EvalContext(), prevRoundState, providers)

	// This configInst.DrivePlanning call blocks until the evaluator has
	// visited all expressions in the configuration and calls
	// [planContext.PlanDesiredResourceInstance] on the [planGlue] object for
	// each resource instance it discovers so that we can produce a planned
	// action and result value for each one.
	//
	// It also calls the various "Plan*Orphans" methods at different levels
	// of granularity once it's determined the full set of objects under
	// a given prefix, which planGlue uses to notice when there are
	// prevRoundState resource instances that are no longer in the desired
	// state and so plan to delete or forget them.
	//
	// If this completes without returning any error diagnostics then
	// planCtx.resourceInstObjs should accurately represent the relationships
	// between all of the "current" resource instance objects we found, but
	// we won't discover any deposed objects until the next step below.
	evalResult, moreDiags := configInst.DrivePlanning(ctx, func(oracle *eval.PlanningOracle) eval.PlanGlue {
		return &planGlue{
			planCtx: planCtx,
			oracle:  oracle,
		}
	})
	diags = diags.Append(moreDiags)
	if moreDiags.HasErrors() {
		// If we encountered errors during the eval-based phase then we'll halt
		// here but we'll still produce a best-effort [plans.Plan] describing
		// the situation because that often gives useful information for debugging
		// what caused the errors.
		intermediate, moreDiags := planCtx.Close(ctx)
		diags = diags.Append(moreDiags)
		plan, moreDiags := finalizePlan(ctx, intermediate, providers)
		diags = diags.Append(moreDiags)
		plan.Errored = true
		return plan, diags
	}
	if evalResult == nil {
		// This should not happen: we should always have an evalResult if
		// there weren't any errors.
		panic(fmt.Sprintf("%T.DrivePlanning returned nil result without any error diagnostics", configInst))
	}

	// We also need to deal with any "deposed" resource instances that were
	// in the previous round state. We do this separately afterwards because
	// these have no direct representation in the configuration at all and
	// so are not in scope for the config eval system. It's also relatively
	// rare for a previous round state to include deposed instances, since it
	// can happen only if the "delete" leg of a create-before-destroy replace
	// failed in the previous round.
	//
	// The provider instance manager should've planned ahead and arranged for
	// any providers we need for these to still be open, waiting for the
	// completion reports generated by our planning calls in this loop.
	//
	// After we complete this work, planCtx.resourceInstObjs is expanded to
	// also include any deposed resource instance objects we discovered.
	ctx = grapheval.ContextWithNewWorker(ctx)
	planGlue := evalResult.Glue.(*planGlue)
	for _, moduleState := range prevRoundState.Modules {
		for _, resourceState := range moduleState.Resources {
			for instKey, instState := range resourceState.Instances {
				instAddr := resourceState.Addr.Instance(instKey)
				for dk := range instState.Deposed {
					// We currently have a schism where we do all of the
					// discovery work using the traditional state model but
					// we then switch to using our new-style "full" object model
					// to act on what we've discovered. This is hopefully just
					// a temporary situation while we're operating in a mixed
					// world where most of the system doesn't know about the
					// new runtime yet.
					objState := prevRoundState.SyncWrapper().ResourceInstanceObjectFull(instAddr.Object(dk))
					if objState == nil {
						// If we get here then there's a bug in the
						// ResourceInstanceObjectFull function, because we
						// should only be here if instAddr and dk correspond
						// to an actual deposed object.
						panic(fmt.Sprintf("state has %s deposed object %q, but ResourceInstanceObjectFull didn't return it", instAddr, dk))
					}
					diags = diags.Append(
						planGlue.planDeposedResourceInstanceObject(ctx, instAddr, dk, objState),
					)
				}
			}
		}
	}

	// TODO: Consider factoring most of the work we've done here into a single
	// function that directly returns the "intermediate" object. Exposing
	// planCtx as a mutable object in this function doesn't seem necessary
	// anymore since we only actually care about the results from Close here.
	intermediate, moreDiags := planCtx.Close(ctx)
	diags = diags.Append(moreDiags)
	plan, moreDiags := finalizePlan(ctx, intermediate, providers)
	diags = diags.Append(moreDiags)
	if diags.HasErrors() {
		plan.Errored = true
	}
	return plan, diags
}
// finalizePlan converts the intermediate planning result into a [plans.Plan],
// reporting any self-referencing resource instance objects, building the set
// of encoded changes, and constructing the execution graph.
//
// The returned plan is intentionally only partially populated for now; see
// the inline TODOs on the struct literal below.
func finalizePlan(ctx context.Context, intermediate *planContextResult, providers plugins.Providers) (*plans.Plan, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	replaceOrders, selfRefs := findEffectiveReplaceOrders(intermediate.ResourceInstanceObjects)
	if len(selfRefs) > 0 {
		// TODO: _Should_ we return this error here? In principle we should only
		// get here if the evaluator failed to detect a self-reference, but in
		// theory that should be impossible and so maybe this is a "should never
		// happen" case, rather than a normal user-facing error?
		sortedAddrs := sortedResourceInstanceObjectAddrs(selfRefs.All())
		var msg strings.Builder
		msg.WriteString("The following objects depend on themselves either directly or indirectly:")
		for _, objAddr := range sortedAddrs {
			fmt.Fprintf(&msg, "\n - %s", objAddr)
		}
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Self-referencing resource instances",
			msg.String(),
		))
	}

	changes, changeDiags := buildPlanChanges(ctx,
		intermediate.ResourceInstanceObjects,
		replaceOrders,
		providers,
	)
	diags = diags.Append(changeDiags)

	graphBuilder := newExecGraphBuilder()
	graphBuilder.AddResourceInstanceObjectSubgraphs(
		intermediate.ResourceInstanceObjects,
		replaceOrders,
	)
	execGraph := graphBuilder.Finish()
	if logging.IsDebugOrHigher() {
		// FIXME: This can potentially contain sensitive values from the
		// configuration, so we should either remove this or change the
		// value representation to include sensitive value redactions.
		log.Println("[DEBUG] Planned execution graph:\n" + logging.Indent(execGraph.DebugRepr()))
	}

	return &plans.Plan{
		UIMode:       plans.NormalMode, // TODO: [PlanChanges] needs something analogous to [tofu.PlanOpts] for planning mode/options
		Changes:      changes,
		PrevRunState: intermediate.PrevRoundState,
		PriorState:   intermediate.RefreshedState,
		// TODO: various other fields that we need to actually make use
		// of this plan result. But this is intentionally just a partial
		// result for now because it's not clear that we'd even be using
		// plans.Plan in a final version of this new approach.

		// ExecutionGraph is a special extra field used only by this new
		// runtime, as a probably-temporary place to keep the serialized
		// execution graph so we can round-trip it through saved plan files
		// while the CLI layer is still working in terms of [plans.Plan].
		ExecutionGraph: execGraph.Marshal(),
	}, diags
}
// buildPlanChanges assembles a [plans.Changes] from the subset of discovered
// resource instance objects that carry a planned change, encoding each change
// using the schema of its resource type as reported by the provider.
//
// effectiveReplaceOrders supplies the final effective action for any object
// whose planned action is a replacement, so that the encoded change describes
// the action the end-user will actually see.
func buildPlanChanges(
	ctx context.Context,
	objs *resourceInstanceObjects,
	effectiveReplaceOrders addrs.Map[addrs.AbsResourceInstanceObject, resourceInstanceReplaceOrder],
	providers plugins.Providers,
) (*plans.Changes, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	result := plans.NewChanges().SyncWrapper()
	for objAddr, obj := range objs.All() {
		if obj.PlannedChange == nil {
			// Only objects that actually have a planned change contribute
			// anything to the result.
			continue
		}
		resourceAddr := obj.Addr.InstanceAddr.Resource.Resource
		schema, schemaDiags := providers.ResourceTypeSchema(ctx,
			obj.Provider,
			resourceAddr.Mode,
			resourceAddr.Type,
		)
		diags = diags.Append(schemaDiags)
		if schemaDiags.HasErrors() {
			continue // can't encode a change without a schema
		}
		encoded, err := obj.PlannedChange.Encode(schema)
		if err != nil {
			// TODO: Make this a proper diagnostic, since this can potentially
			// be user-facing if the provider returned something that's somehow
			// invalid. (That can only happen for built-in providers, because
			// for plugin-based providers we would already have used the schema
			// to decode the wire representation of this object.)
			diags = diags.Append(err)
			continue
		}
		if encoded.Action.IsReplace() {
			// Substitute the final effective change action now, to describe
			// the change accurately to the end-user.
			encoded.Action = effectiveReplaceOrders.Get(objAddr).ChangeAction()
		}
		result.AppendResourceInstanceChange(encoded)
	}
	return result.Close(), diags
}