From ca1d83fef758fef6dd2d8404b13cb19a40018719 Mon Sep 17 00:00:00 2001
From: Martin Atkins
Date: Wed, 3 Dec 2025 16:39:05 -0800
Subject: [PATCH] engine/planning: Shallow adoption of states.ResourceInstanceObjectFull

This is just a minimal set of changes to introduce uses of the new
states.ResourceInstanceObjectFull to all of the leaf functions related
to planning managed and data resource instances.

The main goal here was just to prove that we'd reasonably be able to
look up objects with the new type in all of the places we'd need to.
We're planning some more substantial changes to the planning engine in
future commits (e.g. to generate execution graphs instead of
traditional plans) and so we'll plumb this in better as part of that
work.

Signed-off-by: Martin Atkins
---
 internal/engine/planning/plan.go           | 17 +++++++++++-
 internal/engine/planning/plan_data.go      | 10 +++----
 internal/engine/planning/plan_eval_glue.go | 31 ++++++++++++++++++----
 internal/engine/planning/plan_managed.go   | 10 +++----
 4 files changed, 50 insertions(+), 18 deletions(-)

diff --git a/internal/engine/planning/plan.go b/internal/engine/planning/plan.go
index 88ec66309c..b585a33352 100644
--- a/internal/engine/planning/plan.go
+++ b/internal/engine/planning/plan.go
@@ -102,8 +102,23 @@ func PlanChanges(ctx context.Context, prevRoundState *states.State, configInst *
         for instKey, instState := range resourceState.Instances {
             instAddr := resourceState.Addr.Instance(instKey)
             for dk := range instState.Deposed {
+                // We currently have a schism where we do all of the
+                // discovery work using the traditional state model but
+                // we then switch to using our new-style "full" object model
+                // to act on what we've discovered. This is hopefully just
+                // a temporary situation while we're operating in a mixed
+                // world where most of the system doesn't know about the
+                // new runtime yet.
+                objState := prevRoundState.SyncWrapper().ResourceInstanceObjectFull(instAddr, dk)
+                if objState == nil {
+                    // If we get here then there's a bug in the
+                    // ResourceInstanceObjectFull function, because we
+                    // should only be here if instAddr and dk correspond
+                    // to an actual deposed object.
+                    panic(fmt.Sprintf("state has %s deposed object %q, but ResourceInstanceObjectFull didn't return it", instAddr, dk))
+                }
                 diags = diags.Append(
-                    planGlue.planDeposedResourceInstanceObject(ctx, instAddr, dk, instState),
+                    planGlue.planDeposedResourceInstanceObject(ctx, instAddr, dk, objState),
                 )
             }
         }
diff --git a/internal/engine/planning/plan_data.go b/internal/engine/planning/plan_data.go
index 01233f05c7..f576add2b0 100644
--- a/internal/engine/planning/plan_data.go
+++ b/internal/engine/planning/plan_data.go
@@ -83,19 +83,15 @@ func (p *planGlue) planDesiredDataResourceInstance(ctx context.Context, inst *ev
     panic("unimplemented")
 }
 
-func (p *planGlue) planOrphanDataResourceInstance(_ context.Context, addr addrs.AbsResourceInstance, state *states.ResourceInstance) tfdiags.Diagnostics {
+func (p *planGlue) planOrphanDataResourceInstance(_ context.Context, addr addrs.AbsResourceInstance, state *states.ResourceInstanceObjectFullSrc) tfdiags.Diagnostics {
     // Regardless of outcome we'll always report that we completed planning.
     defer p.planCtx.reportResourceInstancePlanCompletion(addr)
 
     var diags tfdiags.Diagnostics
 
-    // An orphan data resource is always just discarded completely, because
+    // An orphan data object is always just discarded completely, because
     // OpenTofu retains them only for esoteric uses like the "tofu console"
     // command: they are not actually expected to persist between rounds.
-    //
-    // FIXME: We can't actually populate the provider instance address here
-    // because in our current model it's split awkwardly across *states.Resource
-    // and *states.ResourceInstance, and we only have the latter here.
-    p.planCtx.refreshedState.SetResourceInstanceCurrent(addr, nil, addrs.AbsProviderConfig{}, state.ProviderKey)
+    p.planCtx.refreshedState.SetResourceInstanceObjectFull(addr, states.NotDeposed, nil)
     return diags
 }
diff --git a/internal/engine/planning/plan_eval_glue.go b/internal/engine/planning/plan_eval_glue.go
index 23033ae73f..95c8c3d9ac 100644
--- a/internal/engine/planning/plan_eval_glue.go
+++ b/internal/engine/planning/plan_eval_glue.go
@@ -67,7 +67,7 @@ func (p *planGlue) PlanDesiredResourceInstance(ctx context.Context, inst *eval.D
 
 }
 
-func (p *planGlue) planOrphanResourceInstance(ctx context.Context, addr addrs.AbsResourceInstance, state *states.ResourceInstance) tfdiags.Diagnostics {
+func (p *planGlue) planOrphanResourceInstance(ctx context.Context, addr addrs.AbsResourceInstance, state *states.ResourceInstanceObjectFullSrc) tfdiags.Diagnostics {
     log.Printf("[TRACE] planContext: planning orphan resource instance %s", addr)
     switch mode := addr.Resource.Resource.Mode; mode {
     case addrs.ManagedResourceMode:
@@ -90,7 +90,7 @@ func (p *planGlue) planOrphanResourceInstance(ctx context.Context, addr addrs.Ab
     }
 }
 
-func (p *planGlue) planDeposedResourceInstanceObject(ctx context.Context, addr addrs.AbsResourceInstance, deposedKey states.DeposedKey, state *states.ResourceInstance) tfdiags.Diagnostics {
+func (p *planGlue) planDeposedResourceInstanceObject(ctx context.Context, addr addrs.AbsResourceInstance, deposedKey states.DeposedKey, state *states.ResourceInstanceObjectFullSrc) tfdiags.Diagnostics {
     log.Printf("[TRACE] planContext: planning deposed resource instance object %s %s", addr, deposedKey)
     if addr.Resource.Resource.Mode != addrs.ManagedResourceMode {
         // Should not be possible because only managed resource instances
@@ -369,16 +369,37 @@ func (p *planGlue) resourceInstancePlaceholderValue(ctx context.Context, provide
 // should adopt a different representation of state which uses a tree structure
 // where we can efficiently scan over subtrees that match a particular prefix,
 // rather than always scanning over everything.
-func resourceInstancesFilter(state *states.State, want func(addrs.AbsResourceInstance) bool) iter.Seq2[addrs.AbsResourceInstance, *states.ResourceInstance] {
-    return func(yield func(addrs.AbsResourceInstance, *states.ResourceInstance) bool) {
+func resourceInstancesFilter(state *states.State, want func(addrs.AbsResourceInstance) bool) iter.Seq2[addrs.AbsResourceInstance, *states.ResourceInstanceObjectFullSrc] {
+    return func(yield func(addrs.AbsResourceInstance, *states.ResourceInstanceObjectFullSrc) bool) {
         for _, modState := range state.Modules {
             for _, resourceState := range modState.Resources {
                 for instKey, instanceState := range resourceState.Instances {
+                    if instanceState.Current == nil {
+                        // Only the current object for a resource instance
+                        // can be an "orphan". (Deposed objects are handled
+                        // elsewhere.)
+                        continue
+                    }
                     instAddr := resourceState.Addr.Instance(instKey)
                     if !want(instAddr) {
                         continue
                     }
-                    if !yield(instAddr, instanceState) {
+                    // We currently have a schism where we do all of the
+                    // discovery work using the traditional state model but
+                    // we then switch to using our new-style "full" object model
+                    // to act on what we've discovered. This is hopefully just
+                    // a temporary situation while we're operating in a mixed
+                    // world where most of the system doesn't know about the
+                    // new runtime yet.
+                    objState := state.SyncWrapper().ResourceInstanceObjectFull(instAddr, states.NotDeposed)
+                    if objState == nil {
+                        // If we get here then there's a bug in the
+                        // ResourceInstanceObjectFull function, because we
+                        // should only be here if instAddr corresponds to
+                        // an instance with a current object.
+                        panic(fmt.Sprintf("state has %s, but ResourceInstanceObjectFull didn't return it", instAddr))
+                    }
+                    if !yield(instAddr, objState) {
                         return
                     }
                 }
diff --git a/internal/engine/planning/plan_managed.go b/internal/engine/planning/plan_managed.go
index 9dbb41df9e..18a577fae8 100644
--- a/internal/engine/planning/plan_managed.go
+++ b/internal/engine/planning/plan_managed.go
@@ -74,9 +74,9 @@ func (p *planGlue) planDesiredManagedResourceInstance(ctx context.Context, inst
 
     var prevRoundVal cty.Value
     var prevRoundPrivate []byte
-    prevRoundState := p.planCtx.prevRoundState.ResourceInstance(inst.Addr)
-    if prevRoundState != nil && prevRoundState.Current != nil {
-        obj, err := prevRoundState.Current.Decode(schema.Block.ImpliedType())
+    prevRoundState := p.planCtx.prevRoundState.SyncWrapper().ResourceInstanceObjectFull(inst.Addr, states.NotDeposed)
+    if prevRoundState != nil {
+        obj, err := states.DecodeResourceInstanceObjectFull(prevRoundState, schema.Block.ImpliedType())
         if err != nil {
             diags = diags.Append(tfdiags.AttributeValue(
                 tfdiags.Error,
@@ -223,7 +223,7 @@ func (p *planGlue) planDesiredManagedResourceInstance(ctx context.Context, inst
     return planResp.PlannedState, diags
 }
 
-func (p *planGlue) planOrphanManagedResourceInstance(ctx context.Context, addr addrs.AbsResourceInstance, state *states.ResourceInstance) tfdiags.Diagnostics {
+func (p *planGlue) planOrphanManagedResourceInstance(ctx context.Context, addr addrs.AbsResourceInstance, state *states.ResourceInstanceObjectFullSrc) tfdiags.Diagnostics {
     // Regardless of outcome we'll always report that we completed planning.
     defer p.planCtx.reportResourceInstancePlanCompletion(addr)
 
@@ -231,7 +231,7 @@ func (p *planGlue) planOrphanManagedResourceInstance(ctx context.Context, addr a
     panic("unimplemented")
 }
 
-func (p *planGlue) planDeposedManagedResourceInstanceObject(ctx context.Context, addr addrs.AbsResourceInstance, deposedKey states.DeposedKey, state *states.ResourceInstance) tfdiags.Diagnostics {
+func (p *planGlue) planDeposedManagedResourceInstanceObject(ctx context.Context, addr addrs.AbsResourceInstance, deposedKey states.DeposedKey, state *states.ResourceInstanceObjectFullSrc) tfdiags.Diagnostics {
     // Regardless of outcome we'll always report that we completed planning.
     defer p.planCtx.reportResourceInstanceDeposedPlanCompletion(addr, deposedKey)
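
Reviewer sketch (illustrative only, not part of the patch): the lookup-and-panic
pattern that the plan.go hunk introduces can be read in isolation as the Go
sketch below. The helper name mustDeposedObjectFull and its factoring are
hypothetical; the types and methods (states.State, SyncWrapper,
ResourceInstanceObjectFull, states.DeposedKey, *states.ResourceInstanceObjectFullSrc)
are the ones used in the diff above.

    package planning

    import (
        "fmt"

        "github.com/opentofu/opentofu/internal/addrs"
        "github.com/opentofu/opentofu/internal/states"
    )

    // mustDeposedObjectFull is a hypothetical helper showing the pattern from
    // the plan.go hunk: fetch the "full" source object for a deposed instance
    // object that was just discovered by walking the same state snapshot, and
    // treat a nil result as an internal bug rather than a normal condition.
    func mustDeposedObjectFull(prevRoundState *states.State, addr addrs.AbsResourceInstance, dk states.DeposedKey) *states.ResourceInstanceObjectFullSrc {
        objState := prevRoundState.SyncWrapper().ResourceInstanceObjectFull(addr, dk)
        if objState == nil {
            // The caller only asks for objects it found in the same state
            // value, so nil here indicates a bug in ResourceInstanceObjectFull
            // rather than missing state.
            panic(fmt.Sprintf("state has %s deposed object %q, but ResourceInstanceObjectFull didn't return it", addr, dk))
        }
        return objState
    }

A current (not deposed) object is fetched the same way by passing
states.NotDeposed, which is what the plan_eval_glue.go and plan_managed.go
hunks above do.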