execgraph: A more complete compiler implementation

This now seems to more-or-less work, at least as far as the new
compile-and-execute is concerned.

Signed-off-by: Martin Atkins <mart@degeneration.co.uk>
This commit is contained in:
Martin Atkins
2025-09-29 16:42:53 -07:00
parent 4218231438
commit ff8e4fc4e0
7 changed files with 844 additions and 65 deletions

View File

@@ -7,13 +7,13 @@ package execgraph
import (
"context"
"fmt"
"sync"
"github.com/apparentlymart/go-workgraph/workgraph"
"github.com/zclconf/go-cty/cty"
"github.com/opentofu/opentofu/internal/addrs"
"github.com/opentofu/opentofu/internal/lang/grapheval"
"github.com/opentofu/opentofu/internal/tfdiags"
)
@@ -27,7 +27,7 @@ type CompiledGraph struct {
// compiler to arrange for the necessary data flow while it's building
// these compiled operations. Execution is complete once all of these
// functions have returned.
steps []nodeExecuteRaw
steps []compiledGraphStep
// resourceInstanceValues provides a function for each resource instance
// that was registered as a "sink" during graph building which blocks
@@ -50,6 +50,15 @@ type CompiledGraph struct {
cleanupWorker *workgraph.Worker
}
// compiledGraphStep is a single "step" from a [CompiledGraph].
//
// [CompiledGraph.Execute] executes all of the steps concurrently in a
// separate goroutine each, and so a compiledGraphStep function should start
// by establishing a new [workgraph.Worker] to represent whatever work it's
// going to do, so that the system can detect when a step tries to depend
// on its own result, directly or indirectly.
//
// A step returns only diagnostics; any data a step produces flows to other
// steps separately, through the promises the compiler arranges while
// building the compiled operations.
type compiledGraphStep func(ctx context.Context) tfdiags.Diagnostics
// Execute performs all of the work described in the execution graph in a
// suitable order, returning any diagnostics that operations might return
// along the way.
@@ -66,13 +75,18 @@ func (c *CompiledGraph) Execute(ctx context.Context) tfdiags.Diagnostics {
var wg sync.WaitGroup
wg.Add(len(c.steps))
for _, op := range c.steps {
wg.Go(func() {
_, _, opDiags := op(grapheval.ContextWithNewWorker(ctx))
for idx, step := range c.steps {
if step == nil {
diags = diags.Append(fmt.Errorf("execution graph compiled step %d is nil function", idx))
return diags
}
go func() {
opDiags := step(ctx)
diagsMu.Lock()
diags = diags.Append(opDiags)
diagsMu.Unlock()
})
wg.Done()
}()
}
wg.Wait()
@@ -103,25 +117,3 @@ func (c *CompiledGraph) ResourceInstanceValue(ctx context.Context, addr addrs.Ab
}
return getter(ctx)
}
// compiledOperation is the signature of a function acting as the implementation
// of a specific operation in a compiled graph.
//
// The Result type parameter varies depending on the operation's opcode,
// since different operations produce different result types.
type compiledOperation[Result any] func(ctx context.Context) (Result, tfdiags.Diagnostics)
// anyCompiledOperation is a type-erased version of [compiledOperation] used
// in situations where we only care that they got executed and have completed,
// without needing the actual results.
//
// The main way to create a function of this type is to pass a
// [compiledOperation] of some other type to [typeErasedCompiledOperation].
// Note that this is a type alias rather than a distinct named type, so any
// function with the matching signature is assignable to it directly.
type anyCompiledOperation = func(ctx context.Context) tfdiags.Diagnostics
// typeErasedCompiledOperation adapts a [compiledOperation] with a specific
// result type into a type-erased [anyCompiledOperation]: the adapted
// function runs the original operation, throws away its result, and
// returns only its diagnostics.
func typeErasedCompiledOperation[Result any](op compiledOperation[Result]) anyCompiledOperation {
	erased := func(ctx context.Context) tfdiags.Diagnostics {
		_, opDiags := op(ctx)
		return opDiags
	}
	return erased
}

View File

@@ -9,6 +9,7 @@ import (
"context"
"fmt"
"iter"
"log"
"strings"
"sync"
@@ -16,7 +17,6 @@ import (
"github.com/zclconf/go-cty/cty"
"github.com/opentofu/opentofu/internal/addrs"
"github.com/opentofu/opentofu/internal/lang/eval"
"github.com/opentofu/opentofu/internal/lang/grapheval"
"github.com/opentofu/opentofu/internal/states"
"github.com/opentofu/opentofu/internal/tfdiags"
@@ -25,10 +25,15 @@ import (
// Compile produces a compiled version of the graph which will, once executed,
// use the given arguments to interact with other parts of the broader system.
//
// TODO: This currently takes a prior state snapshot using our current models,
// but the state model we have probably isn't the best for this new execution
// approach.
func (g *Graph) Compile(oracle *eval.ApplyOracle, evalCtx *eval.EvalContext, priorState *states.SyncState) (*CompiledGraph, tfdiags.Diagnostics) {
// The [Graph.Compile] function is guaranteed not call any methods on the given
// [ExecContext] during compilation: it will be used only once the returned
// [CompiledGraph] is executed. In particular this means that it's okay for
// there to be a cyclic dependency between the ExecContext and the CompiledGraph
// so that the caller can use [CompiledGraph.ResourceInstanceValue] to satisfy
// requests from the evaluation system for final resource instance values, as
// long as the ExecContext object is updated with a pointer to the returned
// CompiledGraph object before executing the graph.
func (g *Graph) Compile(execCtx ExecContext) (*CompiledGraph, tfdiags.Diagnostics) {
ret := &CompiledGraph{
resourceInstanceValues: addrs.MakeMap[addrs.AbsResourceInstance, func(ctx context.Context) cty.Value](),
cleanupWorker: workgraph.NewWorker(),
@@ -36,10 +41,11 @@ func (g *Graph) Compile(oracle *eval.ApplyOracle, evalCtx *eval.EvalContext, pri
c := &compiler{
sourceGraph: g,
compiledGraph: ret,
oracle: oracle,
execCtx: execCtx,
opResolvers: make([]workgraph.Resolver[nodeResultRaw], len(g.ops)),
opResults: make([]workgraph.Promise[nodeResultRaw], len(g.ops)),
desiredStateFuncs: make([]nodeExecuteRaw, len(g.desiredStateRefs)),
priorStateFuncs: make([]nodeExecuteRaw, len(g.priorStateRefs)),
providerInstConfigFuncs: make([]nodeExecuteRaw, len(g.providerInstConfigRefs)),
}
// We'll prepopulate all of the operation promises, and then the compiler
@@ -66,9 +72,7 @@ func (g *Graph) Compile(oracle *eval.ApplyOracle, evalCtx *eval.EvalContext, pri
type compiler struct {
sourceGraph *Graph
compiledGraph *CompiledGraph
oracle *eval.ApplyOracle
evalCtx *eval.EvalContext
priorState *states.SyncState
execCtx ExecContext
// opResolvers and opResults track our requests for our operation results,
// each of which should be resolved by one of the "steps" in the compiled
@@ -88,6 +92,7 @@ type compiler struct {
// The indices of each of these correlate with the matching slices in
// sourceGraph.
desiredStateFuncs []nodeExecuteRaw
priorStateFuncs []nodeExecuteRaw
providerInstConfigFuncs []nodeExecuteRaw
// diags accumulates any problems we detect during the compilation process,
@@ -109,16 +114,23 @@ func (c *compiler) Compile() (*CompiledGraph, tfdiags.Diagnostics) {
// of other nodes as we go along only as needed to satisfy the operations.
opResolvers := c.opResolvers
for opIdx, opDesc := range c.sourceGraph.ops {
operands := newCompilerOperands(c.compileOperands(opDesc.operands))
operands := newCompilerOperands(opDesc.opCode, c.compileOperands(opDesc.operands))
var compileFunc func(operands *compilerOperands) nodeExecuteRaw
switch opDesc.opCode {
case opManagedFinalPlan:
compileFunc = c.compileOpManagedFinalPlan
case opManagedApplyChanges:
compileFunc = c.compileOpManagedApplyChanges
// TODO: opDataRead
case opOpenProvider:
compileFunc = c.compileOpOpenProvider
case opCloseProvider:
compileFunc = c.compileOpCloseProvider
default:
c.diags = c.diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Unsupported opcode in execution graph",
fmt.Sprintf("Execution graph includes opcode %s, but the compiler doesn't know how to handle it. This is a bug in OpenTofu.", opDesc.opCode),
fmt.Sprintf("Execution graph includes %s, but the compiler doesn't know how to handle it. This is a bug in OpenTofu.", opDesc.opCode),
))
continue
}
@@ -127,7 +139,7 @@ func (c *compiler) Compile() (*CompiledGraph, tfdiags.Diagnostics) {
// the operation results to propagate through the graph using the
// promises set up in [Graph.Compile].
mainExec := compileFunc(operands)
graphExec := func(parentCtx context.Context) (any, bool, tfdiags.Diagnostics) {
graphStep := func(parentCtx context.Context) tfdiags.Diagnostics {
// Each operation's execution must have its own workgraph worker
// that's responsible for resolving the associated promise, since
// that allows us to detect if operations try to depend on their
@@ -143,9 +155,9 @@ func (c *compiler) Compile() (*CompiledGraph, tfdiags.Diagnostics) {
CanContinue: ok,
Diagnostics: diags,
})
return ret, ok, diags
return diags
}
c.compiledGraph.steps = append(c.compiledGraph.steps, graphExec)
c.compiledGraph.steps = append(c.compiledGraph.steps, graphStep)
}
if c.diags.HasErrors() {
// Don't expose the likely-invalid compiled graph, then.
@@ -192,7 +204,7 @@ func (c *compiler) compileResultRef(ref AnyResultRef) nodeExecuteRaw {
// pointers to as small a part of the compiler's state as possible, so
// that the overall compiler object can be garbage-collected once
// compilation is complete.
oracle := c.oracle
execCtx := c.execCtx
// For any of the cases that return functions that cause side-effects that
// can potentially fail we must use a "once" wrapper to ensure that the
@@ -222,7 +234,7 @@ func (c *compiler) compileResultRef(ref AnyResultRef) nodeExecuteRaw {
}
c.desiredStateFuncs[index] = nodeExecuteRawOnce(func(ctx context.Context) (any, bool, tfdiags.Diagnostics) {
var diags tfdiags.Diagnostics
desired := oracle.DesiredResourceInstance(ctx, resourceInstAddrs[index])
desired := execCtx.DesiredResourceInstance(ctx, resourceInstAddrs[index])
if desired == nil {
// If we get here then it suggests a bug in the planning engine,
// because it should not include a node referring to a resource
@@ -236,22 +248,19 @@ func (c *compiler) compileResultRef(ref AnyResultRef) nodeExecuteRaw {
}
return desired, true, diags
})
c.compiledGraph.steps = append(c.compiledGraph.steps, c.desiredStateFuncs[index])
c.compiledGraph.steps = append(c.compiledGraph.steps, compiledGraphStepFromNodeExecuteRaw(c.desiredStateFuncs[index]))
return c.desiredStateFuncs[index]
case resourceInstancePriorStateResultRef:
priorState := c.priorState
priorStateRefs := c.sourceGraph.priorStateRefs
index := ref.index
return func(ctx context.Context) (any, bool, tfdiags.Diagnostics) {
if existing := c.priorStateFuncs[index]; existing != nil {
return existing
}
c.priorStateFuncs[index] = nodeExecuteRawOnce(func(ctx context.Context) (any, bool, tfdiags.Diagnostics) {
var diags tfdiags.Diagnostics
priorStateRef := priorStateRefs[index]
var gen states.Generation
if priorStateRef.DeposedKey == states.NotDeposed {
gen = states.CurrentGen
} else {
gen = priorStateRef.DeposedKey
}
obj := priorState.ResourceInstanceObject(priorStateRef.ResourceInstance, gen)
log.Printf("[TRACE] execgraph: Getting prior state for %s %s", priorStateRef.ResourceInstance, priorStateRef.DeposedKey)
obj := execCtx.ResourceInstancePriorState(ctx, priorStateRef.ResourceInstance, priorStateRef.DeposedKey)
if obj == nil {
// If we get here then it suggests a bug in the planning engine,
// because it should not include a node referring to a resource
@@ -270,7 +279,9 @@ func (c *compiler) compileResultRef(ref AnyResultRef) nodeExecuteRaw {
return nil, false, diags
}
return obj, true, diags
}
})
c.compiledGraph.steps = append(c.compiledGraph.steps, compiledGraphStepFromNodeExecuteRaw(c.priorStateFuncs[index]))
return c.priorStateFuncs[index]
case providerInstanceConfigResultRef:
providerInstConfigRefs := c.sourceGraph.providerInstConfigRefs
index := ref.index
@@ -278,9 +289,11 @@ func (c *compiler) compileResultRef(ref AnyResultRef) nodeExecuteRaw {
return existing
}
c.providerInstConfigFuncs[index] = nodeExecuteRawOnce(func(ctx context.Context) (any, bool, tfdiags.Diagnostics) {
log.Printf("[TRACE] execgraph: Fetching provider configuration value for %s", providerInstConfigRefs[index])
var diags tfdiags.Diagnostics
configVal := oracle.ProviderInstanceConfig(ctx, providerInstConfigRefs[index])
configVal := execCtx.ProviderInstanceConfig(ctx, providerInstConfigRefs[index])
if configVal == cty.NilVal {
log.Printf("[TRACE] execgraph: No configuration value available for %s", providerInstConfigRefs[index])
// If we get here then it suggests a bug in the planning engine,
// because it should not include a node referring to a provider
// instance that is not present in the configuration.
@@ -291,9 +304,10 @@ func (c *compiler) compileResultRef(ref AnyResultRef) nodeExecuteRaw {
))
return nil, false, diags
}
log.Printf("[TRACE] execgraph: Returning configuration value for %s", providerInstConfigRefs[index])
return configVal, true, diags
})
c.compiledGraph.steps = append(c.compiledGraph.steps, c.desiredStateFuncs[index])
c.compiledGraph.steps = append(c.compiledGraph.steps, compiledGraphStepFromNodeExecuteRaw(c.providerInstConfigFuncs[index]))
return c.providerInstConfigFuncs[index]
case anyOperationResultRef:
// Operations have different result types depending on their opcodes,
@@ -384,6 +398,7 @@ func nodeExecuteRawOnce(inner nodeExecuteRaw) nodeExecuteRaw {
// callers can wait for it.
var resolver workgraph.Resolver[nodeResultRaw]
resolver, promise = workgraph.NewRequest[nodeResultRaw](worker)
reqID = resolver.RequestID()
mu.Unlock() // Allow concurrent callers to begin awaiting the promise
ret, ok, diags := inner(ctx)
@@ -406,6 +421,24 @@ func nodeExecuteRawOnce(inner nodeExecuteRaw) nodeExecuteRaw {
}
}
// compiledGraphStepFromNodeExecuteRaw adapts a [nodeExecuteRaw] into a
// [compiledGraphStep]: the resulting step runs the given function inside a
// context carrying a fresh [workgraph.Worker] and reports only the
// function's diagnostics, discarding its result.
//
// Use this only for the helper steps added by [compiler.compileResultRef],
// where the wrapped function is not responsible for resolving any promises.
// Operation execution steps are arranged differently: there the "step"
// resolves a promise and a separate result function consumes it, so it's
// not correct to wrap operation-related functions with this adapter.
func compiledGraphStepFromNodeExecuteRaw(exec nodeExecuteRaw) compiledGraphStep {
	return func(parentCtx context.Context) tfdiags.Diagnostics {
		workerCtx := grapheval.ContextWithNewWorker(parentCtx)
		_, _, diags := exec(workerCtx)
		return diags
	}
}
// nodeExecute is the type of a function that blocks until the result of a node
// is available and then returns that result.
//

View File

@@ -9,6 +9,7 @@ import (
"context"
"fmt"
"iter"
"reflect"
"strings"
"github.com/opentofu/opentofu/internal/tfdiags"
@@ -32,6 +33,7 @@ import (
// // compilation fails
// }
type compilerOperands struct {
opCode opCode
nextOperand func() (AnyResultRef, nodeExecuteRaw, bool)
stop func()
idx int
@@ -44,9 +46,10 @@ type compilerOperands struct {
//
// Refer to the documentation of [compilerOperands] for an example of how to
// use the result.
func newCompilerOperands(operands iter.Seq2[AnyResultRef, nodeExecuteRaw]) *compilerOperands {
func newCompilerOperands(opCode opCode, operands iter.Seq2[AnyResultRef, nodeExecuteRaw]) *compilerOperands {
next, stop := iter.Pull2(operands)
return &compilerOperands{
opCode: opCode,
nextOperand: next,
stop: stop,
idx: 0,
@@ -67,7 +70,8 @@ func nextOperand[T any](operands *compilerOperands) nodeExecute[T] {
// the expected type.
if _, typeOk := resultRef.(ResultRef[T]); !typeOk {
var zero T
operands.problems = append(operands.problems, fmt.Sprintf("operand %d not of expected type %T (got %T)", idx, any(zero), resultRef))
ty := reflect.TypeOf(&zero).Elem()
operands.problems = append(operands.problems, fmt.Sprintf("operand %d not of expected type %s.%s (got %T)", idx, ty.PkgPath(), ty.Name(), resultRef))
return nil
}
@@ -132,7 +136,7 @@ func (ops *compilerOperands) Finish() tfdiags.Diagnostics {
}
if len(problems) != 0 {
var buf strings.Builder
buf.WriteString("Found incorrect operands when compiling operation:\n")
fmt.Fprintf(&buf, "Found incorrect operands when compiling %s:\n", ops.opCode)
for _, problem := range problems {
fmt.Fprintf(&buf, " - %s\n", problem)
}

View File

@@ -7,13 +7,190 @@ package execgraph
import (
"context"
"fmt"
"log"
"github.com/zclconf/go-cty/cty"
"github.com/opentofu/opentofu/internal/addrs"
"github.com/opentofu/opentofu/internal/lang/eval"
"github.com/opentofu/opentofu/internal/providers"
"github.com/opentofu/opentofu/internal/states"
"github.com/opentofu/opentofu/internal/tfdiags"
)
// compileOpManagedFinalPlan compiles an opManagedFinalPlan operation into a
// function that blocks on all of its operands and then asks the provider to
// produce the final plan for a managed resource instance object.
//
// The operands are consumed in order: the desired state, the prior state,
// the initially-planned value, and the configured provider client, plus any
// additional dependencies collected by the operand waiter.
//
// Returns nil after appending error diagnostics to c.diags if the operands
// are not of the expected types.
func (c *compiler) compileOpManagedFinalPlan(operands *compilerOperands) nodeExecuteRaw {
	getDesired := nextOperand[*eval.DesiredResourceInstance](operands)
	getPrior := nextOperand[*states.ResourceInstanceObject](operands)
	getInitialPlanned := nextOperand[cty.Value](operands)
	getProviderClient := nextOperand[providers.Configured](operands)
	waitForDeps := operands.OperandWaiter()
	diags := operands.Finish()
	c.diags = c.diags.Append(diags)
	if diags.HasErrors() {
		return nil
	}
	return func(ctx context.Context) (any, bool, tfdiags.Diagnostics) {
		log.Printf("[TRACE] execgraph: opManagedFinalPlan waiting for dependencies to complete")
		var diags tfdiags.Diagnostics
		if !waitForDeps(ctx) {
			log.Printf("[TRACE] execgraph: opManagedFinalPlan upstream dependency failed")
			return nil, false, diags
		}
		log.Printf("[TRACE] execgraph: opManagedFinalPlan waiting for provider client")
		providerClient, ok, moreDiags := getProviderClient(ctx)
		diags = diags.Append(moreDiags)
		if !ok {
			log.Printf("[TRACE] execgraph: opManagedFinalPlan failed to get provider client")
			return nil, false, diags
		}
		log.Printf("[TRACE] execgraph: opManagedFinalPlan waiting for desired state")
		desired, ok, moreDiags := getDesired(ctx)
		diags = diags.Append(moreDiags)
		if !ok {
			log.Printf("[TRACE] execgraph: opManagedFinalPlan failed to get desired state")
			return nil, false, diags
		}
		log.Printf("[TRACE] execgraph: opManagedFinalPlan waiting for prior state")
		prior, ok, moreDiags := getPrior(ctx)
		diags = diags.Append(moreDiags)
		if !ok {
			log.Printf("[TRACE] execgraph: opManagedFinalPlan failed to get prior state")
			return nil, false, diags
		}
		log.Printf("[TRACE] execgraph: opManagedFinalPlan waiting for initial planned state")
		initialPlanned, ok, moreDiags := getInitialPlanned(ctx)
		diags = diags.Append(moreDiags)
		if !ok {
			log.Printf("[TRACE] execgraph: opManagedFinalPlan failed to get planned state")
			return nil, false, diags
		}
		log.Printf("[TRACE] execgraph: opManagedFinalPlan ready to execute")
		var resourceTypeName string
		if desired != nil {
			resourceTypeName = desired.Addr.Resource.Resource.Type
		} else {
			// FIXME: We don't have anywhere to get the resource type name from
			// if the resource instance is not desired. This is one of the
			// annoyances of using our existing states.ResourceInstanceObject
			// model, since it was designed to be used by callers that also have
			// access to the rest of the state data structure that would've
			// indicated which resource instance the object belongs to.
			resourceTypeName = "<FIXME: no resource type available!>"
		}
		req := providers.PlanResourceChangeRequest{
			TypeName: resourceTypeName,
		}
		if desired != nil {
			req.Config = desired.ConfigVal
		} else {
			req.Config = cty.NullVal(cty.DynamicPseudoType)
		}
		if prior != nil {
			req.PriorState = prior.Value
			req.PriorPrivate = prior.Private
		}
		// TODO: req.ProviderMeta, maybe.
		// TODO: req.ProposedNewState, but we need the provider's schema in
		// here in order to build that with objchange.ProposedNew. :(
		// It sure would be nice if these concerns weren't all so tangled
		// together. Maybe we could address that with a higher-level type
		// for the provider client, instead of using [providers.Configured]
		// directly, which has access to the schema cache and maybe even
		// knows which resource type it's trying to be a client for so it
		// can handle these schema-related details internally itself, since
		// the provider client already has schema information available to
		// it in order to marshal the other values in the request.
		resp := providerClient.PlanResourceChange(ctx, req)
		diags = diags.Append(resp.Diagnostics)
		if resp.Diagnostics.HasErrors() {
			return nil, false, diags
		}
		// TODO: Check whether the final plan is valid in comparison to the
		// initial plan. But again, we need access to the schema here to do
		// that directly, which is annoying since it would be nice if that
		// were all encapsulated away somewhere.
		_ = initialPlanned
		ret := &ManagedResourceObjectFinalPlan{
			ResourceType: resourceTypeName,
			// Use req.Config here rather than desired.ConfigVal: desired can
			// be nil (the "not desired" branch above), and req.Config was
			// already populated with the nil-safe null fallback in that case,
			// whereas desired.ConfigVal would panic.
			ConfigVal:     req.Config,
			PriorStateVal: req.PriorState,
			PlannedVal:    resp.PlannedState,
		}
		return ret, true, diags
	}
}
// compileOpManagedApplyChanges compiles an opManagedApplyChanges operation
// into a function that waits for a final plan and a configured provider
// client and then asks the provider to apply the planned change, producing
// the resulting new state object.
//
// Returns nil after appending error diagnostics to c.diags if the operands
// are not of the expected types.
func (c *compiler) compileOpManagedApplyChanges(operands *compilerOperands) nodeExecuteRaw {
	getFinalPlan := nextOperand[*ManagedResourceObjectFinalPlan](operands)
	getProviderClient := nextOperand[providers.Configured](operands)
	diags := operands.Finish()
	c.diags = c.diags.Append(diags)
	if diags.HasErrors() {
		return nil
	}
	return func(ctx context.Context) (any, bool, tfdiags.Diagnostics) {
		var diags tfdiags.Diagnostics
		// Block on the provider client first, then the plan; both must
		// succeed before we can attempt the apply call.
		log.Printf("[TRACE] execgraph: opManagedApplyChanges waiting for provider client")
		providerClient, ok, moreDiags := getProviderClient(ctx)
		diags = diags.Append(moreDiags)
		if !ok {
			log.Printf("[TRACE] execgraph: opManagedApplyChanges failed to get provider client")
			return nil, false, diags
		}
		log.Printf("[TRACE] execgraph: opManagedApplyChanges waiting for final plan")
		finalPlan, ok, moreDiags := getFinalPlan(ctx)
		diags = diags.Append(moreDiags)
		if !ok {
			log.Printf("[TRACE] execgraph: opManagedApplyChanges failed to get final plan")
			return nil, false, diags
		}
		log.Printf("[TRACE] execgraph: opManagedApplyChanges ready to apply changes for %q", finalPlan.ResourceType)
		req := providers.ApplyResourceChangeRequest{
			TypeName:     finalPlan.TypeName,
			PriorState:   finalPlan.PriorStateVal,
			PlannedState: finalPlan.PlannedVal,
			Config:       finalPlan.ConfigVal,
			// TODO: PlannedPrivate
			// TODO: ProviderMeta(?)
		}
		resp := providerClient.ApplyResourceChange(ctx, req)
		diags = diags.Append(resp.Diagnostics)
		if resp.Diagnostics.HasErrors() {
			// FIXME: We need to be able to return a new state object even
			// if the apply failed, because it should be saved -- possibly
			// as tainted, if we were trying to create it -- so that the
			// next round can try to recover from the problem.
			return nil, false, diags
		}
		// TODO: Check whether the final state is valid in comparison to the
		// final plan. But we need access to the schema here to do that
		// directly, which is annoying since it would be nice if that were all
		// encapsulated away somewhere.
		ret := &states.ResourceInstanceObject{
			Value:   resp.NewState,
			Private: resp.Private,
			Status:  states.ObjectReady,
			// TODO: Dependencies ... they come from the "desired" object
			// so maybe we should send that whole thing over here instead of
			// just the ConfigVal?
			// TODO: CreateBeforeDestroy ... also from the "desired" object.
		}
		return ret, true, diags
	}
}
func (c *compiler) compileOpOpenProvider(operands *compilerOperands) nodeExecuteRaw {
getProviderAddr := nextOperand[addrs.Provider](operands)
getConfigVal := nextOperand[cty.Value](operands)
@@ -24,25 +201,32 @@ func (c *compiler) compileOpOpenProvider(operands *compilerOperands) nodeExecute
return nil
}
providers := c.evalCtx.Providers
execCtx := c.execCtx
return func(ctx context.Context) (any, bool, tfdiags.Diagnostics) {
var diags tfdiags.Diagnostics
if !waitForDeps(ctx) {
return nil, false, diags
}
log.Printf("[TRACE] execgraph: opOpenProvider waiting for provider address")
providerAddr, ok, moreDiags := getProviderAddr(ctx)
diags = diags.Append(moreDiags)
if !ok {
log.Printf("[TRACE] execgraph: opOpenProvider failed to get provider address")
return nil, false, diags
}
log.Printf("[TRACE] execgraph: opOpenProvider %s waiting for dependencies to complete", providerAddr)
if !waitForDeps(ctx) {
log.Printf("[TRACE] execgraph: opOpenProvider upstream dependency failed")
return nil, false, diags
}
log.Printf("[TRACE] execgraph: opOpenProvider %s waiting for configuration value", providerAddr)
configVal, ok, moreDiags := getConfigVal(ctx)
diags = diags.Append(moreDiags)
if !ok {
log.Printf("[TRACE] execgraph: opOpenProvider failed to get configuration value")
return nil, false, diags
}
ret, moreDiags := providers.NewConfiguredProvider(ctx, providerAddr, configVal)
log.Printf("[TRACE] execgraph: opOpenProvider creating a configured client for %s", providerAddr)
ret, moreDiags := execCtx.NewProviderClient(ctx, providerAddr, configVal)
diags = diags.Append(moreDiags)
if moreDiags.HasErrors() {
return nil, false, diags
@@ -50,3 +234,46 @@ func (c *compiler) compileOpOpenProvider(operands *compilerOperands) nodeExecute
return ret, true, diags
}
}
// compileOpCloseProvider compiles an opCloseProvider operation into a
// function that waits until everything using a provider client has finished
// and then closes that client.
//
// Returns nil after appending error diagnostics to c.diags if the operands
// are not of the expected types.
func (c *compiler) compileOpCloseProvider(operands *compilerOperands) nodeExecuteRaw {
	getProviderClient := nextOperand[providers.Configured](operands)
	waitForUsers := operands.OperandWaiter()
	compileDiags := operands.Finish()
	c.diags = c.diags.Append(compileDiags)
	if compileDiags.HasErrors() {
		return nil
	}
	return func(ctx context.Context) (any, bool, tfdiags.Diagnostics) {
		var diags tfdiags.Diagnostics
		log.Printf("[TRACE] execgraph: opCloseProvider waiting for all provider users to finish")
		// The users' results are deliberately discarded: the provider must
		// still be closed even when one of its users failed.
		waitForUsers(ctx)
		log.Printf("[TRACE] execgraph: opCloseProvider waiting for provider client")
		client, ok, clientDiags := getProviderClient(ctx)
		diags = diags.Append(clientDiags)
		if !ok {
			log.Printf("[TRACE] execgraph: opCloseProvider failed to get provider client")
			return nil, false, diags
		}
		log.Printf("[TRACE] execgraph: opCloseProvider calling Close on provider")
		if err := client.Close(ctx); err != nil {
			log.Printf("[TRACE] execgraph: opCloseProvider failed to close: %s", err)
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				"Failed to close provider client",
				fmt.Sprintf("Error closing provider client: %s.", tfdiags.FormatError(err)),
			))
			return nil, false, diags
		}
		log.Printf("[TRACE] execgraph: opCloseProvider closed provider successfully")
		// There's no meaningful result for this operation, so an empty
		// struct value stands in for "nothing".
		return struct{}{}, true, diags
	}
}

View File

@@ -4,3 +4,237 @@
// SPDX-License-Identifier: MPL-2.0
package execgraph
import (
"cmp"
"context"
"slices"
"sync"
"testing"
gcmp "github.com/google/go-cmp/cmp"
"github.com/zclconf/go-cty-debug/ctydebug"
"github.com/zclconf/go-cty/cty"
"github.com/opentofu/opentofu/internal/addrs"
"github.com/opentofu/opentofu/internal/lang/eval"
"github.com/opentofu/opentofu/internal/lang/grapheval"
"github.com/opentofu/opentofu/internal/providers"
"github.com/opentofu/opentofu/internal/states"
"github.com/opentofu/opentofu/internal/tfdiags"
)
// TestCompiler_resourceInstanceBasics builds a small execution graph for a
// single managed resource instance, compiles it against a mock ExecContext,
// executes it, and then verifies both the resulting resource instance value
// and the exact set of calls made to the mock.
func TestCompiler_resourceInstanceBasics(t *testing.T) {
	// The following approximates what might appear in the planning engine's
	// code for building the execution subgraph for a desired resource
	// instance, arranging for its changes to be planned and applied with
	// whatever provider instance was selected in the configuration.
	builder := NewBuilder()
	resourceInstAddr := addrs.Resource{
		Mode: addrs.ManagedResourceMode,
		Type: "bar_thing",
		Name: "example",
	}.Absolute(addrs.RootModuleInstance).Instance(addrs.NoKey)
	providerAddr := addrs.MustParseProviderSourceString("example.com/foo/bar")
	providerInstAddr := addrs.AbsProviderInstanceCorrect{
		Config: addrs.AbsProviderConfigCorrect{
			Config: addrs.ProviderConfigCorrect{
				Provider: providerAddr,
			},
		},
	}
	initialPlannedValue := builder.ConstantValue(cty.ObjectVal(map[string]cty.Value{
		"name": cty.StringVal("thingy"),
	}))
	providerClient, addProviderUser := builder.ProviderInstance(providerInstAddr, nil)
	desiredInst := builder.DesiredResourceInstance(resourceInstAddr)
	priorState := builder.ResourceInstancePriorState(resourceInstAddr)
	finalPlan := builder.ManagedResourceObjectFinalPlan(
		desiredInst,
		priorState,
		initialPlannedValue,
		providerClient,
		nil,
	)
	newState := builder.ApplyManagedResourceObjectChanges(finalPlan, providerClient)
	// The apply step must complete before the provider can be closed.
	addProviderUser(newState)
	builder.SetResourceInstanceFinalStateResult(resourceInstAddr, newState)
	sourceGraph := builder.Finish()
	t.Log("source graph:\n" + sourceGraph.DebugRepr())

	// The rest of this is approximating what the apply phase might do, although
	// only the part that relates to this package in particular since we're
	// focused only on testing the compiler and our ability to execute what
	// it produces.
	//
	// execCtx is declared separately from its assignment because
	// NewProviderClientFunc needs to refer back to it.
	var execCtx *MockExecContext
	execCtx = &MockExecContext{
		DesiredResourceInstanceFunc: func(ctx context.Context, addr addrs.AbsResourceInstance) *eval.DesiredResourceInstance {
			if !addr.Equal(resourceInstAddr) {
				return nil
			}
			return &eval.DesiredResourceInstance{
				Addr: addr,
				ConfigVal: cty.ObjectVal(map[string]cty.Value{
					"name": cty.StringVal("thingy"),
				}),
				Provider:         providerAddr,
				ProviderInstance: &providerInstAddr,
			}
		},
		ResourceInstancePriorStateFunc: func(ctx context.Context, addr addrs.AbsResourceInstance, deposedKey states.DeposedKey) *states.ResourceInstanceObject {
			return &states.ResourceInstanceObject{
				Status: states.ObjectReady,
				Value: cty.ObjectVal(map[string]cty.Value{
					"name": cty.StringVal("prior"),
				}),
			}
		},
		ProviderInstanceConfigFunc: func(ctx context.Context, addr addrs.AbsProviderInstanceCorrect) cty.Value {
			if !addr.Equal(providerInstAddr) {
				return cty.NilVal
			}
			return cty.ObjectVal(map[string]cty.Value{
				"provider_config": cty.True,
			})
		},
		NewProviderClientFunc: func(ctx context.Context, addr addrs.Provider, configVal cty.Value) (providers.Configured, tfdiags.Diagnostics) {
			// The mock provider just echoes the configuration as the planned
			// state, and the planned state as the new state.
			return execCtx.NewManagedResourceProviderClient(
				func(ctx context.Context, req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse {
					return providers.PlanResourceChangeResponse{
						PlannedState: req.Config,
					}
				},
				func(ctx context.Context, req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse {
					return providers.ApplyResourceChangeResponse{
						NewState: req.PlannedState,
					}
				},
			), nil
		},
	}
	compiledGraph, diags := sourceGraph.Compile(execCtx)
	if diags.HasErrors() {
		t.Fatal("unexpected compile errors\n" + diags.Err().Error())
	}

	// Execute runs in a separate goroutine so that this test goroutine can
	// concurrently block on the final resource instance value.
	var wg sync.WaitGroup
	diagsCh := make(chan tfdiags.Diagnostics, 1)
	wg.Go(func() {
		diagsCh <- compiledGraph.Execute(t.Context())
		close(diagsCh)
	})
	gotValue := compiledGraph.ResourceInstanceValue(grapheval.ContextWithNewWorker(t.Context()), resourceInstAddr)
	wantValue := cty.ObjectVal(map[string]cty.Value{
		"name": cty.StringVal("thingy"),
	})
	if diff := gcmp.Diff(wantValue, gotValue, ctydebug.CmpOptions); diff != "" {
		t.Errorf("wrong result for %s: %s", resourceInstAddr, diff)
	}
	wg.Wait()
	diags = <-diagsCh
	if diags.HasErrors() {
		t.Fatal("unexpected execute errors\n" + diags.Err().Error())
	}

	gotLog := execCtx.Calls
	// There are multiple valid call orders, so we'll just discard the order
	// by sorting the log by method name since we only expect one call to
	// each method in this particular test.
	slices.SortFunc(gotLog, func(a, b MockExecContextCall) int {
		return cmp.Compare(a.MethodName, b.MethodName)
	})
	// We also can't compare the actual provider client, so we'll stub that
	// result out.
	for i := range gotLog {
		if gotLog[i].MethodName == "NewProviderClient" {
			gotLog[i].Result = "<ignored for test comparison>"
		}
	}
	wantLog := []MockExecContextCall{
		{
			MethodName: "DesiredResourceInstance",
			Args:       []any{resourceInstAddr},
			Result: &eval.DesiredResourceInstance{
				Addr: resourceInstAddr,
				ConfigVal: cty.ObjectVal(map[string]cty.Value{
					"name": cty.StringVal("thingy"),
				}),
				Provider:         providerAddr,
				ProviderInstance: &providerInstAddr,
			},
		},
		{
			MethodName: "NewProviderClient",
			Args: []any{
				providerAddr,
				cty.ObjectVal(map[string]cty.Value{
					"provider_config": cty.True,
				}),
			},
			Result: "<ignored for test comparison>",
		},
		{
			MethodName: "ProviderInstanceConfig",
			Args:       []any{providerInstAddr},
			Result: cty.ObjectVal(map[string]cty.Value{
				"provider_config": cty.True,
			}),
		},
		{
			MethodName: "ResourceInstancePriorState",
			Args:       []any{resourceInstAddr, states.NotDeposed},
			Result: &states.ResourceInstanceObject{
				Status: states.ObjectReady,
				Value: cty.ObjectVal(map[string]cty.Value{
					"name": cty.StringVal("prior"),
				}),
			},
		},
		{
			MethodName: "providerClient.ApplyResourceChange",
			Args: []any{
				providers.ApplyResourceChangeRequest{
					TypeName: "bar_thing",
					PriorState: cty.ObjectVal(map[string]cty.Value{
						"name": cty.StringVal("prior"),
					}),
					PlannedState: cty.ObjectVal(map[string]cty.Value{
						"name": cty.StringVal("thingy"),
					}),
					Config: cty.ObjectVal(map[string]cty.Value{
						"name": cty.StringVal("thingy"),
					}),
				},
			},
			Result: providers.ApplyResourceChangeResponse{
				NewState: cty.ObjectVal(map[string]cty.Value{
					"name": cty.StringVal("thingy"),
				}),
			},
		},
		{
			MethodName: "providerClient.PlanResourceChange",
			Args: []any{
				providers.PlanResourceChangeRequest{
					TypeName: "bar_thing",
					PriorState: cty.ObjectVal(map[string]cty.Value{
						"name": cty.StringVal("prior"),
					}),
					Config: cty.ObjectVal(map[string]cty.Value{
						"name": cty.StringVal("thingy"),
					}),
				},
			},
			Result: providers.PlanResourceChangeResponse{
				PlannedState: cty.ObjectVal(map[string]cty.Value{
					"name": cty.StringVal("thingy"),
				}),
			},
		},
	}
	if diff := gcmp.Diff(wantLog, gotLog, ctydebug.CmpOptions); diff != "" {
		t.Errorf("wrong ExecContext calls: %s", diff)
	}
}

View File

@@ -0,0 +1,73 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package execgraph
import (
"context"
"github.com/zclconf/go-cty/cty"
"github.com/opentofu/opentofu/internal/addrs"
"github.com/opentofu/opentofu/internal/lang/eval"
"github.com/opentofu/opentofu/internal/providers"
"github.com/opentofu/opentofu/internal/states"
"github.com/opentofu/opentofu/internal/tfdiags"
)
// ExecContext is implemented by the components that a [CompiledGraph]
// interacts with while executing its steps.
//
// The apply engine has the main implementation of this type, but it's an
// interface to make it possible to test the functionality in this package
// without depending directly on other components.
//
// The conventional variable name for a value of this type is "execCtx".
type ExecContext interface {
	// DesiredResourceInstance returns the [DesiredResourceInstance]
	// representation of the resource instance with the given address, or nil
	// if the requested address is not part of the desired state described
	// by the configuration.
	//
	// If this returns nil during real execution then that suggests a bug in
	// the planning engine, because it should only generate
	// desired-resource-instance operations for resource instances that actually
	// appeared in the desired state during the planning process.
	DesiredResourceInstance(ctx context.Context, addr addrs.AbsResourceInstance) *eval.DesiredResourceInstance
	// ResourceInstancePriorState returns either the current object or a deposed
	// object associated with the given resource instance address, or nil if
	// the requested object was not tracked in the prior state.
	//
	// Set deposedKey to [states.NotDeposed] to retrieve the current object
	// from the prior state.
	//
	// If this returns nil during real execution then that suggests a bug in
	// the planning engine, because it should only generate requests for
	// prior state objects that were present and valid in the refreshed state
	// during the planning step.
	ResourceInstancePriorState(ctx context.Context, addr addrs.AbsResourceInstance, deposedKey states.DeposedKey) *states.ResourceInstanceObject
	// ProviderInstanceConfig returns the value that should be sent when
	// configuring the specified provider instance, or [cty.NilVal] if
	// no such provider instance is declared.
	//
	// If this returns cty.NilVal during real execution then that suggests
	// a bug in the planning engine, because it should not generate an execution
	// graph that attempts to use an undeclared provider instance.
	ProviderInstanceConfig(ctx context.Context, addr addrs.AbsProviderInstanceCorrect) cty.Value
	// NewProviderClient returns a preconfigured client for the given provider,
	// using configVal as its configuration.
	//
	// If the provider refuses the configuration, or launching and configuring
	// the provider fails for any other reason, the returned diagnostics
	// contain end-user-oriented errors describing the problem(s).
	//
	// Each call to NewProviderClient returns a separate provider client. The
	// implementation should not attempt to reuse clients across multiple calls
	// to this method.
	NewProviderClient(ctx context.Context, addr addrs.Provider, configVal cty.Value) (providers.Configured, tfdiags.Diagnostics)
}

View File

@@ -0,0 +1,216 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package execgraph
import (
"context"
"errors"
"sync"
"github.com/zclconf/go-cty/cty"
"github.com/opentofu/opentofu/internal/addrs"
"github.com/opentofu/opentofu/internal/lang/eval"
"github.com/opentofu/opentofu/internal/providers"
"github.com/opentofu/opentofu/internal/states"
"github.com/opentofu/opentofu/internal/tfdiags"
)
// MockExecContext is a test double for [ExecContext] that records every
// method call made to it and delegates behavior to optional caller-provided
// hook functions.
type MockExecContext struct {
	// Calls records each observed call in order. Appends are synchronized
	// by mu, so read this only once concurrent execution has finished.
	Calls []MockExecContextCall
	// The following hooks provide the behavior for the corresponding
	// ExecContext methods. A nil hook makes the method return its result
	// type's zero value (NewProviderClient additionally returns an error
	// diagnostic when its hook is nil).
	DesiredResourceInstanceFunc    func(ctx context.Context, addr addrs.AbsResourceInstance) *eval.DesiredResourceInstance
	NewProviderClientFunc          func(ctx context.Context, addr addrs.Provider, configVal cty.Value) (providers.Configured, tfdiags.Diagnostics)
	ProviderInstanceConfigFunc     func(ctx context.Context, addr addrs.AbsProviderInstanceCorrect) cty.Value
	ResourceInstancePriorStateFunc func(ctx context.Context, addr addrs.AbsResourceInstance, deposedKey states.DeposedKey) *states.ResourceInstanceObject
	// mu guards appends to Calls.
	mu sync.Mutex
}
// DesiredResourceInstance implements ExecContext by delegating to
// m.DesiredResourceInstanceFunc, recording the call and its result.
func (m *MockExecContext) DesiredResourceInstance(ctx context.Context, addr addrs.AbsResourceInstance) *eval.DesiredResourceInstance {
	ret := (*eval.DesiredResourceInstance)(nil)
	if fn := m.DesiredResourceInstanceFunc; fn != nil {
		ret = fn(ctx, addr)
	}
	m.appendLog("DesiredResourceInstance", []any{addr}, ret)
	return ret
}
// NewProviderClient implements ExecContext by delegating to
// m.NewProviderClientFunc, recording the call and the returned client.
//
// If no hook is configured then it returns an error diagnostic, since the
// mock has no real providers to launch.
func (m *MockExecContext) NewProviderClient(ctx context.Context, addr addrs.Provider, configVal cty.Value) (providers.Configured, tfdiags.Diagnostics) {
	var client providers.Configured
	var diags tfdiags.Diagnostics
	if fn := m.NewProviderClientFunc; fn != nil {
		client, diags = fn(ctx, addr, configVal)
	} else {
		diags = diags.Append(errors.New("no provider clients available in this MockExecContext"))
	}
	// Only the client is logged; diagnostics are intentionally excluded
	// from the call log.
	m.appendLog("NewProviderClient", []any{addr, configVal}, client)
	return client, diags
}
// ProviderInstanceConfig implements ExecContext by delegating to
// m.ProviderInstanceConfigFunc, recording the call and its result.
//
// With no hook configured it returns [cty.NilVal].
func (m *MockExecContext) ProviderInstanceConfig(ctx context.Context, addr addrs.AbsProviderInstanceCorrect) cty.Value {
	ret := cty.NilVal
	if fn := m.ProviderInstanceConfigFunc; fn != nil {
		ret = fn(ctx, addr)
	}
	m.appendLog("ProviderInstanceConfig", []any{addr}, ret)
	return ret
}
// ResourceInstancePriorState implements ExecContext by delegating to
// m.ResourceInstancePriorStateFunc, recording the call and its result.
func (m *MockExecContext) ResourceInstancePriorState(ctx context.Context, addr addrs.AbsResourceInstance, deposedKey states.DeposedKey) *states.ResourceInstanceObject {
	ret := (*states.ResourceInstanceObject)(nil)
	if fn := m.ResourceInstancePriorStateFunc; fn != nil {
		ret = fn(ctx, addr, deposedKey)
	}
	m.appendLog("ResourceInstancePriorState", []any{addr, deposedKey}, ret)
	return ret
}
// NewManagedResourceProviderClient returns a [providers.Configured]
// implementation whose PlanResourceChange and ApplyResourceChange methods
// are backed by the given functions, and which records those calls in
// this MockExecContext's log.
func (m *MockExecContext) NewManagedResourceProviderClient(
	planFunc func(context.Context, providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse,
	applyFunc func(context.Context, providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse,
) providers.Configured {
	provider := &managedResourceInstanceMockProvider{
		execCtx:                 m,
		PlanResourceChangeFunc:  planFunc,
		ApplyResourceChangeFunc: applyFunc,
	}
	return provider
}
// appendLog records one method call and its primary result in m.Calls,
// holding m.mu so that concurrent steps can log safely.
func (m *MockExecContext) appendLog(methodName string, args []any, result any) {
	call := MockExecContextCall{
		MethodName: methodName,
		Args:       args,
		Result:     result,
	}
	m.mu.Lock()
	defer m.mu.Unlock()
	m.Calls = append(m.Calls, call)
}
// Compile-time check that MockExecContext satisfies ExecContext.
var _ ExecContext = (*MockExecContext)(nil)

// MockExecContextCall records a single method call observed by a
// [MockExecContext], for comparison against expected call logs in tests.
type MockExecContextCall struct {
	// MethodName identifies which method was called; provider client
	// calls use a "providerClient." prefix.
	MethodName string
	// Args holds the call's arguments, excluding the context.Context.
	Args []any
	// Result holds the call's primary result value; diagnostics and
	// errors are not recorded.
	Result any
}
// managedResourceInstanceMockProvider is the [providers.Configured]
// implementation returned by
// [MockExecContext.NewManagedResourceProviderClient]. Only the plan and
// apply operations are functional; most other methods panic if called.
type managedResourceInstanceMockProvider struct {
	// PlanResourceChangeFunc and ApplyResourceChangeFunc provide the
	// behavior for the corresponding methods.
	PlanResourceChangeFunc  func(ctx context.Context, req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse
	ApplyResourceChangeFunc func(ctx context.Context, req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse
	// execCtx receives the call log entries for plan/apply calls.
	execCtx *MockExecContext
}

// Compile-time check that the mock provider satisfies providers.Configured.
var _ providers.Configured = (*managedResourceInstanceMockProvider)(nil)
// ApplyResourceChange implements providers.Configured by delegating to
// m.ApplyResourceChangeFunc and recording the request and response in the
// owning MockExecContext's call log.
func (m *managedResourceInstanceMockProvider) ApplyResourceChange(ctx context.Context, req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse {
	response := m.ApplyResourceChangeFunc(ctx, req)
	m.execCtx.appendLog("providerClient.ApplyResourceChange", []any{req}, response)
	return response
}
// CallFunction implements providers.Configured.
//
// It is not implemented by this mock and panics if called.
func (m *managedResourceInstanceMockProvider) CallFunction(context.Context, providers.CallFunctionRequest) providers.CallFunctionResponse {
	panic("unimplemented")
}
// Close implements providers.Configured.
//
// It is a no-op for this mock, since there is no real provider process
// to shut down.
func (m *managedResourceInstanceMockProvider) Close(context.Context) error {
	return nil
}
// CloseEphemeralResource implements providers.Configured.
//
// It is not implemented by this mock and panics if called.
func (m *managedResourceInstanceMockProvider) CloseEphemeralResource(context.Context, providers.CloseEphemeralResourceRequest) providers.CloseEphemeralResourceResponse {
	panic("unimplemented")
}
// ConfigureProvider implements providers.Configured.
//
// It always succeeds, returning an empty response, since this mock has no
// real configuration step.
func (m *managedResourceInstanceMockProvider) ConfigureProvider(context.Context, providers.ConfigureProviderRequest) providers.ConfigureProviderResponse {
	return providers.ConfigureProviderResponse{}
}
// GetFunctions implements providers.Configured.
//
// It is not implemented by this mock and panics if called.
func (m *managedResourceInstanceMockProvider) GetFunctions(context.Context) providers.GetFunctionsResponse {
	panic("unimplemented")
}
// GetProviderSchema implements providers.Configured.
//
// It is not implemented by this mock and panics if called.
func (m *managedResourceInstanceMockProvider) GetProviderSchema(context.Context) providers.GetProviderSchemaResponse {
	panic("unimplemented")
}
// ImportResourceState implements providers.Configured.
//
// It is not implemented by this mock and panics if called.
func (m *managedResourceInstanceMockProvider) ImportResourceState(context.Context, providers.ImportResourceStateRequest) providers.ImportResourceStateResponse {
	panic("unimplemented")
}
// MoveResourceState implements providers.Configured.
//
// It is not implemented by this mock and panics if called.
func (m *managedResourceInstanceMockProvider) MoveResourceState(context.Context, providers.MoveResourceStateRequest) providers.MoveResourceStateResponse {
	panic("unimplemented")
}
// OpenEphemeralResource implements providers.Configured.
//
// It is not implemented by this mock and panics if called.
func (m *managedResourceInstanceMockProvider) OpenEphemeralResource(context.Context, providers.OpenEphemeralResourceRequest) providers.OpenEphemeralResourceResponse {
	panic("unimplemented")
}
// PlanResourceChange implements providers.Configured by delegating to
// m.PlanResourceChangeFunc and recording the request and response in the
// owning MockExecContext's call log.
func (m *managedResourceInstanceMockProvider) PlanResourceChange(ctx context.Context, req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse {
	response := m.PlanResourceChangeFunc(ctx, req)
	m.execCtx.appendLog("providerClient.PlanResourceChange", []any{req}, response)
	return response
}
// ReadDataSource implements providers.Configured.
//
// It is not implemented by this mock and panics if called.
func (m *managedResourceInstanceMockProvider) ReadDataSource(context.Context, providers.ReadDataSourceRequest) providers.ReadDataSourceResponse {
	panic("unimplemented")
}
// ReadResource implements providers.Configured.
//
// It is not implemented by this mock and panics if called.
func (m *managedResourceInstanceMockProvider) ReadResource(context.Context, providers.ReadResourceRequest) providers.ReadResourceResponse {
	panic("unimplemented")
}
// RenewEphemeralResource implements providers.Configured.
//
// It is not implemented by this mock and panics if called.
func (m *managedResourceInstanceMockProvider) RenewEphemeralResource(context.Context, providers.RenewEphemeralResourceRequest) (resp providers.RenewEphemeralResourceResponse) {
	panic("unimplemented")
}
// Stop implements providers.Configured.
//
// It is not implemented by this mock and panics if called.
func (m *managedResourceInstanceMockProvider) Stop(context.Context) error {
	panic("unimplemented")
}
// UpgradeResourceState implements providers.Configured.
//
// It is not implemented by this mock and panics if called.
func (m *managedResourceInstanceMockProvider) UpgradeResourceState(context.Context, providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse {
	panic("unimplemented")
}
// ValidateDataResourceConfig implements providers.Configured.
//
// It is not implemented by this mock and panics if called.
func (m *managedResourceInstanceMockProvider) ValidateDataResourceConfig(context.Context, providers.ValidateDataResourceConfigRequest) providers.ValidateDataResourceConfigResponse {
	panic("unimplemented")
}
// ValidateEphemeralConfig implements providers.Configured.
//
// It is not implemented by this mock and panics if called.
func (m *managedResourceInstanceMockProvider) ValidateEphemeralConfig(context.Context, providers.ValidateEphemeralConfigRequest) providers.ValidateEphemeralConfigResponse {
	panic("unimplemented")
}
// ValidateProviderConfig implements providers.Configured.
//
// It is not implemented by this mock and panics if called.
func (m *managedResourceInstanceMockProvider) ValidateProviderConfig(context.Context, providers.ValidateProviderConfigRequest) providers.ValidateProviderConfigResponse {
	panic("unimplemented")
}
// ValidateResourceConfig implements providers.Configured.
//
// It is not implemented by this mock and panics if called.
func (m *managedResourceInstanceMockProvider) ValidateResourceConfig(context.Context, providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse {
	panic("unimplemented")
}