mirror of
https://github.com/opentffoundation/opentf.git
synced 2025-12-19 09:48:32 -05:00
OpenTelemetry has various Go packages split across several Go modules that often need to be carefully upgraded together. And in particular, we are using the "semconv" package in conjunction with the OpenTelemetry SDK's "resource" package in a way that requires that they both agree on which version of the OpenTelemetry Semantic Conventions are being followed. To help avoid "dependency hell" situations when upgrading, this centralizes all of our direct calls into the OpenTelemetry SDK and tracing API into packages under internal/tracing, by exposing a few thin wrapper functions that other packages can use to access the same functionality indirectly. We only use a relatively small subset of the OpenTelemetry library surface area, so we don't need too many of these reexports and they should not represent a significant additional maintenance burden. For the semconv and resource interaction in particular this also factors that out into a separate helper function with a unit test, so we should notice quickly whenever they become misaligned. This complements the end-to-end test previously added in opentofu/opentofu#3447 to give us faster feedback about this particular problem, while the end-to-end test has the broader scope of making sure there aren't any errors at all when initializing OpenTelemetry tracing. Finally, this also replaces the constants we previously had in package traceaddrs with functions that return attribute.KeyValue values directly. This matches the API style used by the OpenTelemetry semconv packages, and makes the calls to these helpers from elsewhere in the system a little more concise. Signed-off-by: Martin Atkins <mart@degeneration.co.uk>
177 lines
6.2 KiB
Go
177 lines
6.2 KiB
Go
// Copyright (c) The OpenTofu Authors
|
|
// SPDX-License-Identifier: MPL-2.0
|
|
// Copyright (c) 2023 HashiCorp, Inc.
|
|
// SPDX-License-Identifier: MPL-2.0
|
|
|
|
package tofu
|
|
|
|
import (
|
|
"context"
|
|
"fmt"
|
|
"log"
|
|
|
|
"github.com/opentofu/opentofu/internal/dag"
|
|
"github.com/opentofu/opentofu/internal/tracing/traceattrs"
|
|
|
|
"github.com/opentofu/opentofu/internal/addrs"
|
|
"github.com/opentofu/opentofu/internal/plans"
|
|
"github.com/opentofu/opentofu/internal/states"
|
|
"github.com/opentofu/opentofu/internal/tfdiags"
|
|
"github.com/opentofu/opentofu/internal/tracing"
|
|
"github.com/zclconf/go-cty/cty"
|
|
)
|
|
|
|
// NodePlanDestroyableResourceInstance represents a resource that is ready
// to be planned for destruction.
type NodePlanDestroyableResourceInstance struct {
	// NodeAbstractResourceInstance provides the common behavior shared by
	// all per-instance graph nodes (addressing, state/config attachment,
	// provider resolution, etc.).
	*NodeAbstractResourceInstance

	// skipRefresh indicates that we should skip refreshing the object's
	// current state before planning its destruction. See the coupled logic
	// noted in managedResourceExecute for how this interacts with
	// Context.destroyPlan.
	skipRefresh bool
}
|
|
|
|
// Compile-time assertions that *NodePlanDestroyableResourceInstance
// implements all of the graph-node interfaces that the graph builders and
// walkers expect of it. A build failure here means a required method is
// missing or has the wrong signature.
var (
	_ GraphNodeModuleInstance       = (*NodePlanDestroyableResourceInstance)(nil)
	_ GraphNodeReferenceable        = (*NodePlanDestroyableResourceInstance)(nil)
	_ GraphNodeReferencer           = (*NodePlanDestroyableResourceInstance)(nil)
	_ GraphNodeDestroyer            = (*NodePlanDestroyableResourceInstance)(nil)
	_ GraphNodeConfigResource       = (*NodePlanDestroyableResourceInstance)(nil)
	_ GraphNodeResourceInstance     = (*NodePlanDestroyableResourceInstance)(nil)
	_ GraphNodeAttachResourceConfig = (*NodePlanDestroyableResourceInstance)(nil)
	_ GraphNodeAttachResourceState  = (*NodePlanDestroyableResourceInstance)(nil)
	_ GraphNodeExecutable           = (*NodePlanDestroyableResourceInstance)(nil)
	_ GraphNodeProviderConsumer     = (*NodePlanDestroyableResourceInstance)(nil)
	_ dag.NamedVertex               = (*NodePlanDestroyableResourceInstance)(nil)
)
|
|
|
|
// dag.NamedVertex
|
|
func (n *NodePlanDestroyableResourceInstance) Name() string {
|
|
return n.NodeAbstractResourceInstance.Name() + " (destroy)"
|
|
}
|
|
|
|
// GraphNodeDestroyer
|
|
func (n *NodePlanDestroyableResourceInstance) DestroyAddr() *addrs.AbsResourceInstance {
|
|
addr := n.ResourceInstanceAddr()
|
|
return &addr
|
|
}
|
|
|
|
// GraphNodeEvalable
|
|
func (n *NodePlanDestroyableResourceInstance) Execute(ctx context.Context, evalCtx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) {
|
|
addr := n.ResourceInstanceAddr()
|
|
|
|
ctx, span := tracing.Tracer().Start(
|
|
ctx, traceNamePlanResourceInstance,
|
|
tracing.SpanAttributes(
|
|
traceattrs.String(traceAttrResourceInstanceAddr, addr.String()),
|
|
traceattrs.Bool(traceAttrPlanRefresh, !n.skipRefresh),
|
|
),
|
|
)
|
|
defer span.End()
|
|
|
|
diags = diags.Append(n.resolveProvider(ctx, evalCtx, false, states.NotDeposed))
|
|
if diags.HasErrors() {
|
|
tracing.SetSpanError(span, diags)
|
|
return diags
|
|
}
|
|
span.SetAttributes(
|
|
traceattrs.String(traceAttrProviderInstanceAddr, traceProviderInstanceAddr(n.ResolvedProvider.ProviderConfig, n.ResolvedProviderKey)),
|
|
)
|
|
|
|
switch addr.Resource.Resource.Mode {
|
|
case addrs.ManagedResourceMode:
|
|
diags = diags.Append(
|
|
n.managedResourceExecute(ctx, evalCtx, op),
|
|
)
|
|
case addrs.DataResourceMode:
|
|
diags = diags.Append(
|
|
n.dataResourceExecute(ctx, evalCtx, op),
|
|
)
|
|
case addrs.EphemeralResourceMode:
|
|
diags = diags.Append(
|
|
n.ephemeralResourceExecute(ctx, evalCtx, op),
|
|
)
|
|
default:
|
|
panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode))
|
|
}
|
|
tracing.SetSpanError(span, diags)
|
|
return diags
|
|
}
|
|
|
|
func (n *NodePlanDestroyableResourceInstance) managedResourceExecute(ctx context.Context, evalCtx EvalContext, _ walkOperation) (diags tfdiags.Diagnostics) {
|
|
addr := n.ResourceInstanceAddr()
|
|
|
|
// Declare a bunch of variables that are used for state during
|
|
// evaluation. These are written to by address in the EvalNodes we
|
|
// declare below.
|
|
var change *plans.ResourceInstanceChange
|
|
var state *states.ResourceInstanceObject
|
|
|
|
state, err := n.readResourceInstanceState(ctx, evalCtx, addr)
|
|
diags = diags.Append(err)
|
|
if diags.HasErrors() {
|
|
return diags
|
|
}
|
|
|
|
// If we are in the "skip refresh" mode then we will have skipped over our
|
|
// usual opportunity to update the previous run state and refresh state
|
|
// with the result of any provider schema upgrades, so we'll compensate
|
|
// by doing that here.
|
|
//
|
|
// NOTE: this is coupled with logic in Context.destroyPlan which skips
|
|
// running a normal plan walk when refresh is enabled. These two
|
|
// conditionals must agree (be exactly opposite) in order to get the
|
|
// correct behavior in both cases.
|
|
if n.skipRefresh {
|
|
diags = diags.Append(n.writeResourceInstanceState(ctx, evalCtx, state, prevRunState))
|
|
if diags.HasErrors() {
|
|
return diags
|
|
}
|
|
diags = diags.Append(n.writeResourceInstanceState(ctx, evalCtx, state, refreshState))
|
|
if diags.HasErrors() {
|
|
return diags
|
|
}
|
|
}
|
|
|
|
change, destroyPlanDiags := n.planDestroy(ctx, evalCtx, state, "")
|
|
diags = diags.Append(destroyPlanDiags)
|
|
if diags.HasErrors() {
|
|
return diags
|
|
}
|
|
|
|
diags = diags.Append(n.writeChange(ctx, evalCtx, change, ""))
|
|
if diags.HasErrors() {
|
|
return diags
|
|
}
|
|
|
|
diags = diags.Append(n.checkPreventDestroy(change))
|
|
return diags
|
|
}
|
|
|
|
func (n *NodePlanDestroyableResourceInstance) dataResourceExecute(ctx context.Context, evalCtx EvalContext, _ walkOperation) (diags tfdiags.Diagnostics) {
|
|
|
|
// We may not be able to read a prior data source from the state if the
|
|
// schema was upgraded and we are destroying before ever refreshing that
|
|
// data source. Regardless, a data source "destroy" is simply writing a
|
|
// null state, which we can do with a null prior state too.
|
|
change := &plans.ResourceInstanceChange{
|
|
Addr: n.ResourceInstanceAddr(),
|
|
PrevRunAddr: n.prevRunAddr(evalCtx),
|
|
Change: plans.Change{
|
|
Action: plans.Delete,
|
|
Before: cty.NullVal(cty.DynamicPseudoType),
|
|
After: cty.NullVal(cty.DynamicPseudoType),
|
|
},
|
|
ProviderAddr: n.ResolvedProvider.ProviderConfig,
|
|
}
|
|
return diags.Append(n.writeChange(ctx, evalCtx, change, ""))
|
|
}
|
|
|
|
func (n *NodePlanDestroyableResourceInstance) ephemeralResourceExecute(_ context.Context, _ EvalContext, _ walkOperation) (diags tfdiags.Diagnostics) {
|
|
log.Printf("[TRACE] NodePlanDestroyableResourceInstance: called for ephemeral resource %s", n.Addr)
|
|
return diags.Append(tfdiags.Sourceless(
|
|
tfdiags.Error,
|
|
"An ephemeral resource planned for destroy",
|
|
fmt.Sprintf("A destroy operation has been planned for the ephemeral resource %q. This is an OpenTofu error. Please report this.", n.Addr),
|
|
))
|
|
}
|