mirror of https://github.com/opentffoundation/opentf.git (synced 2025-12-19 17:59:05 -05:00)
Per our rule that the content of the state can never make a move statement invalid, our behavior when two objects try to occupy the same address is to ignore the move and let the object already at the address take priority. For the moment this is silent from an end-user perspective and appears only in our internal logs. However, I'm hoping that our future planned adjustment to the interface of this function will include some way to report these collisions in an end-user-visible way, either as a separate warning per collision or as a single warning that collects all of the collisions into one message.

This situation can arise both because the previous run state already contained an object at the target address of a move and because more than one move ends up targeting the same location. In the latter case, which one "wins" is decided by our depth-first traversal order, which is in turn derived from our chaining and nesting rules and is therefore arbitrary but deterministic.
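As a rough sketch of how a caller might consume this function's current interface (and where user-visible reporting could eventually hook in), here is a minimal illustration. ApplyMoves, MoveStatement, and MoveResult are the declarations from the file below; the wrapper package, the reportMoves helper, and the stmts/priorState inputs are hypothetical and exist only for illustration.

package plandemo // hypothetical illustration package, not part of this change

import (
	"log"

	"github.com/hashicorp/terraform/internal/addrs"
	"github.com/hashicorp/terraform/internal/refactoring"
	"github.com/hashicorp/terraform/internal/states"
)

// reportMoves applies the configured move statements to the prior run state
// and traces each recorded address pairing. Each move is keyed by both its
// source and destination address, so a single move normally appears under
// two keys in the returned map.
func reportMoves(stmts []refactoring.MoveStatement, priorState *states.State) map[addrs.UniqueKey]refactoring.MoveResult {
	results := refactoring.ApplyMoves(stmts, priorState)
	for _, result := range results {
		// Today collisions and moves are only visible in logs like this one;
		// a future interface change could surface them as user-facing diagnostics.
		log.Printf("[TRACE] moved: %s -> %s", result.From, result.To)
	}
	return results
}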
221 lines · 8.2 KiB · Go
package refactoring

import (
	"fmt"
	"log"

	"github.com/hashicorp/terraform/internal/addrs"
	"github.com/hashicorp/terraform/internal/dag"
	"github.com/hashicorp/terraform/internal/logging"
	"github.com/hashicorp/terraform/internal/states"
)

type MoveResult struct {
	From, To addrs.AbsResourceInstance
}

// ApplyMoves modifies in-place the given state object so that any existing
// objects that are matched by a "from" argument of one of the move statements
// will be moved to instead appear at the "to" argument of that statement.
//
// The result is a map from the unique key of each absolute address that was
// either the source or destination of a move to a MoveResult describing
// what happened at that address.
//
// ApplyMoves does not have any error situations itself, and will instead just
// ignore any unresolvable move statements. Validation of a set of moves is
// a separate concern applied to the configuration, because validity of
// moves is always dependent only on the configuration, not on the state.
//
// ApplyMoves expects exclusive access to the given state while it's running.
// Don't read or write any part of the state structure until ApplyMoves returns.
func ApplyMoves(stmts []MoveStatement, state *states.State) map[addrs.UniqueKey]MoveResult {
	results := make(map[addrs.UniqueKey]MoveResult)

	// The methodology here is to construct a small graph of all of the move
	// statements where the edges represent where a particular statement
	// is either chained from or nested inside the effect of another statement.
	// That then means we can traverse the graph in topological sort order
	// to gradually move objects through potentially multiple moves each.

	g := buildMoveStatementGraph(stmts)

	// If there are any cycles in the graph then we'll not take any action
	// at all. The separate validation step should detect this and return
	// an error.
	if len(g.Cycles()) != 0 {
		return results
	}

	// The starting nodes are the ones that don't depend on any other nodes.
	startNodes := make(dag.Set, len(stmts))
	for _, v := range g.Vertices() {
		if len(g.DownEdges(v)) == 0 {
			startNodes.Add(v)
		}
	}

	if startNodes.Len() == 0 {
		log.Println("[TRACE] refactoring.ApplyMoves: No 'moved' statements to consider in this configuration")
		return results
	}

	log.Printf("[TRACE] refactoring.ApplyMoves: Processing 'moved' statements in the configuration\n%s", logging.Indent(g.String()))

	g.ReverseDepthFirstWalk(startNodes, func(v dag.Vertex, depth int) error {
		stmt := v.(*MoveStatement)

		for _, ms := range state.Modules {
			modAddr := ms.Addr
			if !stmt.From.SelectsModule(modAddr) {
				continue
			}

			// We now know that the current module is relevant but what
			// we'll do with it depends on the object kind.
			switch kind := stmt.ObjectKind(); kind {
			case addrs.MoveEndpointModule:
				// For a module endpoint we just try the module address
				// directly.
				if newAddr, matches := modAddr.MoveDestination(stmt.From, stmt.To); matches {
					log.Printf("[TRACE] refactoring.ApplyMoves: %s has moved to %s", modAddr, newAddr)

					// If we already have a module at the new address then
					// we'll skip this move and let the existing object take
					// priority.
					// TODO: This should probably generate a user-visible
					// warning, but we'd need to rethink the signature of this
					// function to achieve that.
					if ms := state.Module(newAddr); ms != nil {
						log.Printf("[WARN] Skipped moving %s to %s, because there's already another module instance at the destination", modAddr, newAddr)
						continue
					}

					// We need to visit all of the resource instances in the
					// module and record them individually as results.
					for _, rs := range ms.Resources {
						relAddr := rs.Addr.Resource
						for key := range rs.Instances {
							oldInst := relAddr.Instance(key).Absolute(modAddr)
							newInst := relAddr.Instance(key).Absolute(newAddr)
							result := MoveResult{
								From: oldInst,
								To:   newInst,
							}
							results[oldInst.UniqueKey()] = result
							results[newInst.UniqueKey()] = result
						}
					}

					state.MoveModuleInstance(modAddr, newAddr)
					continue
				}
			case addrs.MoveEndpointResource:
				// For a resource endpoint we need to search each of the
				// resources and resource instances in the module.
				for _, rs := range ms.Resources {
					rAddr := rs.Addr
					if newAddr, matches := rAddr.MoveDestination(stmt.From, stmt.To); matches {
						log.Printf("[TRACE] refactoring.ApplyMoves: resource %s has moved to %s", rAddr, newAddr)

						// If we already have a resource at the new address then
						// we'll skip this move and let the existing object take
						// priority.
						// TODO: This should probably generate a user-visible
						// warning, but we'd need to rethink the signature of this
						// function to achieve that.
						if rs := state.Resource(newAddr); rs != nil {
							log.Printf("[WARN] Skipped moving %s to %s, because there's already another resource at the destination", rAddr, newAddr)
							continue
						}

						for key := range rs.Instances {
							oldInst := rAddr.Instance(key)
							newInst := newAddr.Instance(key)
							result := MoveResult{
								From: oldInst,
								To:   newInst,
							}
							results[oldInst.UniqueKey()] = result
							results[newInst.UniqueKey()] = result
						}
						state.MoveAbsResource(rAddr, newAddr)
						continue
					}
					for key := range rs.Instances {
						iAddr := rAddr.Instance(key)
						if newAddr, matches := iAddr.MoveDestination(stmt.From, stmt.To); matches {
							log.Printf("[TRACE] refactoring.ApplyMoves: resource instance %s has moved to %s", iAddr, newAddr)

							// If we already have a resource instance at the new
							// address then we'll skip this move and let the existing
							// object take priority.
							// TODO: This should probably generate a user-visible
							// warning, but we'd need to rethink the signature of this
							// function to achieve that.
							if is := state.ResourceInstance(newAddr); is != nil {
								log.Printf("[WARN] Skipped moving %s to %s, because there's already another resource instance at the destination", iAddr, newAddr)
								continue
							}

							result := MoveResult{From: iAddr, To: newAddr}
							results[iAddr.UniqueKey()] = result
							results[newAddr.UniqueKey()] = result

							state.MoveAbsResourceInstance(iAddr, newAddr)
							continue
						}
					}
				}
			default:
				panic(fmt.Sprintf("unhandled move object kind %s", kind))
			}
		}

		return nil
	})

	// FIXME: In the case of either chained or nested moves, "results" will
	// be left in a pretty interesting shape where the "old" address will
	// refer to a result that describes only the first step, while the "new"
	// address will refer to a result that describes only the last step.
	// To make that actually useful we'll need a different strategy where
	// the result describes the _effective_ source and destination, skipping
	// over any intermediate steps we took to get there, so that ultimately
	// we'll have enough information to annotate items in the plan with the
	// addresses they originally moved from.

	return results
}

// buildMoveStatementGraph constructs a dependency graph of the given move
// statements, where the nodes are all pointers to statements in the given
// slice and the edges represent either chaining or nesting relationships.
//
// buildMoveStatementGraph doesn't do any validation of the graph, so it
// may contain cycles and other sorts of invalidity.
func buildMoveStatementGraph(stmts []MoveStatement) *dag.AcyclicGraph {
	g := &dag.AcyclicGraph{}
	for i := range stmts {
		// The graph nodes are pointers to the actual statements directly.
		g.Add(&stmts[i])
	}

	// Now we'll add the edges representing chaining and nesting relationships.
	// We assume that a reasonable configuration will have at most tens of
	// move statements and thus this N*M algorithm is acceptable.
	for dependerI := range stmts {
		depender := &stmts[dependerI]
		for dependeeI := range stmts {
			dependee := &stmts[dependeeI]
			dependeeTo := dependee.To
			dependerFrom := depender.From
			if dependerFrom.CanChainFrom(dependeeTo) || dependerFrom.NestedWithin(dependeeTo) {
				g.Connect(dag.BasicEdge(depender, dependee))
			}
		}
	}

	return g
}