Remove global schema cache and clean up tofu schema/contextPlugins (#3589)

Signed-off-by: Christian Mesh <christianmesh1@gmail.com>
Co-authored-by: Martin Atkins <mart@degeneration.co.uk>
Christian Mesh · 2025-12-17 09:49:39 -05:00 · committed by GitHub
parent 4f09b06624
commit a3fe39ff33
11 changed files with 308 additions and 541 deletions

View File

@@ -365,6 +365,8 @@ func (m *Meta) internalProviders() map[string]providers.Factory {
// file in the given cache package and uses go-plugin to implement
// providers.Interface against it.
func providerFactory(meta *providercache.CachedProvider) providers.Factory {
schemaCache := providers.NewSchemaCache()
return func() (providers.Interface, error) {
execFile, err := meta.ExecutableFile()
if err != nil {
@@ -395,7 +397,7 @@ func providerFactory(meta *providercache.CachedProvider) providers.Factory {
}
protoVer := client.NegotiatedVersion()
p, err := initializeProviderInstance(raw, protoVer, client, meta.Provider)
p, err := initializeProviderInstance(raw, protoVer, client, schemaCache)
if errors.Is(err, errUnsupportedProtocolVersion) {
panic(err)
}
@@ -406,18 +408,18 @@ func providerFactory(meta *providercache.CachedProvider) providers.Factory {
// initializeProviderInstance uses the plugin dispensed by the RPC client, and initializes a plugin instance
// per the protocol version
func initializeProviderInstance(plugin interface{}, protoVer int, pluginClient *plugin.Client, pluginAddr addrs.Provider) (providers.Interface, error) {
func initializeProviderInstance(plugin interface{}, protoVer int, pluginClient *plugin.Client, schemaCache providers.SchemaCache) (providers.Interface, error) {
// store the client so that the plugin can kill the child process
switch protoVer {
case 5:
p := plugin.(*tfplugin.GRPCProvider)
p.PluginClient = pluginClient
p.Addr = pluginAddr
p.SchemaCache = schemaCache
return p, nil
case 6:
p := plugin.(*tfplugin6.GRPCProvider)
p.PluginClient = pluginClient
p.Addr = pluginAddr
p.SchemaCache = schemaCache
return p, nil
default:
return nil, errUnsupportedProtocolVersion
@@ -441,6 +443,8 @@ func devOverrideProviderFactory(provider addrs.Provider, localDir getproviders.P
// reattach information to connect to go-plugin processes that are already
// running, and implements providers.Interface against it.
func unmanagedProviderFactory(provider addrs.Provider, reattach *plugin.ReattachConfig) providers.Factory {
schemaCache := providers.NewSchemaCache()
return func() (providers.Interface, error) {
config := &plugin.ClientConfig{
HandshakeConfig: tfplugin.Handshake,
@@ -490,7 +494,7 @@ func unmanagedProviderFactory(provider addrs.Provider, reattach *plugin.Reattach
protoVer = 5
}
return initializeProviderInstance(raw, protoVer, client, provider)
return initializeProviderInstance(raw, protoVer, client, schemaCache)
}
}
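
Note: with the global providers.SchemaCache removed, the schema cache is now scoped to the factory closure. providerFactory and unmanagedProviderFactory each create one cache up front and capture it in the func they return, so every instance dispensed by that factory shares a single memoized schema while unrelated factories stay isolated. A minimal sketch of that scoping, with a string standing in for the real schema type and `instance` as a hypothetical stand-in for the dispensed provider client:

```go
package main

import (
	"fmt"
	"sync"
)

// schemaCache memoizes a single fetch; a stand-in for providers.SchemaCache.
type schemaCache func(fetch func() string) string

func newSchemaCache() schemaCache {
	var once sync.Once
	var schema string
	return func(fetch func() string) string {
		once.Do(func() { schema = fetch() })
		return schema
	}
}

// instance is a hypothetical stand-in for a dispensed GRPCProvider.
type instance struct{ cache schemaCache }

// providerFactory mirrors the pattern above: the cache lives in the factory
// closure, so all instances it returns share the same memoized schema.
func providerFactory() func() *instance {
	cache := newSchemaCache()
	return func() *instance { return &instance{cache: cache} }
}

func main() {
	factory := providerFactory()
	a, b := factory(), factory()
	fetch := func() string {
		fmt.Println("fetching schema")
		return "schema-v1"
	}
	fmt.Println(a.cache(fetch)) // first instance triggers the fetch
	fmt.Println(b.cache(fetch)) // second instance reuses the memoized result
}
```

Running this prints "fetching schema" once even though two instances ask for the schema.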

View File

@@ -9,7 +9,6 @@ import (
"context"
"errors"
"fmt"
"sync"
plugin "github.com/hashicorp/go-plugin"
"github.com/opentofu/opentofu/internal/plugin/validation"
@@ -18,7 +17,6 @@ import (
"github.com/zclconf/go-cty/cty/msgpack"
"google.golang.org/grpc"
"github.com/opentofu/opentofu/internal/addrs"
"github.com/opentofu/opentofu/internal/logging"
"github.com/opentofu/opentofu/internal/plugin/convert"
"github.com/opentofu/opentofu/internal/providers"
@@ -67,11 +65,6 @@ type GRPCProvider struct {
// used in an end to end test of a provider.
TestServer *grpc.Server
// Addr uniquely identifies the type of provider.
// Normally executed providers will have this set during initialization,
// but it may not always be available for alternative execute modes.
Addr addrs.Provider
// Proto client use to make the grpc service calls.
client proto.ProviderClient
@@ -86,53 +79,40 @@ type GRPCProvider struct {
// to use as the parent context for gRPC API calls.
ctx context.Context
mu sync.Mutex
// schema stores the schema for this provider. This is used to properly
// serialize the requests for schemas.
schema providers.GetProviderSchemaResponse
// SchemaCache stores the schema for this provider. This is used to properly
// serialize the requests for schemas. This is shared between instances
// of the provider.
SchemaCache providers.SchemaCache
// Keep track of whether the proto schema fetch call has happened for GetProviderSchemaOptional.
// This allows caching to still function efficiently without violating legacy providers' requirements.
hasFetchedSchema bool
}
var _ providers.Interface = new(GRPCProvider)
func (p *GRPCProvider) GetProviderSchema(ctx context.Context) (resp providers.GetProviderSchemaResponse) {
logger.Trace("GRPCProvider: GetProviderSchema")
p.mu.Lock()
defer p.mu.Unlock()
// First, we check the global cache.
// The cache could contain this schema if an instance of this provider has previously been started.
if !p.Addr.IsZero() {
// Even if the schema is cached, GetProviderSchemaOptional could be false. This would indicate that once instantiated,
// this provider requires the get schema call to be made at least once, as it handles part of the provider's setup.
// At this point, we don't know if this is the first call to a provider instance or not, so we don't use the result in that case.
if schemaCached, ok := providers.SchemaCache.Get(p.Addr); ok && schemaCached.ServerCapabilities.GetProviderSchemaOptional {
logger.Trace("GRPCProvider: GetProviderSchema: serving from global schema cache", "address", p.Addr)
return schemaCached
}
schema := p.SchemaCache(func() providers.GetProviderSchemaResponse {
return p.getProviderSchema(ctx)
})
if !p.hasFetchedSchema && !schema.ServerCapabilities.GetProviderSchemaOptional {
// Force the call; we only need to satisfy the legacy provider's requirement that GetProviderSchema is called at least once per instance
_, _ = p.getProtoProviderSchema(ctx)
}
// If the local cache is non-zero, we know this instance has called
// GetProviderSchema at least once, so has satisfied the possible requirement of `GetProviderSchemaOptional=false`.
// This means that we can return early now using the locally cached schema, without making this call again.
if p.schema.Provider.Block != nil {
return p.schema
}
return schema
}
func (p *GRPCProvider) getProviderSchema(ctx context.Context) (resp providers.GetProviderSchemaResponse) {
resp.ResourceTypes = make(map[string]providers.Schema)
resp.DataSources = make(map[string]providers.Schema)
resp.EphemeralResources = make(map[string]providers.Schema)
resp.Functions = make(map[string]providers.FunctionSpec)
// Some providers may generate quite large schemas, and the internal default
// grpc response size limit is 4MB. 64MB should cover most any use case, and
// if we get providers nearing that we may want to consider a finer-grained
// API to fetch individual resource schemas.
// Note: this option is marked as EXPERIMENTAL in the grpc API. We keep
// this for compatibility, but recent providers all set the max message
// size much higher on the server side, which is the supported method for
// determining payload size.
const maxRecvSize = 64 << 20
protoResp, err := p.client.GetSchema(ctx, new(proto.GetProviderSchema_Request), grpc.MaxRecvMsgSizeCallOption{MaxRecvMsgSize: maxRecvSize})
protoResp, err := p.getProtoProviderSchema(ctx)
if err != nil {
resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err))
return resp
@@ -180,26 +160,29 @@ func (p *GRPCProvider) GetProviderSchema(ctx context.Context) (resp providers.Ge
resp.ServerCapabilities.GetProviderSchemaOptional = protoResp.ServerCapabilities.GetProviderSchemaOptional
}
// Set the global provider cache so that future calls to this provider can use the cached value.
// Crucially, this doesn't look at GetProviderSchemaOptional, because the layers above could use this cache
// *without* creating an instance of this provider. And if there is no instance,
// then we don't need to set up anything (cause there is nothing to set up), so we need no call
// to the providers GetSchema rpc.
if !p.Addr.IsZero() {
providers.SchemaCache.Set(p.Addr, resp)
}
// Always store this here in the client for providers that are not able to use GetProviderSchemaOptional.
// Crucially, this indicates that we've made at least one call to GetProviderSchema to this instance of the provider,
// which means in the future we'll be able to return using this cache
// (because the possible setup contained in the GetProviderSchema call has happened).
// If GetProviderSchemaOptional is true then this cache won't actually ever be used, because the calls to this method
// will be satisfied by the global provider cache.
p.schema = resp
return resp
}
// Common code to fetch the raw schema data from the provider. This is called from
// multiple locations due to GetProviderSchemaOptional
func (p *GRPCProvider) getProtoProviderSchema(ctx context.Context) (*proto.GetProviderSchema_Response, error) {
// Some providers may generate quite large schemas, and the internal default
// grpc response size limit is 4MB. 64MB should cover most any use case, and
// if we get providers nearing that we may want to consider a finer-grained
// API to fetch individual resource schemas.
// Note: this option is marked as EXPERIMENTAL in the grpc API. We keep
// this for compatibility, but recent providers all set the max message
// size much higher on the server side, which is the supported method for
// determining payload size.
const maxRecvSize = 64 << 20
resp, err := p.client.GetSchema(ctx, new(proto.GetProviderSchema_Request), grpc.MaxRecvMsgSizeCallOption{MaxRecvMsgSize: maxRecvSize})
// Mark that we have handled the internal requirement for legacy providers (!GetProviderSchemaOptional)
p.hasFetchedSchema = true
return resp, err
}
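
Note: the subtle part of this hunk is how the shared SchemaCache interacts with the new hasFetchedSchema flag. Summarized as a decision table, per provider instance ("raw RPC" means getProtoProviderSchema, which also sets hasFetchedSchema):

cache state | GetProviderSchemaOptional | hasFetchedSchema | behavior
empty       | not yet known             | false            | raw RPC runs while the cache is filled
filled      | true                      | false            | cached schema returned, no RPC
filled      | false                     | false            | cached schema returned, plus one forced raw RPC (result discarded) so the legacy provider still sees its setup call
filled      | any                       | true             | cached schema returned, no RPC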
func (p *GRPCProvider) ValidateProviderConfig(ctx context.Context, r providers.ValidateProviderConfigRequest) (resp providers.ValidateProviderConfigResponse) {
logger.Trace("GRPCProvider: ValidateProviderConfig")

View File

@@ -21,7 +21,6 @@ import (
"google.golang.org/grpc"
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/opentofu/opentofu/internal/addrs"
"github.com/opentofu/opentofu/internal/legacy/hcl2shim"
mockproto "github.com/opentofu/opentofu/internal/plugin/mock_proto"
"github.com/opentofu/opentofu/internal/providers"
@@ -62,6 +61,13 @@ func mockProviderClientWithSchema(t *testing.T, schema *proto.GetProviderSchema_
return client
}
func newGRPCProvider(client proto.ProviderClient) *GRPCProvider {
return &GRPCProvider{
client: client,
SchemaCache: providers.NewSchemaCache(),
}
}
func checkDiags(t *testing.T, d tfdiags.Diagnostics) {
t.Helper()
if d.HasErrors() {
@@ -156,9 +162,7 @@ func providerProtoSchema() *proto.GetProviderSchema_Response {
}
func TestGRPCProvider_GetSchema(t *testing.T) {
p := &GRPCProvider{
client: mockProviderClient(t),
}
p := newGRPCProvider(mockProviderClient(t))
resp := p.GetProviderSchema(t.Context())
checkDiags(t, resp.Diagnostics)
@@ -192,9 +196,7 @@ func TestGRPCProvider_GetSchema_GRPCError(t *testing.T) {
gomock.Any(),
).Return(&proto.GetProviderSchema_Response{}, fmt.Errorf("test error"))
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
resp := p.GetProviderSchema(t.Context())
@@ -204,13 +206,8 @@ func TestGRPCProvider_GetSchema_GRPCError(t *testing.T) {
func TestGRPCProvider_GetSchema_GlobalCacheEnabled(t *testing.T) {
ctrl := gomock.NewController(t)
client := mockproto.NewMockProviderClient(ctrl)
// The SchemaCache is global and is saved between test runs
providers.SchemaCache = providers.NewMockSchemaCache()
providerAddr := addrs.Provider{
Namespace: "namespace",
Type: "type",
}
cache := providers.NewSchemaCache()
mockedProviderResponse := &proto.Schema{Version: 2, Block: &proto.Schema_Block{}}
@@ -225,10 +222,8 @@ func TestGRPCProvider_GetSchema_GlobalCacheEnabled(t *testing.T) {
// Run GetProviderTwice, expect GetSchema to be called once
// Re-initialize the provider before each run to avoid usage of the local cache
p := &GRPCProvider{
client: client,
Addr: providerAddr,
}
p := newGRPCProvider(client)
p.SchemaCache = cache
resp := p.GetProviderSchema(t.Context())
checkDiags(t, resp.Diagnostics)
@@ -236,10 +231,8 @@ func TestGRPCProvider_GetSchema_GlobalCacheEnabled(t *testing.T) {
t.Fatal(cmp.Diff(resp.Provider.Version, mockedProviderResponse.Version))
}
p = &GRPCProvider{
client: client,
Addr: providerAddr,
}
p = newGRPCProvider(client)
p.SchemaCache = cache
resp = p.GetProviderSchema(t.Context())
checkDiags(t, resp.Diagnostics)
@@ -251,13 +244,6 @@ func TestGRPCProvider_GetSchema_GlobalCacheEnabled(t *testing.T) {
func TestGRPCProvider_GetSchema_GlobalCacheDisabled(t *testing.T) {
ctrl := gomock.NewController(t)
client := mockproto.NewMockProviderClient(ctrl)
// The SchemaCache is global and is saved between test runs
providers.SchemaCache = providers.NewMockSchemaCache()
providerAddr := addrs.Provider{
Namespace: "namespace",
Type: "type",
}
mockedProviderResponse := &proto.Schema{Version: 2, Block: &proto.Schema_Block{}}
@@ -272,10 +258,7 @@ func TestGRPCProvider_GetSchema_GlobalCacheDisabled(t *testing.T) {
// Run GetProviderTwice, expect GetSchema to be called once
// Re-initialize the provider before each run to avoid usage of the local cache
p := &GRPCProvider{
client: client,
Addr: providerAddr,
}
p := newGRPCProvider(client)
resp := p.GetProviderSchema(t.Context())
checkDiags(t, resp.Diagnostics)
@@ -283,10 +266,7 @@ func TestGRPCProvider_GetSchema_GlobalCacheDisabled(t *testing.T) {
t.Fatal(cmp.Diff(resp.Provider.Version, mockedProviderResponse.Version))
}
p = &GRPCProvider{
client: client,
Addr: providerAddr,
}
p = newGRPCProvider(client)
resp = p.GetProviderSchema(t.Context())
checkDiags(t, resp.Diagnostics)
@@ -317,9 +297,7 @@ func TestGRPCProvider_GetSchema_ResponseErrorDiagnostic(t *testing.T) {
Provider: &proto.Schema{},
}, nil)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
resp := p.GetProviderSchema(t.Context())
@@ -328,9 +306,7 @@ func TestGRPCProvider_GetSchema_ResponseErrorDiagnostic(t *testing.T) {
func TestGRPCProvider_PrepareProviderConfig(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().PrepareProviderConfig(
gomock.Any(),
@@ -344,9 +320,7 @@ func TestGRPCProvider_PrepareProviderConfig(t *testing.T) {
func TestGRPCProvider_ValidateResourceConfig(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().ValidateResourceTypeConfig(
gomock.Any(),
@@ -363,9 +337,7 @@ func TestGRPCProvider_ValidateResourceConfig(t *testing.T) {
func TestGRPCProvider_ValidateDataSourceConfig(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().ValidateDataSourceConfig(
gomock.Any(),
@@ -382,9 +354,7 @@ func TestGRPCProvider_ValidateDataSourceConfig(t *testing.T) {
func TestGRPCProvider_ValidateEphemeralResourceConfig(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().ValidateEphemeralResourceConfig(
gomock.Any(),
@@ -401,9 +371,7 @@ func TestGRPCProvider_ValidateEphemeralResourceConfig(t *testing.T) {
func TestGRPCProvider_UpgradeResourceState(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().UpgradeResourceState(
gomock.Any(),
@@ -432,9 +400,7 @@ func TestGRPCProvider_UpgradeResourceState(t *testing.T) {
func TestGRPCProvider_UpgradeResourceStateJSON(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().UpgradeResourceState(
gomock.Any(),
@@ -468,9 +434,7 @@ func TestGRPCProvider_UpgradeResourceStateWithWriteOnlyReturned(t *testing.T) {
Optional: true,
WriteOnly: true,
})))
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().UpgradeResourceState(
gomock.Any(),
@@ -505,9 +469,7 @@ func TestGRPCProvider_UpgradeResourceStateWithWriteOnlyReturned(t *testing.T) {
func TestGRPCProvider_MoveResourceState(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().MoveResourceState(
gomock.Any(),
@@ -546,9 +508,7 @@ func TestGRPCProvider_MoveResourceStateReturnsWriteOnlyValue(t *testing.T) {
Optional: true,
WriteOnly: true,
})))
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().MoveResourceState(
gomock.Any(),
@@ -584,9 +544,7 @@ func TestGRPCProvider_MoveResourceStateReturnsWriteOnlyValue(t *testing.T) {
func TestGRPCProvider_Configure(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().Configure(
gomock.Any(),
@@ -604,9 +562,7 @@ func TestGRPCProvider_Configure(t *testing.T) {
func TestGRPCProvider_Stop(t *testing.T) {
ctrl := gomock.NewController(t)
client := mockproto.NewMockProviderClient(ctrl)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().Stop(
gomock.Any(),
@@ -621,9 +577,7 @@ func TestGRPCProvider_Stop(t *testing.T) {
func TestGRPCProvider_ReadResource(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().ReadResource(
gomock.Any(),
@@ -654,9 +608,7 @@ func TestGRPCProvider_ReadResource(t *testing.T) {
func TestGRPCProvider_ReadResourceJSON(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().ReadResource(
gomock.Any(),
@@ -692,9 +644,7 @@ func TestGRPCProvider_ReadResourceReturnsWriteOnlyValue(t *testing.T) {
Optional: true,
WriteOnly: true,
})))
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().ReadResource(
gomock.Any(),
@@ -732,9 +682,7 @@ func TestGRPCProvider_ReadResourceReturnsWriteOnlyValue(t *testing.T) {
func TestGRPCProvider_ReadEmptyJSON(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().ReadResource(
gomock.Any(),
@@ -764,9 +712,7 @@ func TestGRPCProvider_ReadEmptyJSON(t *testing.T) {
func TestGRPCProvider_PlanResourceChange(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
expectedPrivate := []byte(`{"meta": "data"}`)
@@ -827,9 +773,7 @@ func TestGRPCProvider_PlanResourceChange(t *testing.T) {
func TestGRPCProvider_PlanResourceChangeJSON(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
expectedPrivate := []byte(`{"meta": "data"}`)
@@ -895,9 +839,7 @@ func TestGRPCProvider_PlanResourceChangeReturnsWriteOnlyValue(t *testing.T) {
Optional: true,
WriteOnly: true,
})))
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().PlanResourceChange(
gomock.Any(),
@@ -940,9 +882,7 @@ func TestGRPCProvider_PlanResourceChangeReturnsWriteOnlyValue(t *testing.T) {
func TestGRPCProvider_ApplyResourceChange(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
expectedPrivate := []byte(`{"meta": "data"}`)
@@ -987,9 +927,7 @@ func TestGRPCProvider_ApplyResourceChange(t *testing.T) {
func TestGRPCProvider_ApplyResourceChangeJSON(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
expectedPrivate := []byte(`{"meta": "data"}`)
@@ -1039,9 +977,7 @@ func TestGRPCProvider_ApplyResourceChangeReturnsWriteOnlyValue(t *testing.T) {
Optional: true,
WriteOnly: true,
})))
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().ApplyResourceChange(
gomock.Any(),
@@ -1086,9 +1022,7 @@ func TestGRPCProvider_ApplyResourceChangeReturnsWriteOnlyValue(t *testing.T) {
func TestGRPCProvider_ImportResourceState(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
expectedPrivate := []byte(`{"meta": "data"}`)
@@ -1129,9 +1063,7 @@ func TestGRPCProvider_ImportResourceState(t *testing.T) {
}
func TestGRPCProvider_ImportResourceStateJSON(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
expectedPrivate := []byte(`{"meta": "data"}`)
@@ -1173,9 +1105,7 @@ func TestGRPCProvider_ImportResourceStateJSON(t *testing.T) {
func TestGRPCProvider_ReadDataSource(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().ReadDataSource(
gomock.Any(),
@@ -1206,9 +1136,7 @@ func TestGRPCProvider_ReadDataSource(t *testing.T) {
func TestGRPCProvider_ReadDataSourceJSON(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().ReadDataSource(
gomock.Any(),
@@ -1240,9 +1168,7 @@ func TestGRPCProvider_ReadDataSourceJSON(t *testing.T) {
func TestGRPCProvider_OpenEphemeralResource(t *testing.T) {
t.Run("success", func(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
future := time.Now().Add(time.Minute)
client.EXPECT().OpenEphemeralResource(
@@ -1280,9 +1206,7 @@ func TestGRPCProvider_OpenEphemeralResource(t *testing.T) {
})
t.Run("requested type is not in schema", func(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
resp := p.OpenEphemeralResource(t.Context(), providers.OpenEphemeralResourceRequest{
TypeName: "non_existing",
@@ -1300,9 +1224,7 @@ func TestGRPCProvider_OpenEphemeralResource(t *testing.T) {
func TestGRPCProvider_RenewEphemeralResource(t *testing.T) {
ctrl := gomock.NewController(t)
client := mockproto.NewMockProviderClient(ctrl)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
future := time.Now().Add(time.Minute)
client.EXPECT().RenewEphemeralResource(
@@ -1331,9 +1253,7 @@ func TestGRPCProvider_RenewEphemeralResource(t *testing.T) {
func TestGRPCProvider_CloseEphemeralResource(t *testing.T) {
ctrl := gomock.NewController(t)
client := mockproto.NewMockProviderClient(ctrl)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().CloseEphemeralResource(
gomock.Any(),
@@ -1349,9 +1269,7 @@ func TestGRPCProvider_CloseEphemeralResource(t *testing.T) {
func TestGRPCProvider_CallFunction(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().CallFunction(
gomock.Any(),

View File

@@ -9,7 +9,6 @@ import (
"context"
"errors"
"fmt"
"sync"
plugin "github.com/hashicorp/go-plugin"
"github.com/opentofu/opentofu/internal/plugin6/validation"
@@ -18,7 +17,6 @@ import (
"github.com/zclconf/go-cty/cty/msgpack"
"google.golang.org/grpc"
"github.com/opentofu/opentofu/internal/addrs"
"github.com/opentofu/opentofu/internal/logging"
"github.com/opentofu/opentofu/internal/plugin6/convert"
"github.com/opentofu/opentofu/internal/providers"
@@ -75,11 +73,6 @@ type GRPCProvider struct {
// used in an end to end test of a provider.
TestServer *grpc.Server
// Addr uniquely identifies the type of provider.
// Normally executed providers will have this set during initialization,
// but it may not always be available for alternative execute modes.
Addr addrs.Provider
// Proto client use to make the grpc service calls.
client proto6.ProviderClient
@@ -94,53 +87,40 @@ type GRPCProvider struct {
// to use as the parent context for gRPC API calls.
ctx context.Context
mu sync.Mutex
// schema stores the schema for this provider. This is used to properly
// serialize the requests for schemas.
schema providers.GetProviderSchemaResponse
// SchemaCache stores the schema for this provider. This is used to properly
// serialize the requests for schemas. This is shared between instances
// of the provider.
SchemaCache providers.SchemaCache
// Keep track of whether the proto schema fetch call has happened for GetProviderSchemaOptional.
// This allows caching to still function efficiently without violating legacy providers' requirements.
hasFetchedSchema bool
}
var _ providers.Interface = new(GRPCProvider)
func (p *GRPCProvider) GetProviderSchema(ctx context.Context) (resp providers.GetProviderSchemaResponse) {
logger.Trace("GRPCProvider.v6: GetProviderSchema")
p.mu.Lock()
defer p.mu.Unlock()
// First, we check the global cache.
// The cache could contain this schema if an instance of this provider has previously been started.
if !p.Addr.IsZero() {
// Even if the schema is cached, GetProviderSchemaOptional could be false. This would indicate that once instantiated,
// this provider requires the get schema call to be made at least once, as it handles part of the provider's setup.
// At this point, we don't know if this is the first call to a provider instance or not, so we don't use the result in that case.
if schemaCached, ok := providers.SchemaCache.Get(p.Addr); ok && schemaCached.ServerCapabilities.GetProviderSchemaOptional {
logger.Trace("GRPCProvider: GetProviderSchema: serving from global schema cache", "address", p.Addr)
return schemaCached
}
schema := p.SchemaCache(func() providers.GetProviderSchemaResponse {
return p.getProviderSchema(ctx)
})
if !p.hasFetchedSchema && !schema.ServerCapabilities.GetProviderSchemaOptional {
// Force the call; we only need to satisfy the legacy provider's requirement that GetProviderSchema is called at least once per instance
_, _ = p.getProtoProviderSchema(ctx)
}
// If the local cache is non-zero, we know this instance has called
// GetProviderSchema at least once, so has satisfied the possible requirement of `GetProviderSchemaOptional=false`.
// This means that we can return early now using the locally cached schema, without making this call again.
if p.schema.Provider.Block != nil {
return p.schema
}
return schema
}
func (p *GRPCProvider) getProviderSchema(ctx context.Context) (resp providers.GetProviderSchemaResponse) {
resp.ResourceTypes = make(map[string]providers.Schema)
resp.DataSources = make(map[string]providers.Schema)
resp.EphemeralResources = make(map[string]providers.Schema)
resp.Functions = make(map[string]providers.FunctionSpec)
// Some providers may generate quite large schemas, and the internal default
// grpc response size limit is 4MB. 64MB should cover most any use case, and
// if we get providers nearing that we may want to consider a finer-grained
// API to fetch individual resource schemas.
// Note: this option is marked as EXPERIMENTAL in the grpc API. We keep
// this for compatibility, but recent providers all set the max message
// size much higher on the server side, which is the supported method for
// determining payload size.
const maxRecvSize = 64 << 20
protoResp, err := p.client.GetProviderSchema(ctx, new(proto6.GetProviderSchema_Request), grpc.MaxRecvMsgSizeCallOption{MaxRecvMsgSize: maxRecvSize})
protoResp, err := p.getProtoProviderSchema(ctx)
if err != nil {
resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err))
return resp
@@ -188,26 +168,29 @@ func (p *GRPCProvider) GetProviderSchema(ctx context.Context) (resp providers.Ge
resp.ServerCapabilities.GetProviderSchemaOptional = protoResp.ServerCapabilities.GetProviderSchemaOptional
}
// Set the global provider cache so that future calls to this provider can use the cached value.
// Crucially, this doesn't look at GetProviderSchemaOptional, because the layers above could use this cache
// *without* creating an instance of this provider. And if there is no instance,
// then we don't need to set up anything (cause there is nothing to set up), so we need no call
// to the providers GetSchema rpc.
if !p.Addr.IsZero() {
providers.SchemaCache.Set(p.Addr, resp)
}
// Always store this here in the client for providers that are not able to use GetProviderSchemaOptional.
// Crucially, this indicates that we've made at least one call to GetProviderSchema to this instance of the provider,
// which means in the future we'll be able to return using this cache
// (because the possible setup contained in the GetProviderSchema call has happened).
// If GetProviderSchemaOptional is true then this cache won't actually ever be used, because the calls to this method
// will be satisfied by the global provider cache.
p.schema = resp
return resp
}
// Common code to fetch the raw schema data from the provider. This is called from
// multiple locations due to GetProviderSchemaOptional
func (p *GRPCProvider) getProtoProviderSchema(ctx context.Context) (*proto6.GetProviderSchema_Response, error) {
// Some providers may generate quite large schemas, and the internal default
// grpc response size limit is 4MB. 64MB should cover most any use case, and
// if we get providers nearing that we may want to consider a finer-grained
// API to fetch individual resource schemas.
// Note: this option is marked as EXPERIMENTAL in the grpc API. We keep
// this for compatibility, but recent providers all set the max message
// size much higher on the server side, which is the supported method for
// determining payload size.
const maxRecvSize = 64 << 20
resp, err := p.client.GetProviderSchema(ctx, new(proto6.GetProviderSchema_Request), grpc.MaxRecvMsgSizeCallOption{MaxRecvMsgSize: maxRecvSize})
// Mark that we have handled the internal requirement for legacy providers (!GetProviderSchemaOptional)
p.hasFetchedSchema = true
return resp, err
}
func (p *GRPCProvider) ValidateProviderConfig(ctx context.Context, r providers.ValidateProviderConfigRequest) (resp providers.ValidateProviderConfigResponse) {
logger.Trace("GRPCProvider.v6: ValidateProviderConfig")

View File

@@ -22,7 +22,6 @@ import (
"google.golang.org/grpc"
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/opentofu/opentofu/internal/addrs"
"github.com/opentofu/opentofu/internal/legacy/hcl2shim"
mockproto "github.com/opentofu/opentofu/internal/plugin6/mock_proto"
"github.com/opentofu/opentofu/internal/providers"
@@ -69,6 +68,13 @@ func mockProviderClientWithSchema(t *testing.T, schema *proto.GetProviderSchema_
return client
}
func newGRPCProvider(client proto.ProviderClient) *GRPCProvider {
return &GRPCProvider{
client: client,
SchemaCache: providers.NewSchemaCache(),
}
}
func checkDiags(t *testing.T, d tfdiags.Diagnostics) {
t.Helper()
if d.HasErrors() {
@@ -163,9 +169,7 @@ func providerProtoSchema() *proto.GetProviderSchema_Response {
}
func TestGRPCProvider_GetSchema(t *testing.T) {
p := &GRPCProvider{
client: mockProviderClient(t),
}
p := newGRPCProvider(mockProviderClient(t))
resp := p.GetProviderSchema(t.Context())
checkDiags(t, resp.Diagnostics)
@@ -199,9 +203,7 @@ func TestGRPCProvider_GetSchema_GRPCError(t *testing.T) {
gomock.Any(),
).Return(&proto.GetProviderSchema_Response{}, fmt.Errorf("test error"))
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
resp := p.GetProviderSchema(t.Context())
@@ -230,9 +232,7 @@ func TestGRPCProvider_GetSchema_ResponseErrorDiagnostic(t *testing.T) {
Provider: &proto.Schema{},
}, nil)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
resp := p.GetProviderSchema(t.Context())
@@ -242,13 +242,8 @@ func TestGRPCProvider_GetSchema_ResponseErrorDiagnostic(t *testing.T) {
func TestGRPCProvider_GetSchema_GlobalCacheEnabled(t *testing.T) {
ctrl := gomock.NewController(t)
client := mockproto.NewMockProviderClient(ctrl)
// The SchemaCache is global and is saved between test runs
providers.SchemaCache = providers.NewMockSchemaCache()
providerAddr := addrs.Provider{
Namespace: "namespace",
Type: "type",
}
cache := providers.NewSchemaCache()
mockedProviderResponse := &proto.Schema{Version: 2, Block: &proto.Schema_Block{}}
@@ -263,10 +258,8 @@ func TestGRPCProvider_GetSchema_GlobalCacheEnabled(t *testing.T) {
// Run GetProviderTwice, expect GetSchema to be called once
// Re-initialize the provider before each run to avoid usage of the local cache
p := &GRPCProvider{
client: client,
Addr: providerAddr,
}
p := newGRPCProvider(client)
p.SchemaCache = cache
resp := p.GetProviderSchema(t.Context())
checkDiags(t, resp.Diagnostics)
@@ -274,10 +267,8 @@ func TestGRPCProvider_GetSchema_GlobalCacheEnabled(t *testing.T) {
t.Fatal(cmp.Diff(resp.Provider.Version, mockedProviderResponse.Version))
}
p = &GRPCProvider{
client: client,
Addr: providerAddr,
}
p = newGRPCProvider(client)
p.SchemaCache = cache
resp = p.GetProviderSchema(t.Context())
checkDiags(t, resp.Diagnostics)
@@ -289,13 +280,6 @@ func TestGRPCProvider_GetSchema_GlobalCacheEnabled(t *testing.T) {
func TestGRPCProvider_GetSchema_GlobalCacheDisabled(t *testing.T) {
ctrl := gomock.NewController(t)
client := mockproto.NewMockProviderClient(ctrl)
// The SchemaCache is global and is saved between test runs
providers.SchemaCache = providers.NewMockSchemaCache()
providerAddr := addrs.Provider{
Namespace: "namespace",
Type: "type",
}
mockedProviderResponse := &proto.Schema{Version: 2, Block: &proto.Schema_Block{}}
@@ -310,10 +294,7 @@ func TestGRPCProvider_GetSchema_GlobalCacheDisabled(t *testing.T) {
// Run GetProviderTwice, expect GetSchema to be called once
// Re-initialize the provider before each run to avoid usage of the local cache
p := &GRPCProvider{
client: client,
Addr: providerAddr,
}
p := newGRPCProvider(client)
resp := p.GetProviderSchema(t.Context())
checkDiags(t, resp.Diagnostics)
@@ -321,10 +302,7 @@ func TestGRPCProvider_GetSchema_GlobalCacheDisabled(t *testing.T) {
t.Fatal(cmp.Diff(resp.Provider.Version, mockedProviderResponse.Version))
}
p = &GRPCProvider{
client: client,
Addr: providerAddr,
}
p = newGRPCProvider(client)
resp = p.GetProviderSchema(t.Context())
checkDiags(t, resp.Diagnostics)
@@ -335,9 +313,7 @@ func TestGRPCProvider_GetSchema_GlobalCacheDisabled(t *testing.T) {
func TestGRPCProvider_PrepareProviderConfig(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().ValidateProviderConfig(
gomock.Any(),
@@ -351,9 +327,7 @@ func TestGRPCProvider_PrepareProviderConfig(t *testing.T) {
func TestGRPCProvider_ValidateResourceConfig(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().ValidateResourceConfig(
gomock.Any(),
@@ -370,9 +344,7 @@ func TestGRPCProvider_ValidateResourceConfig(t *testing.T) {
func TestGRPCProvider_ValidateDataResourceConfig(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().ValidateDataResourceConfig(
gomock.Any(),
@@ -389,9 +361,7 @@ func TestGRPCProvider_ValidateDataResourceConfig(t *testing.T) {
func TestGRPCProvider_ValidateEphemeralResourceConfig(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().ValidateEphemeralResourceConfig(
gomock.Any(),
@@ -408,9 +378,7 @@ func TestGRPCProvider_ValidateEphemeralResourceConfig(t *testing.T) {
func TestGRPCProvider_UpgradeResourceState(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().UpgradeResourceState(
gomock.Any(),
@@ -439,9 +407,7 @@ func TestGRPCProvider_UpgradeResourceState(t *testing.T) {
func TestGRPCProvider_UpgradeResourceStateJSON(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().UpgradeResourceState(
gomock.Any(),
@@ -475,9 +441,7 @@ func TestGRPCProvider_UpgradeResourceStateWithWriteOnlyReturned(t *testing.T) {
Optional: true,
WriteOnly: true,
})))
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().UpgradeResourceState(
gomock.Any(),
@@ -512,9 +476,7 @@ func TestGRPCProvider_UpgradeResourceStateWithWriteOnlyReturned(t *testing.T) {
func TestGRPCProvider_MoveResourceState(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().MoveResourceState(
gomock.Any(),
@@ -553,9 +515,7 @@ func TestGRPCProvider_MoveResourceStateReturnsWriteOnlyValue(t *testing.T) {
Optional: true,
WriteOnly: true,
})))
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().MoveResourceState(
gomock.Any(),
@@ -591,9 +551,7 @@ func TestGRPCProvider_MoveResourceStateReturnsWriteOnlyValue(t *testing.T) {
func TestGRPCProvider_Configure(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().ConfigureProvider(
gomock.Any(),
@@ -611,9 +569,7 @@ func TestGRPCProvider_Configure(t *testing.T) {
func TestGRPCProvider_Stop(t *testing.T) {
ctrl := gomock.NewController(t)
client := mockproto.NewMockProviderClient(ctrl)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().StopProvider(
gomock.Any(),
@@ -628,9 +584,7 @@ func TestGRPCProvider_Stop(t *testing.T) {
func TestGRPCProvider_ReadResource(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().ReadResource(
gomock.Any(),
@@ -661,9 +615,7 @@ func TestGRPCProvider_ReadResource(t *testing.T) {
func TestGRPCProvider_ReadResourceJSON(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().ReadResource(
gomock.Any(),
@@ -699,9 +651,7 @@ func TestGRPCProvider_ReadResourceReturnsWriteOnlyValue(t *testing.T) {
Optional: true,
WriteOnly: true,
})))
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().ReadResource(
gomock.Any(),
@@ -739,9 +689,7 @@ func TestGRPCProvider_ReadResourceReturnsWriteOnlyValue(t *testing.T) {
func TestGRPCProvider_ReadEmptyJSON(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().ReadResource(
gomock.Any(),
@@ -771,9 +719,7 @@ func TestGRPCProvider_ReadEmptyJSON(t *testing.T) {
func TestGRPCProvider_PlanResourceChange(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
expectedPrivate := []byte(`{"meta": "data"}`)
@@ -834,9 +780,7 @@ func TestGRPCProvider_PlanResourceChange(t *testing.T) {
func TestGRPCProvider_PlanResourceChangeJSON(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
expectedPrivate := []byte(`{"meta": "data"}`)
@@ -902,9 +846,7 @@ func TestGRPCProvider_PlanResourceChangeReturnsWriteOnlyValue(t *testing.T) {
Optional: true,
WriteOnly: true,
})))
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().PlanResourceChange(
gomock.Any(),
@@ -947,9 +889,7 @@ func TestGRPCProvider_PlanResourceChangeReturnsWriteOnlyValue(t *testing.T) {
func TestGRPCProvider_ApplyResourceChange(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
expectedPrivate := []byte(`{"meta": "data"}`)
@@ -994,9 +934,7 @@ func TestGRPCProvider_ApplyResourceChange(t *testing.T) {
func TestGRPCProvider_ApplyResourceChangeJSON(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
expectedPrivate := []byte(`{"meta": "data"}`)
@@ -1046,9 +984,7 @@ func TestGRPCProvider_ApplyResourceChangeReturnsWriteOnlyValue(t *testing.T) {
Optional: true,
WriteOnly: true,
})))
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().ApplyResourceChange(
gomock.Any(),
@@ -1093,9 +1029,7 @@ func TestGRPCProvider_ApplyResourceChangeReturnsWriteOnlyValue(t *testing.T) {
func TestGRPCProvider_ImportResourceState(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
expectedPrivate := []byte(`{"meta": "data"}`)
@@ -1136,9 +1070,7 @@ func TestGRPCProvider_ImportResourceState(t *testing.T) {
}
func TestGRPCProvider_ImportResourceStateJSON(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
expectedPrivate := []byte(`{"meta": "data"}`)
@@ -1180,9 +1112,7 @@ func TestGRPCProvider_ImportResourceStateJSON(t *testing.T) {
func TestGRPCProvider_ReadDataSource(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().ReadDataSource(
gomock.Any(),
@@ -1213,9 +1143,7 @@ func TestGRPCProvider_ReadDataSource(t *testing.T) {
func TestGRPCProvider_ReadDataSourceJSON(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().ReadDataSource(
gomock.Any(),
@@ -1247,9 +1175,7 @@ func TestGRPCProvider_ReadDataSourceJSON(t *testing.T) {
func TestGRPCProvider_OpenEphemeralResource(t *testing.T) {
t.Run("success", func(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
future := time.Now().Add(time.Minute)
client.EXPECT().OpenEphemeralResource(
@@ -1287,9 +1213,7 @@ func TestGRPCProvider_OpenEphemeralResource(t *testing.T) {
})
t.Run("requested type is not in schema", func(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
resp := p.OpenEphemeralResource(t.Context(), providers.OpenEphemeralResourceRequest{
TypeName: "non_existing",
@@ -1307,9 +1231,7 @@ func TestGRPCProvider_OpenEphemeralResource(t *testing.T) {
func TestGRPCProvider_RenewEphemeralResource(t *testing.T) {
ctrl := gomock.NewController(t)
client := mockproto.NewMockProviderClient(ctrl)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
future := time.Now().Add(time.Minute)
client.EXPECT().RenewEphemeralResource(
@@ -1338,9 +1260,7 @@ func TestGRPCProvider_RenewEphemeralResource(t *testing.T) {
func TestGRPCProvider_CloseEphemeralResource(t *testing.T) {
ctrl := gomock.NewController(t)
client := mockproto.NewMockProviderClient(ctrl)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().CloseEphemeralResource(
gomock.Any(),
@@ -1356,9 +1276,7 @@ func TestGRPCProvider_CloseEphemeralResource(t *testing.T) {
func TestGRPCProvider_CallFunction(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
p := newGRPCProvider(client)
client.EXPECT().CallFunction(
gomock.Any(),

View File

@@ -1,9 +0,0 @@
package providers
import "github.com/opentofu/opentofu/internal/addrs"
func NewMockSchemaCache() *schemaCache {
return &schemaCache{
m: make(map[addrs.Provider]ProviderSchema),
}
}

View File

@@ -1,49 +0,0 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package providers
import (
"sync"
"github.com/opentofu/opentofu/internal/addrs"
)
// SchemaCache is a global cache of Schemas.
// This will be accessed by both core and the provider clients to ensure that
// large schemas are stored in a single location.
var SchemaCache = &schemaCache{
m: make(map[addrs.Provider]ProviderSchema),
}
// Global cache for provider schemas
// Cache the entire response to ensure we capture any new fields, like
// ServerCapabilities. This also serves to capture errors so that multiple
// concurrent calls resulting in an error can be handled in the same manner.
type schemaCache struct {
mu sync.Mutex
m map[addrs.Provider]ProviderSchema
}
func (c *schemaCache) Set(p addrs.Provider, s ProviderSchema) {
c.mu.Lock()
defer c.mu.Unlock()
c.m[p] = s
}
func (c *schemaCache) Get(p addrs.Provider) (ProviderSchema, bool) {
c.mu.Lock()
defer c.mu.Unlock()
s, ok := c.m[p]
return s, ok
}
func (c *schemaCache) Remove(p addrs.Provider) {
c.mu.Lock()
defer c.mu.Unlock()
delete(c.m, p)
}

View File

@@ -6,6 +6,9 @@
package providers
import (
"fmt"
"sync"
"github.com/opentofu/opentofu/internal/addrs"
"github.com/opentofu/opentofu/internal/configs/configschema"
)
@@ -39,3 +42,62 @@ func (ss ProviderSchema) SchemaForResourceType(mode addrs.ResourceMode, typeName
func (ss ProviderSchema) SchemaForResourceAddr(addr addrs.Resource) (schema *configschema.Block, version uint64) {
return ss.SchemaForResourceType(addr.Mode, addr.Type)
}
func (resp ProviderSchema) Validate(addr addrs.Provider) error {
if resp.Diagnostics.HasErrors() {
return fmt.Errorf("failed to retrieve schema from provider %q: %w", addr, resp.Diagnostics.Err())
}
if resp.Provider.Version < 0 {
// We're not using the version numbers here yet, but we'll check
// for validity anyway in case we start using them in future.
return fmt.Errorf("provider %s has invalid negative schema version for its configuration blocks,which is a bug in the provider", addr)
}
for t, r := range resp.ResourceTypes {
if err := r.Block.InternalValidate(); err != nil {
return fmt.Errorf("provider %s has invalid schema for managed resource type %q, which is a bug in the provider: %w", addr, t, err)
}
if r.Version < 0 {
return fmt.Errorf("provider %s has invalid negative schema version for managed resource type %q, which is a bug in the provider", addr, t)
}
}
for t, d := range resp.DataSources {
if err := d.Block.InternalValidate(); err != nil {
return fmt.Errorf("provider %s has invalid schema for data resource type %q, which is a bug in the provider: %w", addr, t, err)
}
if d.Version < 0 {
// We're not using the version numbers here yet, but we'll check
// for validity anyway in case we start using them in future.
return fmt.Errorf("provider %s has invalid negative schema version for data resource type %q, which is a bug in the provider", addr, t)
}
}
for t, d := range resp.EphemeralResources {
if err := d.Block.InternalValidate(); err != nil {
return fmt.Errorf("provider %s has invalid schema for ephemeral resource type %q, which is a bug in the provider: %w", addr, t, err)
}
if d.Version < 0 {
// We're not using the version numbers here yet, but we'll check
// for validity anyway in case we start using them in future.
return fmt.Errorf("provider %s has invalid negative schema version for ephemeral resource type %q, which is a bug in the provider", addr, t)
}
}
return nil
}
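
Note: the schema validation that previously lived inline in contextPlugins.ProviderSchema is now a method on providers.ProviderSchema, so any caller that fetches a schema can reuse it. A minimal sketch of a call site, assuming `p` implements providers.Interface; the helper name is hypothetical, and the real caller in this commit is contextPlugins.ProviderSchema further down:

```go
package example

import (
	"context"

	"github.com/opentofu/opentofu/internal/addrs"
	"github.com/opentofu/opentofu/internal/providers"
)

// fetchValidatedSchema is a hypothetical helper showing the intended usage.
func fetchValidatedSchema(ctx context.Context, p providers.Interface, addr addrs.Provider) (providers.ProviderSchema, error) {
	resp := p.GetProviderSchema(ctx)
	// Validate wraps the response diagnostics and checks schema versions and
	// block consistency for managed, data, and ephemeral resource types.
	if err := resp.Validate(addr); err != nil {
		return providers.ProviderSchema{}, err
	}
	return resp, nil
}
```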
type SchemaCache func(func() ProviderSchema) ProviderSchema
func NewSchemaCache() SchemaCache {
var once sync.Once
var schema ProviderSchema
return func(getSchema func() ProviderSchema) ProviderSchema {
once.Do(func() {
schema = getSchema()
})
return schema
}
}
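
Note: SchemaCache is no longer a global map keyed by provider address but a memoizing closure handed to each provider client. A hypothetical test (not part of this commit) illustrating the contract, namely that the fetch function runs at most once per cache:

```go
package providers_test

import (
	"testing"

	"github.com/opentofu/opentofu/internal/providers"
)

func TestSchemaCacheMemoizes(t *testing.T) {
	cache := providers.NewSchemaCache()

	calls := 0
	fetch := func() providers.ProviderSchema {
		calls++
		return providers.ProviderSchema{}
	}

	_ = cache(fetch) // first call runs fetch and memoizes the result
	_ = cache(fetch) // second call is served from the memoized result
	if calls != 1 {
		t.Fatalf("expected exactly one schema fetch, got %d", calls)
	}
}
```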

View File

@@ -471,11 +471,6 @@ variable "obfmod" {
// Defaulted stub provider with non-custom function
func TestContext2Functions_providerFunctionsStub(t *testing.T) {
p := testProvider("aws")
addr := addrs.ImpliedProviderForUnqualifiedType("aws")
// Explicitly non-parallel
t.Setenv("foo", "bar")
defer providers.SchemaCache.Remove(addr)
p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{
Functions: map[string]providers.FunctionSpec{
@@ -492,9 +487,6 @@ func TestContext2Functions_providerFunctionsStub(t *testing.T) {
Result: cty.True,
}
// SchemaCache is initialzed earlier on in the command package
providers.SchemaCache.Set(addr, *p.GetProviderSchemaResponse)
m := testModuleInline(t, map[string]string{
"main.tf": `
module "mod" {
@@ -571,11 +563,6 @@ variable "obfmod" {
// Defaulted stub provider with custom function (no allowed)
func TestContext2Functions_providerFunctionsStubCustom(t *testing.T) {
p := testProvider("aws")
addr := addrs.ImpliedProviderForUnqualifiedType("aws")
// Explicitly non-parallel
t.Setenv("foo", "bar")
defer providers.SchemaCache.Remove(addr)
p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{
Functions: map[string]providers.FunctionSpec{
@@ -592,9 +579,6 @@ func TestContext2Functions_providerFunctionsStubCustom(t *testing.T) {
Result: cty.True,
}
// SchemaCache is initialzed earlier on in the command package
providers.SchemaCache.Set(addr, *p.GetProviderSchemaResponse)
m := testModuleInline(t, map[string]string{
"main.tf": `
module "mod" {
@@ -655,11 +639,6 @@ variable "obfmod" {
// Defaulted stub provider
func TestContext2Functions_providerFunctionsForEachCount(t *testing.T) {
p := testProvider("aws")
addr := addrs.ImpliedProviderForUnqualifiedType("aws")
// Explicitly non-parallel
t.Setenv("foo", "bar")
defer providers.SchemaCache.Remove(addr)
p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{
Functions: map[string]providers.FunctionSpec{
@@ -676,9 +655,6 @@ func TestContext2Functions_providerFunctionsForEachCount(t *testing.T) {
Result: cty.True,
}
// SchemaCache is initialzed earlier on in the command package
providers.SchemaCache.Set(addr, *p.GetProviderSchemaResponse)
m := testModuleInline(t, map[string]string{
"main.tf": `
provider "aws" {

View File

@@ -9,6 +9,7 @@ import (
"context"
"fmt"
"log"
"sync"
"github.com/opentofu/opentofu/internal/addrs"
"github.com/opentofu/opentofu/internal/configs/configschema"
@@ -23,12 +24,23 @@ import (
type contextPlugins struct {
providerFactories map[addrs.Provider]providers.Factory
provisionerFactories map[string]provisioners.Factory
providerSchemasLock sync.Mutex
providerSchemas map[addrs.Provider]providerSchemaEntry
provisionerSchemasLock sync.Mutex
provisionerSchemas map[string]provisionerSchemaEntry
}
type providerSchemaEntry func() (providers.ProviderSchema, error)
type provisionerSchemaEntry func() (*configschema.Block, error)
func newContextPlugins(providerFactories map[addrs.Provider]providers.Factory, provisionerFactories map[string]provisioners.Factory) *contextPlugins {
return &contextPlugins{
providerFactories: providerFactories,
provisionerFactories: provisionerFactories,
providerSchemas: map[addrs.Provider]providerSchemaEntry{},
provisionerSchemas: map[string]provisionerSchemaEntry{},
}
}
@@ -68,72 +80,30 @@ func (cp *contextPlugins) NewProvisionerInstance(typ string) (provisioners.Inter
// to repeatedly call this method with the same address if various different
// parts of OpenTofu all need the same schema information.
func (cp *contextPlugins) ProviderSchema(ctx context.Context, addr addrs.Provider) (providers.ProviderSchema, error) {
// Check the global schema cache first.
// This cache is only written by the provider client, and transparently
// used by GetProviderSchema, but we check it here because at this point we
// may be able to avoid spinning up the provider instance at all.
//
// It's worth noting that ServerCapabilities.GetProviderSchemaOptional is ignored here.
// That is because we're checking *prior* to the provider's instantiation.
// GetProviderSchemaOptional only says that *if we instantiate a provider*,
// then we need to run the get schema call at least once.
// BUG This SHORT CIRCUITS the logic below and is not the only code which inserts provider schemas into the cache!!
schemas, ok := providers.SchemaCache.Get(addr)
if ok {
log.Printf("[TRACE] tofu.contextPlugins: Serving provider %q schema from global schema cache", addr)
return schemas, nil
}
// Coarse lock only for ensuring that a valid entry exists
cp.providerSchemasLock.Lock()
entry, ok := cp.providerSchemas[addr]
if !ok {
entry = sync.OnceValues(func() (providers.ProviderSchema, error) {
log.Printf("[TRACE] tofu.contextPlugins: Initializing provider %q to read its schema", addr)
provider, err := cp.NewProviderInstance(addr)
if err != nil {
return providers.ProviderSchema{}, fmt.Errorf("failed to instantiate provider %q to obtain schema: %w", addr, err)
}
defer provider.Close(ctx)
log.Printf("[TRACE] tofu.contextPlugins: Initializing provider %q to read its schema", addr)
provider, err := cp.NewProviderInstance(addr)
if err != nil {
return schemas, fmt.Errorf("failed to instantiate provider %q to obtain schema: %w", addr, err)
schema := provider.GetProviderSchema(ctx)
return schema, schema.Validate(addr)
})
cp.providerSchemas[addr] = entry
}
defer provider.Close(ctx)
// This lock is only for access to the map. We don't need to hold the lock when calling
// "entry" because [sync.OnceValues] handles synchronization itself.
// We don't defer unlock as the majority of the work of this function happens in calling "entry"
// and we want to release as soon as possible for multiple concurrent callers of different providers
cp.providerSchemasLock.Unlock()
resp := provider.GetProviderSchema(ctx)
if resp.Diagnostics.HasErrors() {
return resp, fmt.Errorf("failed to retrieve schema from provider %q: %w", addr, resp.Diagnostics.Err())
}
if resp.Provider.Version < 0 {
// We're not using the version numbers here yet, but we'll check
// for validity anyway in case we start using them in future.
return resp, fmt.Errorf("provider %s has invalid negative schema version for its configuration blocks,which is a bug in the provider ", addr)
}
for t, r := range resp.ResourceTypes {
if err := r.Block.InternalValidate(); err != nil {
return resp, fmt.Errorf("provider %s has invalid schema for managed resource type %q, which is a bug in the provider: %w", addr, t, err)
}
if r.Version < 0 {
return resp, fmt.Errorf("provider %s has invalid negative schema version for managed resource type %q, which is a bug in the provider", addr, t)
}
}
for t, d := range resp.DataSources {
if err := d.Block.InternalValidate(); err != nil {
return resp, fmt.Errorf("provider %s has invalid schema for data resource type %q, which is a bug in the provider: %w", addr, t, err)
}
if d.Version < 0 {
// We're not using the version numbers here yet, but we'll check
// for validity anyway in case we start using them in future.
return resp, fmt.Errorf("provider %s has invalid negative schema version for data resource type %q, which is a bug in the provider", addr, t)
}
}
for t, d := range resp.EphemeralResources {
if err := d.Block.InternalValidate(); err != nil {
return resp, fmt.Errorf("provider %s has invalid schema for ephemeral resource type %q, which is a bug in the provider: %w", addr, t, err)
}
if d.Version < 0 {
// We're not using the version numbers here yet, but we'll check
// for validity anyway in case we start using them in future.
return resp, fmt.Errorf("provider %s has invalid negative schema version for ephemeral resource type %q, which is a bug in the provider", addr, t)
}
}
return resp, nil
return entry()
}
// ProviderConfigSchema is a helper wrapper around ProviderSchema which first
@@ -176,18 +146,32 @@ func (cp *contextPlugins) ResourceTypeSchema(ctx context.Context, providerAddr a
// ProvisionerSchema memoizes results by provisioner type name, so it's fine
// to repeatedly call this method with the same name if various different
// parts of OpenTofu all need the same schema information.
func (cp *contextPlugins) ProvisionerSchema(typ string) (*configschema.Block, error) {
log.Printf("[TRACE] tofu.contextPlugins: Initializing provisioner %q to read its schema", typ)
provisioner, err := cp.NewProvisionerInstance(typ)
if err != nil {
return nil, fmt.Errorf("failed to instantiate provisioner %q to obtain schema: %w", typ, err)
}
defer provisioner.Close()
func (cp *contextPlugins) ProvisionerSchema(addr string) (*configschema.Block, error) {
// Coarse lock only for ensuring that a valid entry exists
cp.provisionerSchemasLock.Lock()
entry, ok := cp.provisionerSchemas[addr]
if !ok {
entry = sync.OnceValues(func() (*configschema.Block, error) {
log.Printf("[TRACE] tofu.contextPlugins: Initializing provisioner %q to read its schema", addr)
provisioner, err := cp.NewProvisionerInstance(addr)
if err != nil {
return nil, fmt.Errorf("failed to instantiate provisioner %q to obtain schema: %w", addr, err)
}
defer provisioner.Close()
resp := provisioner.GetSchema()
if resp.Diagnostics.HasErrors() {
return nil, fmt.Errorf("failed to retrieve schema from provisioner %q: %w", typ, resp.Diagnostics.Err())
resp := provisioner.GetSchema()
if resp.Diagnostics.HasErrors() {
return nil, fmt.Errorf("failed to retrieve schema from provisioner %q: %w", addr, resp.Diagnostics.Err())
}
return resp.Provisioner, nil
})
cp.provisionerSchemas[addr] = entry
}
// This lock is only for access to the map. We don't need to hold the lock when calling
// "entry" because [sync.OnceValues] handles synchronization itself.
// We don't defer unlock as the majority of the work of this function happens in calling "entry"
// and we want to release as soon as possible for multiple concurrent callers of different provisioners
cp.provisionerSchemasLock.Unlock()
return resp.Provisioner, nil
return entry()
}
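
Note: ProviderSchema and ProvisionerSchema now share the same memoization idiom: a mutex guards only the map of entries, while sync.OnceValues guarantees each entry's expensive work (starting the plugin and fetching its schema) runs exactly once, and the lock is released before the entry is invoked so different providers or provisioners can be fetched in parallel. A simplified, self-contained sketch of the pattern, with strings standing in for the real schema types:

```go
package main

import (
	"fmt"
	"sync"
)

// memo demonstrates the locking pattern used by contextPlugins above.
type memo struct {
	mu      sync.Mutex
	entries map[string]func() (string, error)
}

func (m *memo) get(key string, work func() (string, error)) (string, error) {
	// Coarse lock only for ensuring that a valid entry exists in the map.
	m.mu.Lock()
	entry, ok := m.entries[key]
	if !ok {
		entry = sync.OnceValues(work)
		m.entries[key] = entry
	}
	// Not deferred: the heavy work happens in entry(), which synchronizes itself.
	m.mu.Unlock()
	return entry()
}

func main() {
	m := &memo{entries: map[string]func() (string, error){}}
	schema, err := m.get("hashicorp/aws", func() (string, error) {
		fmt.Println("starting plugin once")
		return "aws schema", nil
	})
	fmt.Println(schema, err)
}
```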

View File

@@ -31,18 +31,15 @@ func simpleMockPluginLibrary() *contextPlugins {
// factory into real code under test.
provider := simpleMockProvider()
provisioner := simpleMockProvisioner()
ret := &contextPlugins{
providerFactories: map[addrs.Provider]providers.Factory{
addrs.NewDefaultProvider("test"): func() (providers.Interface, error) {
return provider, nil
},
ret := newContextPlugins(map[addrs.Provider]providers.Factory{
addrs.NewDefaultProvider("test"): func() (providers.Interface, error) {
return provider, nil
},
provisionerFactories: map[string]provisioners.Factory{
"test": func() (provisioners.Interface, error) {
return provisioner, nil
},
}, map[string]provisioners.Factory{
"test": func() (provisioners.Interface, error) {
return provisioner, nil
},
}
})
return ret
}