mirror of https://github.com/turbot/steampipe.git
synced 2025-12-19 18:12:43 -05:00

steampipe compiles

cmd/list.go (108)
@@ -1,18 +1,12 @@
package cmd

import (
    "context"
    "fmt"
    "sort"

    "github.com/spf13/cobra"
    "github.com/turbot/steampipe/pkg/cmdconfig"
    "github.com/turbot/steampipe/pkg/display"
    "github.com/turbot/steampipe/pkg/error_helpers"
    "github.com/turbot/steampipe/pkg/steampipeconfig/modconfig"
    "github.com/turbot/steampipe/pkg/workspace"
)

// TODO #kai can we just remove this
type listSubCmdOptions struct {
    parentCmd *cobra.Command
}
@@ -41,95 +35,15 @@ func getRunListSubCmd(opts listSubCmdOptions) func(cmd *cobra.Command, args []st
    }

    return func(cmd *cobra.Command, _ []string) {
        ctx := cmd.Context()

        w, inputVariables, errAndWarnings := workspace.LoadWorkspaceVars(ctx)
        error_helpers.FailOnError(errAndWarnings.GetError())
        errAndWarnings = w.LoadWorkspaceMod(ctx, inputVariables)
        error_helpers.FailOnError(errAndWarnings.GetError())

        modResources, depResources, err := listResourcesInMod(ctx, w.Mod, cmd)
        error_helpers.FailOnErrorWithMessage(err, "could not list resources")
        if len(modResources)+len(depResources) == 0 {
            fmt.Println("No resources available to execute.")
        }

        sortResources(modResources)
        sortResources(depResources)
        headers, rows := getOutputDataTable(modResources, depResources)

        display.ShowWrappedTable(headers, rows, &display.ShowWrappedTableOptions{
            AutoMerge:        false,
            HideEmptyColumns: true,
            Truncate:         true,
        })
        // TODO #v1 list query files? or deprecate list command
        //ctx := cmd.Context()
        //
        //headers, rows := getOutputDataTable(modResources, depResources)
        //
        //display.ShowWrappedTable(headers, rows, &display.ShowWrappedTableOptions{
        //	AutoMerge:        false,
        //	HideEmptyColumns: true,
        //	Truncate:         true,
        //})
    }
}

func listResourcesInMod(ctx context.Context, mod *modconfig.Mod, cmd *cobra.Command) (modResources, depResources []modconfig.ModTreeItem, err error) {
    resourceTypesToDisplay := getResourceTypesToDisplay(cmd)

    err = mod.WalkResources(func(item modconfig.HclResource) (bool, error) {
        if ctx.Err() != nil {
            return false, ctx.Err()
        }

        // if we are not showing this resource type, return
        if !resourceTypesToDisplay[item.BlockType()] {
            return true, nil
        }

        m := item.(modconfig.ModTreeItem)

        itemMod := m.GetMod()
        if m.GetParents()[0] == itemMod {

            // add to the appropriate array
            if itemMod.Name() == mod.Name() {
                modResources = append(modResources, m)
            } else {
                depResources = append(depResources, m)
            }
        }
        return true, nil
    })
    return modResources, depResources, err
}

func sortResources(items []modconfig.ModTreeItem) {
    sort.SliceStable(items, func(i, j int) bool {
        return items[i].Name() < items[j].Name()
    })
}

func getOutputDataTable(modResources, depResources []modconfig.ModTreeItem) ([]string, [][]string) {
    rows := make([][]string, len(modResources)+len(depResources))
    for i, modItem := range modResources {
        rows[i] = []string{modItem.GetUnqualifiedName(), modItem.GetTitle()}
    }
    offset := len(modResources)
    for i, modItem := range depResources {
        // use fully qualified name for dependency resources
        rows[i+offset] = []string{modItem.Name(), modItem.GetTitle()}
    }
    return []string{"Name", "Title"}, rows
}

func getResourceTypesToDisplay(cmd *cobra.Command) map[string]bool {
    parent := cmd.Parent().Name()
    cmdToTypeMapping := map[string][]string{
        "check":     {"benchmark", "control"},
        "dashboard": {"dashboard", "benchmark"},
        "query":     {"query"},
    }
    resourceTypesToDisplay, found := cmdToTypeMapping[parent]
    if !found {
        panic(fmt.Sprintf("could not find resource type lookup list for '%s'", parent))
    }
    // add resource types to a map for cheap lookup
    res := map[string]bool{}
    for _, t := range resourceTypesToDisplay {
        res[t] = true
    }
    return res
}
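getResourceTypesToDisplay above builds a map[string]bool so that the WalkResources callback can check "is this block type wanted?" in constant time, and listResourcesInMod then splits matches into local and dependency buckets before sorting. A minimal, standalone sketch of that lookup-set-and-filter pattern, with hypothetical resource names (not part of this commit):

package main

import (
    "fmt"
    "sort"
)

// toLookup converts a slice of allowed block types into a set for O(1) membership checks,
// mirroring the map[string]bool built in getResourceTypesToDisplay.
func toLookup(types []string) map[string]bool {
    res := map[string]bool{}
    for _, t := range types {
        res[t] = true
    }
    return res
}

func main() {
    // hypothetical resources: name -> block type
    resources := map[string]string{
        "mod1.benchmark.cis":   "benchmark",
        "mod1.control.c1":      "control",
        "mod1.dashboard.iam":   "dashboard",
    }
    // e.g. the mapping used for the "check" parent command
    show := toLookup([]string{"benchmark", "control"})

    var names []string
    for name, blockType := range resources {
        if show[blockType] {
            names = append(names, name)
        }
    }
    sort.Strings(names) // mirror sortResources, which sorts by name
    fmt.Println(names)  // [mod1.benchmark.cis mod1.control.c1]
}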
cmd/query.go (322)
@@ -3,34 +3,25 @@ package cmd
import (
    "bufio"
    "context"
    "encoding/json"
    "fmt"
    "os"
    "path"
    "strings"

    "github.com/hashicorp/hcl/v2"
    "github.com/spf13/cobra"
    "github.com/spf13/viper"
    "github.com/thediveo/enumflag/v2"
    "github.com/turbot/go-kit/helpers"
    "github.com/turbot/pipe-fittings/utils"
    "github.com/turbot/steampipe-plugin-sdk/v5/sperr"
    "github.com/turbot/steampipe/pkg/cloud"
    "github.com/turbot/steampipe/pkg/cmdconfig"
    "github.com/turbot/steampipe/pkg/connection_sync"
    "github.com/turbot/steampipe/pkg/constants"
    "github.com/turbot/steampipe/pkg/contexthelpers"
    "github.com/turbot/steampipe/pkg/dashboard/dashboardtypes"
    "github.com/turbot/steampipe/pkg/display"
    "github.com/turbot/steampipe/pkg/error_helpers"
    "github.com/turbot/steampipe/pkg/query"
    "github.com/turbot/steampipe/pkg/query/queryexecute"
    "github.com/turbot/steampipe/pkg/query/queryresult"
    "github.com/turbot/steampipe/pkg/snapshot"
    "github.com/turbot/steampipe/pkg/statushooks"
    "github.com/turbot/steampipe/pkg/steampipeconfig/modconfig"
    "github.com/turbot/steampipe/pkg/workspace"
)

// variable used to assign the timing mode flag
@@ -59,21 +50,6 @@ Examples:

  # Run a specific query directly
  steampipe query "select * from cloud"`,

    ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
        ctx := cmd.Context()
        w, err := workspace.LoadResourceNames(ctx, viper.GetString(constants.ArgModLocation))
        if err != nil {
            return []string{}, cobra.ShellCompDirectiveError
        }
        namedQueries := []string{}
        for _, name := range w.GetSortedNamedQueryNames() {
            if strings.HasPrefix(name, toComplete) {
                namedQueries = append(namedQueries, name)
            }
        }
        return namedQueries, cobra.ShellCompDirectiveNoFileComp
    },
}

// Notes:
@@ -93,7 +69,8 @@ Examples:
            constants.ArgTiming,
            fmt.Sprintf("Display query timing; one of: %s", strings.Join(constants.FlagValues(constants.QueryTimingModeIds), ", ")),
            cmdconfig.FlagOptions.NoOptDefVal(constants.ArgOn)).
        AddBoolFlag(constants.ArgWatch, true, "Watch SQL files in the current workspace (works only in interactive mode)").
        // TODO #breakingchange
        //AddBoolFlag(constants.ArgWatch, true, "Watch SQL files in the current workspace (works only in interactive mode)").
        AddStringSliceFlag(constants.ArgSearchPath, nil, "Set a custom search_path for the steampipe user for a query session (comma-separated)").
        AddStringSliceFlag(constants.ArgSearchPathPrefix, nil, "Set a prefix to the current search path for a query session (comma-separated)").
        AddStringSliceFlag(constants.ArgVarFile, nil, "Specify a file containing variable values").
@@ -234,129 +211,133 @@ func executeSnapshotQuery(initData *query.InitData, ctx context.Context) int {
        }
    }

    for _, resolvedQuery := range initData.Queries {
        // if a manual query is being run (i.e. not a named query), convert into a query and add to workspace
        // this is to allow us to use existing dashboard execution code
        queryProvider, existingResource := ensureSnapshotQueryResource(resolvedQuery.Name, resolvedQuery, initData.Workspace)

        // we need to pass the embedded initData to GenerateSnapshot
        baseInitData := &initData.InitData

        // so a dashboard name was specified - just call GenerateSnapshot
        snap, err := snapshot.GenerateSnapshot(ctx, queryProvider.Name(), baseInitData, nil)
        if err != nil {
            exitCode = constants.ExitCodeSnapshotCreationFailed
            error_helpers.FailOnError(err)
        }

        // set the filename root for the snapshot (in case needed)
        if !existingResource {
            snap.FileNameRoot = "query"
        }

        // display the result
        switch viper.GetString(constants.ArgOutput) {
        case constants.OutputFormatNone:
            // do nothing
        case constants.OutputFormatSnapshot, constants.OutputFormatSnapshotShort:
            // if the format is snapshot, just dump it out
            jsonOutput, err := json.MarshalIndent(snap, "", "  ")
            if err != nil {
                error_helpers.FailOnErrorWithMessage(err, "failed to display result as snapshot")
            }
            fmt.Println(string(jsonOutput))
        default:
            // otherwise convert the snapshot into a query result
            result, err := snapshotToQueryResult(snap)
            error_helpers.FailOnErrorWithMessage(err, "failed to display result as snapshot")
            display.ShowOutput(ctx, result, display.WithTimingDisabled())
        }

        // share the snapshot if necessary
        err = publishSnapshotIfNeeded(ctx, snap)
        if err != nil {
            exitCode = constants.ExitCodeSnapshotUploadFailed
            error_helpers.FailOnErrorWithMessage(err, fmt.Sprintf("failed to publish snapshot to %s", viper.GetString(constants.ArgSnapshotLocation)))
        }

        // export the result if necessary
        exportArgs := viper.GetStringSlice(constants.ArgExport)
        exportMsg, err := initData.ExportManager.DoExport(ctx, snap.FileNameRoot, snap, exportArgs)
        if err != nil {
            exitCode = constants.ExitCodeSnapshotCreationFailed
            error_helpers.FailOnErrorWithMessage(err, "failed to export snapshot")
        }
        // print the location where the file is exported
        if len(exportMsg) > 0 && viper.GetBool(constants.ArgProgress) {
            fmt.Printf("\n")
            fmt.Println(strings.Join(exportMsg, "\n"))
            fmt.Printf("\n")
        }
    }
    return 0
    // TODO fix me
    //
    //for _, resolvedQuery := range initData.Queries {
    // // if a manual query is being run (i.e. not a named query), convert into a query and add to workspace
    // // this is to allow us to use existing dashboard execution code
    // queryProvider, existingResource := ensureSnapshotQueryResource(resolvedQuery.Name, resolvedQuery, initData.Workspace)
    //
    // // we need to pass the embedded initData to GenerateSnapshot
    // baseInitData := &initData.InitData
    //
    // // so a dashboard name was specified - just call GenerateSnapshot
    // snap, err := snapshot.GenerateSnapshot(ctx, queryProvider.Name(), baseInitData, nil)
    // if err != nil {
    // exitCode = constants.ExitCodeSnapshotCreationFailed
    // error_helpers.FailOnError(err)
    // }
    //
    // // set the filename root for the snapshot (in case needed)
    // if !existingResource {
    // snap.FileNameRoot = "query"
    // }
    //
    // // display the result
    // switch viper.GetString(constants.ArgOutput) {
    // case constants.OutputFormatNone:
    // // do nothing
    // case constants.OutputFormatSnapshot, constants.OutputFormatSnapshotShort:
    // // if the format is snapshot, just dump it out
    // jsonOutput, err := json.MarshalIndent(snap, "", " ")
    // if err != nil {
    // error_helpers.FailOnErrorWithMessage(err, "failed to display result as snapshot")
    // }
    // fmt.Println(string(jsonOutput))
    // default:
    // // otherwise convert the snapshot into a query result
    // result, err := snapshotToQueryResult(snap)
    // error_helpers.FailOnErrorWithMessage(err, "failed to display result as snapshot")
    // display.ShowOutput(ctx, result, display.WithTimingDisabled())
    // }
    //
    // // share the snapshot if necessary
    // err = publishSnapshotIfNeeded(ctx, snap)
    // if err != nil {
    // exitCode = constants.ExitCodeSnapshotUploadFailed
    // error_helpers.FailOnErrorWithMessage(err, fmt.Sprintf("failed to publish snapshot to %s", viper.GetString(constants.ArgSnapshotLocation)))
    // }
    //
    // // export the result if necessary
    // exportArgs := viper.GetStringSlice(constants.ArgExport)
    // exportMsg, err := initData.ExportManager.DoExport(ctx, snap.FileNameRoot, snap, exportArgs)
    // if err != nil {
    // exitCode = constants.ExitCodeSnapshotCreationFailed
    // error_helpers.FailOnErrorWithMessage(err, "failed to export snapshot")
    // }
    // // print the location where the file is exported
    // if len(exportMsg) > 0 && viper.GetBool(constants.ArgProgress) {
    // fmt.Printf("\n")
    // fmt.Println(strings.Join(exportMsg, "\n"))
    // fmt.Printf("\n")
    // }
    //}
    return 0
}
func snapshotToQueryResult(snap *dashboardtypes.SteampipeSnapshot) (*queryresult.Result, error) {
    // the table of a snapshot query has a fixed name
    tablePanel, ok := snap.Panels[modconfig.SnapshotQueryTableName]
    if !ok {
        return nil, sperr.New("dashboard does not contain table result for query")
    }
    chartRun, ok := tablePanel.(*snapshot.LeafRun)
    if !ok {
        return nil, sperr.New("failed to read query result from snapshot")
    }
    // check for error
    if err := chartRun.GetError(); err != nil {
        return nil, error_helpers.DecodePgError(err)
    }

    res := queryresult.NewResult(chartRun.Data.Columns)

    // start a goroutine to stream the results as rows
    go func() {
        for _, d := range chartRun.Data.Rows {
            // we need to allocate a new slice every time, since this gets read
            // asynchronously on the other end and we need to make sure that we don't overwrite
            // data already sent
            rowVals := make([]interface{}, len(chartRun.Data.Columns))
            for i, c := range chartRun.Data.Columns {
                rowVals[i] = d[c.Name]
            }
            res.StreamRow(rowVals)
        }
        res.TimingResult <- chartRun.TimingResult
        res.Close()
    }()

    return res, nil
}

// convert the given command line query into a query resource and add to workspace
// this is to allow us to use existing dashboard execution code
func ensureSnapshotQueryResource(name string, resolvedQuery *modconfig.ResolvedQuery, w *workspace.Workspace) (queryProvider modconfig.HclResource, existingResource bool) {
    // is this an existing resource?
    if parsedName, err := modconfig.ParseResourceName(name); err == nil {
        if resource, found := w.GetResource(parsedName); found {
            return resource, true
        }
    }

    // build name
    shortName := "command_line_query"

    // this is NOT a named query - create the query using RawSql
    q := modconfig.NewQuery(&hcl.Block{Type: modconfig.BlockTypeQuery}, w.Mod, shortName).(*modconfig.Query)
    q.SQL = utils.ToStringPointer(resolvedQuery.RawSQL)
    q.SetArgs(resolvedQuery.QueryArgs())
    // add empty metadata
    q.SetMetadata(&modconfig.ResourceMetadata{})

    // add to the workspace mod so the dashboard execution code can find it
    w.Mod.AddResource(q)
    // return the new resource name
    return q, false
}
//
//func snapshotToQueryResult(snap *dashboardtypes.SteampipeSnapshot) (*queryresult.Result, error) {
// // the table of a snapshot query has a fixed name
// tablePanel, ok := snap.Panels[modconfig.SnapshotQueryTableName]
// if !ok {
// return nil, sperr.New("dashboard does not contain table result for query")
// }
// chartRun := tablePanel.(*snapshot.LeafRun)
// if !ok {
// return nil, sperr.New("failed to read query result from snapshot")
// }
// // check for error
// if err := chartRun.GetError(); err != nil {
// return nil, error_helpers.DecodePgError(err)
// }
//
// res := queryresult.NewResult(chartRun.Data.Columns)
//
// // start a goroutine to stream the results as rows
// go func() {
// for _, d := range chartRun.Data.Rows {
// // we need to allocate a new slice everytime, since this gets read
// // asynchronously on the other end and we need to make sure that we don't overwrite
// // data already sent
// rowVals := make([]interface{}, len(chartRun.Data.Columns))
// for i, c := range chartRun.Data.Columns {
// rowVals[i] = d[c.Name]
// }
// res.StreamRow(rowVals)
// }
// res.TimingResult <- chartRun.TimingResult
// res.Close()
// }()
//
// return res, nil
//}
//
//// convert the given command line query into a query resource and add to workspace
//// this is to allow us to use existing dashboard execution code
//func ensureSnapshotQueryResource(name string, resolvedQuery *modconfig.ResolvedQuery, w *workspace.Workspace) (queryProvider modconfig.HclResource, existingResource bool) {
// // is this an existing resource?
// if parsedName, err := modconfig.ParseResourceName(name); err == nil {
// if resource, found := w.GetResource(parsedName); found {
// return resource, true
// }
// }
//
// // build name
// shortName := "command_line_query"
//
// // this is NOT a named query - create the query using RawSql
// q := modconfig.NewQuery(&hcl.Block{Type: modconfig.BlockTypeQuery}, w.Mod, shortName).(*modconfig.Query)
// q.SQL = utils.ToStringPointer(resolvedQuery.RawSQL)
// q.SetArgs(resolvedQuery.QueryArgs())
// // add empty metadata
// q.SetMetadata(&modconfig.ResourceMetadata{})
//
// // add to the workspace mod so the dashboard execution code can find it
// w.Mod.AddResource(q)
// // return the new resource name
// return q, false
//}

func snapshotRequired() bool {
    SnapshotFormatNames := []string{constants.OutputFormatSnapshot, constants.OutputFormatSnapshotShort}
@@ -391,28 +372,29 @@ func getPipedStdinData() string {
    return stdinData
}

func publishSnapshotIfNeeded(ctx context.Context, snapshot *dashboardtypes.SteampipeSnapshot) error {
    shouldShare := viper.GetBool(constants.ArgShare)
    shouldUpload := viper.GetBool(constants.ArgSnapshot)

    if !(shouldShare || shouldUpload) {
        return nil
    }

    message, err := cloud.PublishSnapshot(ctx, snapshot, shouldShare)
    if err != nil {
        // reword "402 Payment Required" error
        return handlePublishSnapshotError(err)
    }
    if viper.GetBool(constants.ArgProgress) {
        fmt.Println(message)
    }
    return nil
}

func handlePublishSnapshotError(err error) error {
    if err.Error() == "402 Payment Required" {
        return fmt.Errorf("maximum number of snapshots reached")
    }
    return err
}
//
//func publishSnapshotIfNeeded(ctx context.Context, snapshot *dashboardtypes.SteampipeSnapshot) error {
// shouldShare := viper.GetBool(constants.ArgShare)
// shouldUpload := viper.GetBool(constants.ArgSnapshot)
//
// if !(shouldShare || shouldUpload) {
// return nil
// }
//
// message, err := cloud.PublishSnapshot(ctx, snapshot, shouldShare)
// if err != nil {
// // reword "402 Payment Required" error
// return handlePublishSnapshotError(err)
// }
// if viper.GetBool(constants.ArgProgress) {
// fmt.Println(message)
// }
// return nil
//}
//
//func handlePublishSnapshotError(err error) error {
// if err.Error() == "402 Payment Required" {
// return fmt.Errorf("maximum number of snapshots reached")
// }
// return err
//}
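The goroutine in snapshotToQueryResult allocates a fresh rowVals slice for every row before handing it to the result stream, and the comment there explains why: the consumer reads the slice asynchronously, so a reused buffer could be overwritten before it is read. A small, standalone sketch of that producer/consumer pattern with simplified types (this is an illustration, not the steampipe API):

package main

import "fmt"

// streamRows sends each row to a consumer over a channel, as snapshotToQueryResult does.
// A fresh slice is allocated per row; if one backing slice were reused for every row,
// the consumer could observe a row after the producer had already overwritten it.
func streamRows(data [][]int) <-chan []int {
    out := make(chan []int)
    go func() {
        defer close(out)
        for _, row := range data {
            vals := make([]int, len(row)) // new allocation per row, never shared with later rows
            copy(vals, row)
            out <- vals
        }
    }()
    return out
}

func main() {
    for row := range streamRows([][]int{{1, 2}, {3, 4}}) {
        fmt.Println(row)
    }
}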
@@ -1,145 +1,133 @@
package cloud

import (
    "context"
    "fmt"
    "log"
    "path"
    "strings"
    // TODO #snapshot implement me
    //
    //func PublishSnapshot(ctx context.Context, snapshot *dashboardtypes.SteampipeSnapshot, share bool) (string, error) {
    // //snapshotLocation := viper.GetString(constants.ArgSnapshotLocation)
    // //// snapshotLocation must be set (validation should ensure this)
    // //if snapshotLocation == "" {
    // // return "", sperr.New("to share a snapshot, snapshot-location must be set")
    // //}
    // //
    // //// if snapshot location is a workspace handle, upload it
    // //if steampipeconfig.IsCloudWorkspaceIdentifier(snapshotLocation) {
    // // url, err := uploadSnapshot(ctx, snapshot, share)
    // // if err != nil {
    // // return "", sperr.Wrap(err)
    // // }
    // // return fmt.Sprintf("\nSnapshot uploaded to %s\n", url), nil
    // //}
    // //
    // //// otherwise assume snapshot location is a file path
    // //filePath, err := exportSnapshot(snapshot)
    // //if err != nil {
    // // return "", sperr.Wrap(err)
    // //}
    // //return fmt.Sprintf("\nSnapshot saved to %s\n", filePath), nil
    // return "", nil
    //}

    "github.com/spf13/viper"
    steampipecloud "github.com/turbot/steampipe-cloud-sdk-go"
    "github.com/turbot/steampipe-plugin-sdk/v5/sperr"
    "github.com/turbot/steampipe/pkg/constants"
    "github.com/turbot/steampipe/pkg/dashboard/dashboardtypes"
    "github.com/turbot/steampipe/pkg/export"
    "github.com/turbot/steampipe/pkg/steampipeconfig"
)
func PublishSnapshot(ctx context.Context, snapshot *dashboardtypes.SteampipeSnapshot, share bool) (string, error) {
    snapshotLocation := viper.GetString(constants.ArgSnapshotLocation)
    // snapshotLocation must be set (validation should ensure this)
    if snapshotLocation == "" {
        return "", sperr.New("to share a snapshot, snapshot-location must be set")
    }

    // if snapshot location is a workspace handle, upload it
    if steampipeconfig.IsCloudWorkspaceIdentifier(snapshotLocation) {
        url, err := uploadSnapshot(ctx, snapshot, share)
        if err != nil {
            return "", sperr.Wrap(err)
        }
        return fmt.Sprintf("\nSnapshot uploaded to %s\n", url), nil
    }

    // otherwise assume snapshot location is a file path
    filePath, err := exportSnapshot(snapshot)
    if err != nil {
        return "", sperr.Wrap(err)
    }
    return fmt.Sprintf("\nSnapshot saved to %s\n", filePath), nil
}

func exportSnapshot(snapshot *dashboardtypes.SteampipeSnapshot) (string, error) {
    exporter := &export.SnapshotExporter{}

    fileName := export.GenerateDefaultExportFileName(snapshot.FileNameRoot, exporter.FileExtension())
    dirName := viper.GetString(constants.ArgSnapshotLocation)
    filePath := path.Join(dirName, fileName)

    err := exporter.Export(context.Background(), snapshot, filePath)
    if err != nil {
        return "", sperr.Wrap(err)
    }
    return filePath, nil
}

func uploadSnapshot(ctx context.Context, snapshot *dashboardtypes.SteampipeSnapshot, share bool) (string, error) {
    client := newSteampipeCloudClient(viper.GetString(constants.ArgPipesToken))

    cloudWorkspace := viper.GetString(constants.ArgSnapshotLocation)
    parts := strings.Split(cloudWorkspace, "/")
    if len(parts) != 2 {
        return "", sperr.New("failed to resolve username and workspace handle from workspace %s", cloudWorkspace)
    }
    identityHandle := parts[0]
    workspaceHandle := parts[1]

    // now determine whether this is a user or org workspace
    // get the identity
    identity, _, err := client.Identities.Get(ctx, identityHandle).Execute()
    if err != nil {
        return "", sperr.Wrap(err)
    }

    workspaceType := identity.Type

    // set the visibility
    visibility := "workspace"
    if share {
        visibility = "anyone_with_link"
    }

    // resolve the snapshot title
    title := resolveSnapshotTitle(snapshot)
    log.Printf("[TRACE] Uploading snapshot with title %s", title)
    // populate map of tags, if any have been set
    tags := getTags()

    cloudSnapshot, err := snapshot.AsCloudSnapshot()
    if err != nil {
        return "", sperr.Wrap(err)
    }

    // strip verbose/sensitive fields
    dashboardtypes.StripSnapshot(cloudSnapshot)

    req := steampipecloud.CreateWorkspaceSnapshotRequest{Data: *cloudSnapshot, Tags: tags, Visibility: &visibility}
    req.SetTitle(title)

    var uploadedSnapshot steampipecloud.WorkspaceSnapshot
    if identity.Type == "user" {
        uploadedSnapshot, _, err = client.UserWorkspaceSnapshots.Create(ctx, identityHandle, workspaceHandle).Request(req).Execute()
    } else {
        uploadedSnapshot, _, err = client.OrgWorkspaceSnapshots.Create(ctx, identityHandle, workspaceHandle).Request(req).Execute()
    }
    if err != nil {
        return "", sperr.Wrap(err)
    }

    snapshotId := uploadedSnapshot.Id
    snapshotUrl := fmt.Sprintf("https://%s/%s/%s/workspace/%s/snapshot/%s",
        viper.GetString(constants.ArgPipesHost),
        workspaceType,
        identityHandle,
        workspaceHandle,
        snapshotId)

    return snapshotUrl, nil
}

func resolveSnapshotTitle(snapshot *dashboardtypes.SteampipeSnapshot) string {
    if titleArg := viper.GetString(constants.ArgSnapshotTitle); titleArg != "" {
        return titleArg
    }
    // is there a title property set on the snapshot
    if snapshotTitle := snapshot.Title; snapshotTitle != "" {
        return snapshotTitle
    }
    // fall back to the fully qualified name of the root resource (which is also the FileNameRoot)
    return snapshot.FileNameRoot
}

func getTags() map[string]any {
    tags := viper.GetStringSlice(constants.ArgSnapshotTag)
    res := map[string]any{}

    for _, tagStr := range tags {
        parts := strings.Split(tagStr, "=")
        if len(parts) != 2 {
            continue
        }
        res[parts[0]] = parts[1]
    }
    return res
}
//
//func exportSnapshot(snapshot *dashboardtypes.SteampipeSnapshot) (string, error) {
// exporter := &export.SnapshotExporter{}
//
// fileName := export.GenerateDefaultExportFileName(snapshot.FileNameRoot, exporter.FileExtension())
// dirName := viper.GetString(constants.ArgSnapshotLocation)
// filePath := path.Join(dirName, fileName)
//
// err := exporter.Export(context.Background(), snapshot, filePath)
// if err != nil {
// return "", sperr.Wrap(err)
// }
// return filePath, nil
//}
//
//func uploadSnapshot(ctx context.Context, snapshot *dashboardtypes.SteampipeSnapshot, share bool) (string, error) {
// client := newSteampipeCloudClient(viper.GetString(constants.ArgPipesToken))
//
// cloudWorkspace := viper.GetString(constants.ArgSnapshotLocation)
// parts := strings.Split(cloudWorkspace, "/")
// if len(parts) != 2 {
// return "", sperr.New("failed to resolve username and workspace handle from workspace %s", cloudWorkspace)
// }
// identityHandle := parts[0]
// workspaceHandle := parts[1]
//
// // no determine whether this is a user or org workspace
// // get the identity
// identity, _, err := client.Identities.Get(ctx, identityHandle).Execute()
// if err != nil {
// return "", sperr.Wrap(err)
// }
//
// workspaceType := identity.Type
//
// // set the visibility
// visibility := "workspace"
// if share {
// visibility = "anyone_with_link"
// }
//
// // resolve the snapshot title
// title := resolveSnapshotTitle(snapshot)
// log.Printf("[TRACE] Uploading snapshot with title %s", title)
// // populate map of tags tags been set?
// tags := getTags()
//
// cloudSnapshot, err := snapshot.AsCloudSnapshot()
// if err != nil {
// return "", sperr.Wrap(err)
// }
//
// // strip verbose/sensitive fields
// dashboardtypes.StripSnapshot(cloudSnapshot)
//
// req := steampipecloud.CreateWorkspaceSnapshotRequest{Data: *cloudSnapshot, Tags: tags, Visibility: &visibility}
// req.SetTitle(title)
//
// var uploadedSnapshot steampipecloud.WorkspaceSnapshot
// if identity.Type == "user" {
// uploadedSnapshot, _, err = client.UserWorkspaceSnapshots.Create(ctx, identityHandle, workspaceHandle).Request(req).Execute()
// } else {
// uploadedSnapshot, _, err = client.OrgWorkspaceSnapshots.Create(ctx, identityHandle, workspaceHandle).Request(req).Execute()
// }
// if err != nil {
// return "", sperr.Wrap(err)
// }
//
// snapshotId := uploadedSnapshot.Id
// snapshotUrl := fmt.Sprintf("https://%s/%s/%s/workspace/%s/snapshot/%s",
// viper.GetString(constants.ArgPipesHost),
// workspaceType,
// identityHandle,
// workspaceHandle,
// snapshotId)
//
// return snapshotUrl, nil
//}
//
//func resolveSnapshotTitle(snapshot *dashboardtypes.SteampipeSnapshot) string {
// if titleArg := viper.GetString(constants.ArgSnapshotTitle); titleArg != "" {
// return titleArg
// }
// // is there a title property set on the snapshot
// if snapshotTitle := snapshot.Title; snapshotTitle != "" {
// return snapshotTitle
// }
// // fall back to the fully qualified name of the root resource (which is also the FileNameRoot)
// return snapshot.FileNameRoot
//}
//
//func getTags() map[string]any {
// tags := viper.GetStringSlice(constants.ArgSnapshotTag)
// res := map[string]any{}
//
// for _, tagStr := range tags {
// parts := strings.Split(tagStr, "=")
// if len(parts) != 2 {
// continue
// }
// res[parts[0]] = parts[1]
// }
// return res
//}
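getTags above turns each --snapshot-tag key=value argument into a map entry and silently skips anything that does not split into exactly two parts. A standalone sketch of the same parsing, using only the standard library (illustrative, not the steampipe implementation itself):

package main

import (
    "fmt"
    "strings"
)

// parseTags mirrors the behaviour of getTags: each "key=value" entry becomes a map entry;
// entries with no "=" or with more than one "=" are skipped.
func parseTags(args []string) map[string]any {
    res := map[string]any{}
    for _, tagStr := range args {
        parts := strings.Split(tagStr, "=")
        if len(parts) != 2 {
            continue
        }
        res[parts[0]] = parts[1]
    }
    return res
}

func main() {
    fmt.Println(parseTags([]string{"env=prod", "team=platform", "malformed"}))
    // map[env:prod team:platform]
}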
@@ -2,6 +2,7 @@ package cmdconfig

import (
    "fmt"
    pconstants "github.com/turbot/pipe-fittings/constants"
    "os"

    "github.com/spf13/cobra"
@@ -120,7 +121,7 @@ func (c *CmdBuilder) AddCloudFlags() *CmdBuilder {
// AddWorkspaceDatabaseFlag is a helper function to add the workspace-database flag to a command
func (c *CmdBuilder) AddWorkspaceDatabaseFlag() *CmdBuilder {
    return c.
        AddStringFlag(constants.ArgWorkspaceDatabase, constants.DefaultWorkspaceDatabase, "Turbot Pipes workspace database")
        AddStringFlag(pconstants.ArgWorkspaceDatabase, constants.DefaultWorkspaceDatabase, "Turbot Pipes workspace database")
}

// AddModLocationFlag is a helper function to add the mod-location flag to a command
@@ -4,6 +4,8 @@ import (
    "bytes"
    "context"
    "fmt"
    "github.com/turbot/pipe-fittings/parse"
    "github.com/turbot/pipe-fittings/workspace_profile"
    "io"
    "log"
    "os"
@@ -169,7 +171,7 @@ func runScheduledTasks(ctx context.Context, cmd *cobra.Command, args []string, e
// the GlobalConfig has a loglevel set
func logLevelNeedsReset() bool {
    envLogLevelIsSet := envLogLevelSet()
    generalOptionsSet := (steampipeconfig.GlobalConfig.GeneralOptions != nil && steampipeconfig.GlobalConfig.GeneralOptions.LogLevel != nil)
    generalOptionsSet := steampipeconfig.GlobalConfig.GeneralOptions != nil && steampipeconfig.GlobalConfig.GeneralOptions.LogLevel != nil

    return !envLogLevelIsSet && generalOptionsSet
}
@@ -304,7 +306,7 @@ func handleDeprecations() perror_helpers.ErrorAndWarnings {
    return ew
}

func setCloudTokenDefault(loader *steampipeconfig.WorkspaceProfileLoader) error {
func setCloudTokenDefault(loader *parse.WorkspaceProfileLoader[*workspace_profile.SteampipeWorkspaceProfile]) error {
    /*
        saved cloud token
        cloud_token in default workspace
@@ -344,7 +346,7 @@ func setCloudTokenDefault(loader *steampipeconfig.WorkspaceProfileLoader) error
    return nil
}

func getWorkspaceProfileLoader(ctx context.Context) (*steampipeconfig.WorkspaceProfileLoader, error) {
func getWorkspaceProfileLoader(ctx context.Context) (*parse.WorkspaceProfileLoader[*workspace_profile.SteampipeWorkspaceProfile], error) {
    // set viper default for workspace profile, using EnvWorkspaceProfile env var
    SetDefaultFromEnv(constants.EnvWorkspaceProfile, constants.ArgWorkspaceProfile, String)
    // set viper default for install dir, using EnvInstallDir env var
@@ -362,7 +364,7 @@ func getWorkspaceProfileLoader(ctx context.Context) (*steampipeconfig.WorkspaceP
    }

    // create loader
    loader, err := steampipeconfig.NewWorkspaceProfileLoader(ctx, workspaceProfileDir)
    loader, err := parse.NewWorkspaceProfileLoader[*workspace_profile.SteampipeWorkspaceProfile](workspaceProfileDir)
    if err != nil {
        return nil, err
    }
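Several of the hunks above replace the concrete steampipeconfig.WorkspaceProfileLoader with the type-parameterised parse.WorkspaceProfileLoader[*workspace_profile.SteampipeWorkspaceProfile] from pipe-fittings. A rough, hypothetical sketch of what such a generic loader pattern looks like — the names and shapes below are illustrative assumptions, not the pipe-fittings API:

package main

import "fmt"

// Profile is a stand-in constraint for whatever a workspace profile must provide.
type Profile interface {
    Name() string
}

// Loader is parameterised on the concrete profile type it returns, so each CLI
// can instantiate it with its own profile struct instead of a shared concrete type.
type Loader[T Profile] struct {
    profiles map[string]T
}

func NewLoader[T Profile](profiles ...T) *Loader[T] {
    l := &Loader[T]{profiles: map[string]T{}}
    for _, p := range profiles {
        l.profiles[p.Name()] = p
    }
    return l
}

func (l *Loader[T]) Get(name string) (T, bool) {
    p, ok := l.profiles[name]
    return p, ok
}

// SteampipeProfile is a hypothetical concrete profile type.
type SteampipeProfile struct{ ProfileName string }

func (p *SteampipeProfile) Name() string { return p.ProfileName }

func main() {
    loader := NewLoader[*SteampipeProfile](&SteampipeProfile{ProfileName: "default"})
    p, ok := loader.Get("default")
    fmt.Println(p.ProfileName, ok) // default true
}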
@@ -5,6 +5,7 @@ import (
    "fmt"
    "github.com/spf13/viper"
    filehelpers "github.com/turbot/go-kit/files"
    pconstants "github.com/turbot/pipe-fittings/constants"
    "github.com/turbot/steampipe/pkg/cloud"
    "github.com/turbot/steampipe/pkg/constants"
    "github.com/turbot/steampipe/pkg/error_helpers"
@@ -34,7 +35,7 @@ func ValidateSnapshotArgs(ctx context.Context) error {
    }

    // if workspace-database or snapshot-location is a cloud workspace handle, cloud token must be set
    requireCloudToken := steampipeconfig.IsCloudWorkspaceIdentifier(viper.GetString(constants.ArgWorkspaceDatabase)) ||
    requireCloudToken := steampipeconfig.IsCloudWorkspaceIdentifier(viper.GetString(pconstants.ArgWorkspaceDatabase)) ||
        steampipeconfig.IsCloudWorkspaceIdentifier(viper.GetString(constants.ArgSnapshotLocation))

    // verify cloud token and workspace has been set
@@ -12,8 +12,13 @@ import (

    "github.com/spf13/cobra"
    "github.com/spf13/viper"
    filehelpers "github.com/turbot/go-kit/files"
    "github.com/turbot/go-kit/types"
    pconstants "github.com/turbot/pipe-fittings/constants"
    "github.com/turbot/pipe-fittings/parse"
    "github.com/turbot/pipe-fittings/workspace_profile"
    "github.com/turbot/steampipe/pkg/constants"
    "github.com/turbot/steampipe/pkg/filepaths"
)

// Viper fetches the global viper instance
@@ -22,7 +27,7 @@ func Viper() *viper.Viper {
}

// bootstrapViper sets up viper with the essential path config (workspace-chdir and install-dir)
func bootstrapViper(loader *steampipeconfig.WorkspaceProfileLoader, cmd *cobra.Command) error {
func bootstrapViper(loader *parse.WorkspaceProfileLoader[*workspace_profile.SteampipeWorkspaceProfile], cmd *cobra.Command) error {
    // set defaults for keys which do not have a corresponding command flag
    if err := setBaseDefaults(); err != nil {
        return err
@@ -104,8 +109,7 @@ func setBaseDefaults() error {
        constants.ArgPipesInstallDir: pipesInstallDir,

        // workspace profile
        constants.ArgAutoComplete:  true,
        constants.ArgIntrospection: constants.IntrospectionNone,
        constants.ArgAutoComplete: true,

        // from global database options
        constants.ArgDatabasePort: constants.DatabaseDefaultPort,
@@ -155,16 +159,17 @@ func setDefaultsFromEnv() {
        constants.EnvInstallDir:     {[]string{constants.ArgInstallDir}, String},
        constants.EnvWorkspaceChDir: {[]string{constants.ArgModLocation}, String},
        constants.EnvModLocation:    {[]string{constants.ArgModLocation}, String},
        constants.EnvIntrospection:  {[]string{constants.ArgIntrospection}, String},
        constants.EnvTelemetry:      {[]string{constants.ArgTelemetry}, String},
        constants.EnvUpdateCheck:    {[]string{constants.ArgUpdateCheck}, Bool},
        // TODO #breakingchange
        //constants.EnvIntrospection: {[]string{constants.ArgIntrospection}, String},
        constants.EnvTelemetry:   {[]string{constants.ArgTelemetry}, String},
        constants.EnvUpdateCheck: {[]string{constants.ArgUpdateCheck}, Bool},
        // deprecated
        constants.EnvCloudHost:         {[]string{constants.ArgPipesHost}, String},
        constants.EnvCloudToken:        {[]string{constants.ArgPipesToken}, String},
        constants.EnvPipesHost:         {[]string{constants.ArgPipesHost}, String},
        constants.EnvPipesToken:        {[]string{constants.ArgPipesToken}, String},
        constants.EnvSnapshotLocation:  {[]string{constants.ArgSnapshotLocation}, String},
        constants.EnvWorkspaceDatabase: {[]string{constants.ArgWorkspaceDatabase}, String},
        constants.EnvWorkspaceDatabase: {[]string{pconstants.ArgWorkspaceDatabase}, String},
        constants.EnvServicePassword:   {[]string{constants.ArgServicePassword}, String},
        constants.EnvDisplayWidth:      {[]string{constants.ArgDisplayWidth}, Int},
        constants.EnvMaxParallel:       {[]string{constants.ArgMaxParallel}, Int},
@@ -1,12 +1,5 @@
package constants

// Application constants

const (
    AppName = "steampipe"
    FdwName = "steampipe-postgres-fdw"
)

const (
    ClientConnectionAppNamePrefix  = "steampipe_client"
    ServiceConnectionAppNamePrefix = "steampipe_service"
@@ -27,7 +27,6 @@ const (
    ArgTelemetry        = "telemetry"
    ArgInstallDir       = "install-dir"
    ArgPipesInstallDir  = "pipes-install-dir"
    ArgWorkspaceDatabase = "workspace-database"
    ArgSchemaComments   = "schema-comments"
    ArgCloudHost        = "cloud-host"
    ArgCloudToken       = "cloud-token"
@@ -35,43 +34,38 @@ const (
    ArgPipesToken           = "pipes-token"
    ArgSearchPath           = "search-path"
    ArgSearchPathPrefix     = "search-path-prefix"
    ArgWatch                = "watch"
    ArgTheme                = "theme"
    ArgProgress             = "progress"
    ArgExport               = "export"
    ArgMaxParallel          = "max-parallel"
    ArgLogLevel             = "log-level"
    ArgDryRun               = "dry-run"
    ArgWhere                = "where"
    ArgTag                  = "tag"
    ArgVariable             = "var"
    ArgVarFile              = "var-file"
    ArgConnectionString     = "connection-string"
    ArgDisplayWidth         = "display-width"
    ArgPrune                = "prune"
    ArgModInstall           = "mod-install"
    ArgServiceMode          = "service-mode"
    ArgBrowser              = "browser"
    ArgInput                = "input"
    ArgDashboardInput       = "dashboard-input"
    ArgMaxCacheSizeMb       = "max-cache-size-mb"
    ArgCacheTtl             = "cache-ttl"
    ArgClientCacheEnabled   = "client-cache-enabled"
    ArgServiceCacheEnabled  = "service-cache-enabled"
    ArgCacheMaxTtl          = "cache-max-ttl"
    ArgIntrospection        = "introspection"
    ArgShare                = "share"
    ArgSnapshot             = "snapshot"
    ArgSnapshotTag          = "snapshot-tag"
    ArgWorkspaceProfile     = "workspace"
    ArgModLocation          = "mod-location"
    ArgSnapshotLocation     = "snapshot-location"
    ArgSnapshotTitle        = "snapshot-title"
    ArgDatabaseStartTimeout = "database-start-timeout"
    ArgDatabaseSSLPassword  = "database-ssl-password"
    ArgMemoryMaxMb          = "memory-max-mb"
    ArgMemoryMaxMbPlugin    = "memory-max-mb-plugin"
    ArgPluginStartTimeout   = "plugin-start-timeout"
    //ArgWatch = "watch"
    ArgProgress             = "progress"
    ArgExport               = "export"
    ArgMaxParallel          = "max-parallel"
    ArgLogLevel             = "log-level"
    ArgDryRun               = "dry-run"
    ArgWhere                = "where"
    ArgTag                  = "tag"
    ArgVariable             = "var"
    ArgVarFile              = "var-file"
    ArgDisplayWidth         = "display-width"
    ArgPrune                = "prune"
    ArgServiceMode          = "service-mode"
    ArgBrowser              = "browser"
    ArgInput                = "input"
    ArgDashboardInput       = "dashboard-input"
    ArgMaxCacheSizeMb       = "max-cache-size-mb"
    ArgCacheTtl             = "cache-ttl"
    ArgClientCacheEnabled   = "client-cache-enabled"
    ArgServiceCacheEnabled  = "service-cache-enabled"
    ArgCacheMaxTtl          = "cache-max-ttl"
    ArgShare                = "share"
    ArgSnapshot             = "snapshot"
    ArgSnapshotTag          = "snapshot-tag"
    ArgWorkspaceProfile     = "workspace"
    ArgModLocation          = "mod-location"
    ArgSnapshotLocation     = "snapshot-location"
    ArgSnapshotTitle        = "snapshot-title"
    ArgDatabaseStartTimeout = "database-start-timeout"
    ArgDatabaseSSLPassword  = "database-ssl-password"
    ArgMemoryMaxMb          = "memory-max-mb"
    ArgMemoryMaxMbPlugin    = "memory-max-mb-plugin"
)

// metaquery mode arguments
@@ -28,11 +28,11 @@ const (
    EnvCacheMaxSize = "STEAMPIPE_CACHE_MAX_SIZE_MB"
    EnvQueryTimeout = "STEAMPIPE_QUERY_TIMEOUT"

    EnvConnectionWatcher = "STEAMPIPE_CONNECTION_WATCHER"
    EnvWorkspaceChDir    = "STEAMPIPE_WORKSPACE_CHDIR"
    EnvModLocation       = "STEAMPIPE_MOD_LOCATION"
    EnvTelemetry         = "STEAMPIPE_TELEMETRY"
    EnvIntrospection     = "STEAMPIPE_INTROSPECTION"
    EnvConnectionWatcher = "STEAMPIPE_CONNECTION_WATCHER"
    EnvWorkspaceChDir    = "STEAMPIPE_WORKSPACE_CHDIR"
    EnvModLocation       = "STEAMPIPE_MOD_LOCATION"
    EnvTelemetry         = "STEAMPIPE_TELEMETRY"
    //EnvIntrospection = "STEAMPIPE_INTROSPECTION"
    EnvWorkspaceProfileLocation = "STEAMPIPE_WORKSPACE_PROFILES_LOCATION"

    // EnvInputVarPrefix is the prefix for environment variables that represent values for input variables.
@@ -1,8 +0,0 @@
package constants

// constants for introspection config flag
const (
    IntrospectionNone    = "none"
    IntrospectionInfo    = "info"
    IntrospectionControl = "control"
)
@@ -1,393 +0,0 @@
package dashboardevents

import (
    "github.com/turbot/steampipe/pkg/steampipeconfig/modconfig"
)

type DashboardChanged struct {
    ChangedDashboards  []*modconfig.DashboardTreeItemDiffs
    ChangedContainers  []*modconfig.DashboardTreeItemDiffs
    ChangedControls    []*modconfig.DashboardTreeItemDiffs
    ChangedBenchmarks  []*modconfig.DashboardTreeItemDiffs
    ChangedCategories  []*modconfig.DashboardTreeItemDiffs
    ChangedCards       []*modconfig.DashboardTreeItemDiffs
    ChangedCharts      []*modconfig.DashboardTreeItemDiffs
    ChangedFlows       []*modconfig.DashboardTreeItemDiffs
    ChangedGraphs      []*modconfig.DashboardTreeItemDiffs
    ChangedHierarchies []*modconfig.DashboardTreeItemDiffs
    ChangedImages      []*modconfig.DashboardTreeItemDiffs
    ChangedInputs      []*modconfig.DashboardTreeItemDiffs
    ChangedTables      []*modconfig.DashboardTreeItemDiffs
    ChangedTexts       []*modconfig.DashboardTreeItemDiffs
    ChangedNodes       []*modconfig.DashboardTreeItemDiffs
    ChangedEdges       []*modconfig.DashboardTreeItemDiffs

    NewDashboards  []*modconfig.Dashboard
    NewContainers  []*modconfig.DashboardContainer
    NewControls    []*modconfig.Control
    NewBenchmarks  []*modconfig.Benchmark
    NewCards       []*modconfig.DashboardCard
    NewCategories  []*modconfig.DashboardCategory
    NewCharts      []*modconfig.DashboardChart
    NewFlows       []*modconfig.DashboardFlow
    NewGraphs      []*modconfig.DashboardGraph
    NewHierarchies []*modconfig.DashboardHierarchy
    NewImages      []*modconfig.DashboardImage
    NewInputs      []*modconfig.DashboardInput
    NewTables      []*modconfig.DashboardTable
    NewTexts       []*modconfig.DashboardText
    NewNodes       []*modconfig.DashboardNode
    NewEdges       []*modconfig.DashboardEdge

    DeletedDashboards  []*modconfig.Dashboard
    DeletedContainers  []*modconfig.DashboardContainer
    DeletedControls    []*modconfig.Control
    DeletedBenchmarks  []*modconfig.Benchmark
    DeletedCards       []*modconfig.DashboardCard
    DeletedCategories  []*modconfig.DashboardCategory
    DeletedCharts      []*modconfig.DashboardChart
    DeletedFlows       []*modconfig.DashboardFlow
    DeletedGraphs      []*modconfig.DashboardGraph
    DeletedHierarchies []*modconfig.DashboardHierarchy
    DeletedImages      []*modconfig.DashboardImage
    DeletedInputs      []*modconfig.DashboardInput
    DeletedTables      []*modconfig.DashboardTable
    DeletedTexts       []*modconfig.DashboardText
    DeletedNodes       []*modconfig.DashboardNode
    DeletedEdges       []*modconfig.DashboardEdge
}

// IsDashboardEvent implements DashboardEvent interface
func (*DashboardChanged) IsDashboardEvent() {}

func (c *DashboardChanged) HasChanges() bool {
    return len(c.ChangedDashboards)+
        len(c.ChangedContainers)+
        len(c.ChangedBenchmarks)+
        len(c.ChangedControls)+
        len(c.ChangedCards)+
        len(c.ChangedCategories)+
        len(c.ChangedCharts)+
        len(c.ChangedFlows)+
        len(c.ChangedGraphs)+
        len(c.ChangedHierarchies)+
        len(c.ChangedImages)+
        len(c.ChangedInputs)+
        len(c.ChangedTables)+
        len(c.ChangedTexts)+
        len(c.ChangedNodes)+
        len(c.ChangedEdges)+
        len(c.NewDashboards)+
        len(c.NewContainers)+
        len(c.NewBenchmarks)+
        len(c.NewControls)+
        len(c.NewCards)+
        len(c.NewCategories)+
        len(c.NewCharts)+
        len(c.NewFlows)+
        len(c.NewGraphs)+
        len(c.NewHierarchies)+
        len(c.NewImages)+
        len(c.NewInputs)+
        len(c.NewTables)+
        len(c.NewTexts)+
        len(c.NewNodes)+
        len(c.NewEdges)+
        len(c.DeletedDashboards)+
        len(c.DeletedContainers)+
        len(c.DeletedBenchmarks)+
        len(c.DeletedControls)+
        len(c.DeletedCards)+
        len(c.DeletedCategories)+
        len(c.DeletedCharts)+
        len(c.DeletedFlows)+
        len(c.DeletedGraphs)+
        len(c.DeletedHierarchies)+
        len(c.DeletedImages)+
        len(c.DeletedInputs)+
        len(c.DeletedTables)+
        len(c.DeletedTexts)+
        len(c.DeletedNodes)+
        len(c.DeletedEdges) > 0
}

func (c *DashboardChanged) WalkChangedResources(resourceFunc func(item modconfig.ModTreeItem) (bool, error)) error {
    for _, r := range c.ChangedDashboards {
        if continueWalking, err := resourceFunc(r.Item); err != nil || !continueWalking {
            return err
        }
    }
    for _, r := range c.ChangedContainers {
        if continueWalking, err := resourceFunc(r.Item); err != nil || !continueWalking {
            return err
        }
    }
    for _, r := range c.ChangedControls {
        if continueWalking, err := resourceFunc(r.Item); err != nil || !continueWalking {
            return err
        }
    }
    for _, r := range c.ChangedCards {
        if continueWalking, err := resourceFunc(r.Item); err != nil || !continueWalking {
            return err
        }
    }
    for _, r := range c.ChangedCategories {
        if continueWalking, err := resourceFunc(r.Item); err != nil || !continueWalking {
            return err
        }
    }
    for _, r := range c.ChangedCharts {
        if continueWalking, err := resourceFunc(r.Item); err != nil || !continueWalking {
            return err
        }
    }
    for _, r := range c.ChangedFlows {
        if continueWalking, err := resourceFunc(r.Item); err != nil || !continueWalking {
            return err
        }
    }
    for _, r := range c.ChangedGraphs {
        if continueWalking, err := resourceFunc(r.Item); err != nil || !continueWalking {
            return err
        }
    }
    for _, r := range c.ChangedHierarchies {
        if continueWalking, err := resourceFunc(r.Item); err != nil || !continueWalking {
            return err
        }
    }
    for _, r := range c.ChangedImages {
        if continueWalking, err := resourceFunc(r.Item); err != nil || !continueWalking {
            return err
        }
    }
    for _, r := range c.ChangedInputs {
        if continueWalking, err := resourceFunc(r.Item); err != nil || !continueWalking {
            return err
        }
    }
    for _, r := range c.ChangedTables {
        if continueWalking, err := resourceFunc(r.Item); err != nil || !continueWalking {
            return err
        }
    }
    for _, r := range c.ChangedTexts {
        if continueWalking, err := resourceFunc(r.Item); err != nil || !continueWalking {
            return err
        }
    }
    for _, r := range c.NewDashboards {
        if continueWalking, err := resourceFunc(r); err != nil || !continueWalking {
            return err
        }
    }
    for _, r := range c.NewContainers {
        if continueWalking, err := resourceFunc(r); err != nil || !continueWalking {
            return err
        }
    }
    for _, r := range c.NewControls {
        if continueWalking, err := resourceFunc(r); err != nil || !continueWalking {
            return err
        }
    }
    for _, r := range c.NewCards {
        if continueWalking, err := resourceFunc(r); err != nil || !continueWalking {
            return err
        }
    }
    for _, r := range c.NewCategories {
        if continueWalking, err := resourceFunc(r); err != nil || !continueWalking {
            return err
        }
    }
    for _, r := range c.NewCharts {
        if continueWalking, err := resourceFunc(r); err != nil || !continueWalking {
            return err
        }
    }
    for _, r := range c.NewFlows {
        if continueWalking, err := resourceFunc(r); err != nil || !continueWalking {
            return err
        }
    }
    for _, r := range c.NewGraphs {
        if continueWalking, err := resourceFunc(r); err != nil || !continueWalking {
            return err
        }
    }
    for _, r := range c.NewHierarchies {
        if continueWalking, err := resourceFunc(r); err != nil || !continueWalking {
            return err
        }
    }
    for _, r := range c.NewImages {
        if continueWalking, err := resourceFunc(r); err != nil || !continueWalking {
            return err
        }
    }
    for _, r := range c.NewInputs {
        if continueWalking, err := resourceFunc(r); err != nil || !continueWalking {
            return err
        }
    }
    for _, r := range c.NewTables {
        if continueWalking, err := resourceFunc(r); err != nil || !continueWalking {
            return err
        }
    }
    for _, r := range c.NewTexts {
        if continueWalking, err := resourceFunc(r); err != nil || !continueWalking {
            return err
        }
    }
    for _, r := range c.DeletedContainers {
        if continueWalking, err := resourceFunc(r); err != nil || !continueWalking {
            return err
        }
    }
    for _, r := range c.DeletedControls {
        if continueWalking, err := resourceFunc(r); err != nil || !continueWalking {
            return err
        }
    }
    for _, r := range c.DeletedCards {
        if continueWalking, err := resourceFunc(r); err != nil || !continueWalking {
            return err
        }
    }
    for _, r := range c.DeletedCategories {
        if continueWalking, err := resourceFunc(r); err != nil || !continueWalking {
            return err
        }
    }
    for _, r := range c.DeletedCharts {
        if continueWalking, err := resourceFunc(r); err != nil || !continueWalking {
            return err
        }
    }
    for _, r := range c.DeletedFlows {
        if continueWalking, err := resourceFunc(r); err != nil || !continueWalking {
            return err
        }
    }
    for _, r := range c.DeletedGraphs {
        if continueWalking, err := resourceFunc(r); err != nil || !continueWalking {
            return err
        }
    }
    for _, r := range c.DeletedHierarchies {
        if continueWalking, err := resourceFunc(r); err != nil || !continueWalking {
            return err
        }
    }
    for _, r := range c.DeletedImages {
        if continueWalking, err := resourceFunc(r); err != nil || !continueWalking {
            return err
        }
    }
    for _, r := range c.DeletedInputs {
        if continueWalking, err := resourceFunc(r); err != nil || !continueWalking {
            return err
        }
    }
    for _, r := range c.DeletedTables {
        if continueWalking, err := resourceFunc(r); err != nil || !continueWalking {
            return err
        }
    }
    for _, r := range c.DeletedTexts {
        if continueWalking, err := resourceFunc(r); err != nil || !continueWalking {
            return err
        }
    }

    return nil
}

func (c *DashboardChanged) SetParentsChanged(item modconfig.ModTreeItem, prevResourceMaps *modconfig.ResourceMaps) {
    if prevResourceMaps == nil {
        return
    }

    parents := item.GetParents()
    for _, parent := range parents {
        // if the parent DID NOT exist in the previous resource maps, do nothing
        parsedResourceName, _ := modconfig.ParseResourceName(parent.Name())
        if _, existingResource := prevResourceMaps.GetResource(parsedResourceName); existingResource {
            c.AddChanged(parent)
            c.SetParentsChanged(parent, prevResourceMaps)
        }
    }
}

func (c *DashboardChanged) diffsContain(diffs []*modconfig.DashboardTreeItemDiffs, item modconfig.ModTreeItem) bool {
    for _, d := range diffs {
        if d.Item.Name() == item.Name() {
            return true
        }
    }
    return false
}

func (c *DashboardChanged) AddChanged(item modconfig.ModTreeItem) {
    diff := &modconfig.DashboardTreeItemDiffs{
        Name:              item.Name(),
        Item:              item,
        ChangedProperties: []string{"Children"},
    }
    switch item.(type) {
    case *modconfig.Dashboard:
        if !c.diffsContain(c.ChangedDashboards, item) {
            c.ChangedDashboards = append(c.ChangedDashboards, diff)
        }
    case *modconfig.DashboardContainer:
        if !c.diffsContain(c.ChangedContainers, item) {
            c.ChangedContainers = append(c.ChangedContainers, diff)
        }
    case *modconfig.Control:
        if !c.diffsContain(c.ChangedControls, item) {
            c.ChangedControls = append(c.ChangedControls, diff)
        }
    case *modconfig.Benchmark:
        if !c.diffsContain(c.ChangedBenchmarks, item) {
            c.ChangedBenchmarks = append(c.ChangedBenchmarks, diff)
        }
    case *modconfig.DashboardCard:
        if !c.diffsContain(c.ChangedCards, item) {
            c.ChangedCards = append(c.ChangedCards, diff)
        }
    case *modconfig.DashboardCategory:
        if !c.diffsContain(c.ChangedCategories, item) {
            c.ChangedCategories = append(c.ChangedCategories, diff)
        }
    case *modconfig.DashboardChart:
        if !c.diffsContain(c.ChangedCharts, item) {
            c.ChangedCharts = append(c.ChangedCharts, diff)
        }
    case *modconfig.DashboardHierarchy:
        if !c.diffsContain(c.ChangedHierarchies, item) {
            c.ChangedHierarchies = append(c.ChangedHierarchies, diff)
        }

    case *modconfig.DashboardImage:
        if !c.diffsContain(c.ChangedImages, item) {
            c.ChangedImages = append(c.ChangedImages, diff)
        }

    case *modconfig.DashboardInput:
        if !c.diffsContain(c.ChangedInputs, item) {
            c.ChangedInputs = append(c.ChangedInputs, diff)
        }

    case *modconfig.DashboardTable:
        if !c.diffsContain(c.ChangedTables, item) {
            c.ChangedTables = append(c.ChangedTables, diff)
        }
    case *modconfig.DashboardText:
        if !c.diffsContain(c.ChangedTexts, item) {
            c.ChangedTexts = append(c.ChangedTexts, diff)
        }
    }
}
@@ -1,8 +0,0 @@
|
||||
package dashboardevents
|
||||
|
||||
import "context"
|
||||
|
||||
type DashboardEvent interface {
|
||||
IsDashboardEvent()
|
||||
}
|
||||
type DashboardEventHandler func(context.Context, DashboardEvent)
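DashboardEvent is a pure marker interface, so consumers of a DashboardEventHandler normally recover the concrete event with a type switch. Below is a minimal, self-contained sketch of that pattern; the Event/Started/Errored types are simplified stand-ins, not the real types from this package.

package main

import (
	"context"
	"fmt"
)

// marker interface, mirroring the shape of DashboardEvent
type Event interface{ IsEvent() }

type Started struct{ Session string }
type Errored struct{ Err error }

func (*Started) IsEvent() {}
func (*Errored) IsEvent() {}

// handler signature, analogous to DashboardEventHandler
type Handler func(context.Context, Event)

func main() {
	var handle Handler = func(_ context.Context, e Event) {
		// recover the concrete event type with a type switch
		switch ev := e.(type) {
		case *Started:
			fmt.Println("execution started for session", ev.Session)
		case *Errored:
			fmt.Println("execution failed:", ev.Err)
		}
	}
	handle(context.Background(), &Started{Session: "s1"})
	handle(context.Background(), &Errored{Err: fmt.Errorf("boom")})
}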
|
||||
@@ -1,22 +0,0 @@
|
||||
package dashboardevents
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/turbot/steampipe/pkg/dashboard/dashboardtypes"
|
||||
)
|
||||
|
||||
type ExecutionComplete struct {
|
||||
Root dashboardtypes.DashboardTreeRun
|
||||
Session string
|
||||
ExecutionId string
|
||||
Panels map[string]dashboardtypes.SnapshotPanel
|
||||
Inputs map[string]interface{}
|
||||
Variables map[string]string
|
||||
SearchPath []string
|
||||
StartTime time.Time
|
||||
EndTime time.Time
|
||||
}
|
||||
|
||||
// IsDashboardEvent implements DashboardEvent interface
|
||||
func (*ExecutionComplete) IsDashboardEvent() {}
|
||||
@@ -1,14 +0,0 @@
|
||||
package dashboardevents
|
||||
|
||||
import "time"
|
||||
|
||||
// ExecutionError is an event which is sent if an error occurs _before execution has started_
|
||||
// e.g. a failure to create the execution tree
|
||||
type ExecutionError struct {
|
||||
Error error
|
||||
Session string
|
||||
Timestamp time.Time
|
||||
}
|
||||
|
||||
// IsDashboardEvent implements DashboardEvent interface
|
||||
func (*ExecutionError) IsDashboardEvent() {}
|
||||
@@ -1,21 +0,0 @@
|
||||
package dashboardevents
|
||||
|
||||
import (
|
||||
"github.com/turbot/steampipe/pkg/dashboard/dashboardtypes"
|
||||
"time"
|
||||
)
|
||||
|
||||
type ExecutionStarted struct {
|
||||
Root dashboardtypes.DashboardTreeRun `json:"dashboard"`
|
||||
Panels map[string]any
|
||||
Session string
|
||||
ExecutionId string
|
||||
Inputs map[string]any
|
||||
Variables map[string]string
|
||||
StartTime time.Time
|
||||
// immutable representation of event data - to avoid mutation before we send it
|
||||
JsonData []byte
|
||||
}
|
||||
|
||||
// IsDashboardEvent implements DashboardEvent interface
|
||||
func (*ExecutionStarted) IsDashboardEvent() {}
|
||||
@@ -1,10 +0,0 @@
|
||||
package dashboardevents
|
||||
|
||||
type InputValuesCleared struct {
|
||||
ClearedInputs []string
|
||||
Session string
|
||||
ExecutionId string
|
||||
}
|
||||
|
||||
// IsDashboardEvent implements DashboardEvent interface
|
||||
func (*InputValuesCleared) IsDashboardEvent() {}
|
||||
@@ -1,30 +0,0 @@
|
||||
package dashboardevents
|
||||
|
||||
import (
|
||||
"github.com/turbot/pipe-fittings/utils"
|
||||
"github.com/turbot/steampipe/pkg/dashboard/dashboardtypes"
|
||||
"time"
|
||||
)
|
||||
|
||||
type LeafNodeUpdated struct {
|
||||
LeafNode map[string]any
|
||||
Session string
|
||||
ExecutionId string
|
||||
Timestamp time.Time
|
||||
}
|
||||
|
||||
func NewLeafNodeUpdate(r dashboardtypes.DashboardTreeRun, session, executionId string) (*LeafNodeUpdated, error) {
|
||||
immutableNode, err := utils.JsonCloneToMap(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &LeafNodeUpdated{
|
||||
LeafNode: immutableNode,
|
||||
Session: session,
|
||||
ExecutionId: executionId,
|
||||
Timestamp: time.Now(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// IsDashboardEvent implements DashboardEvent interface
|
||||
func (*LeafNodeUpdated) IsDashboardEvent() {}
|
||||
@@ -1,8 +0,0 @@
|
||||
package dashboardevents
|
||||
|
||||
type WorkspaceError struct {
|
||||
Error error
|
||||
}
|
||||
|
||||
// IsDashboardEvent implements DashboardEvent interface
|
||||
func (*WorkspaceError) IsDashboardEvent() {}
|
||||
@@ -1,13 +0,0 @@
|
||||
package dashboardtypes
|
||||
|
||||
import "context"
|
||||
|
||||
// DashboardParent is an interface implemented by all dashboard run nodes which have children
|
||||
type DashboardParent interface {
|
||||
DashboardTreeRun
|
||||
GetName() string
|
||||
ChildCompleteChan() chan DashboardTreeRun
|
||||
GetChildren() []DashboardTreeRun
|
||||
ChildrenComplete() bool
|
||||
ChildStatusChanged(context.Context)
|
||||
}
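ChildCompleteChan is the hook a parent run uses to block until each of its children reports completion. The following is a stripped-down sketch of that coordination, using local stand-in types rather than the real DashboardParent/DashboardTreeRun implementations.

package main

import "fmt"

type childRun struct{ name string }

type parentRun struct {
	children  []*childRun
	childDone chan *childRun // plays the role of ChildCompleteChan()
}

// waitForChildren blocks until every child has reported completion
func (p *parentRun) waitForChildren() {
	for completed := 0; completed < len(p.children); completed++ {
		c := <-p.childDone
		fmt.Println("child complete:", c.name)
	}
}

func main() {
	p := &parentRun{
		children:  []*childRun{{name: "chart1"}, {name: "table1"}},
		childDone: make(chan *childRun, 2),
	}
	// children execute concurrently and notify the parent as they finish
	for _, c := range p.children {
		go func(c *childRun) { p.childDone <- c }(c)
	}
	p.waitForChildren()
}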
|
||||
@@ -1,24 +0,0 @@
|
||||
package dashboardtypes
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/turbot/steampipe/pkg/steampipeconfig/modconfig"
|
||||
)
|
||||
|
||||
// DashboardTreeRun is an interface implemented by all dashboard run nodes
|
||||
type DashboardTreeRun interface {
|
||||
Initialise(ctx context.Context)
|
||||
Execute(ctx context.Context)
|
||||
GetName() string
|
||||
GetTitle() string
|
||||
GetRunStatus() RunStatus
|
||||
SetError(context.Context, error)
|
||||
GetError() error
|
||||
GetParent() DashboardParent
|
||||
SetComplete(context.Context)
|
||||
RunComplete() bool
|
||||
GetInputsDependingOn(string) []string
|
||||
GetNodeType() string
|
||||
AsTreeNode() *SnapshotTreeNode
|
||||
GetResource() modconfig.DashboardLeafNode
|
||||
}
|
||||
@@ -1,28 +0,0 @@
|
||||
package dashboardtypes
|
||||
|
||||
import (
|
||||
"github.com/turbot/steampipe/pkg/query/queryresult"
|
||||
)
|
||||
|
||||
type LeafData struct {
|
||||
Columns []*queryresult.ColumnDef `json:"columns"`
|
||||
Rows []map[string]interface{} `json:"rows"`
|
||||
}
|
||||
|
||||
func NewLeafData(result *queryresult.SyncQueryResult) *LeafData {
|
||||
leafData := &LeafData{
|
||||
Rows: make([]map[string]interface{}, len(result.Rows)),
|
||||
Columns: result.Cols,
|
||||
}
|
||||
|
||||
for rowIdx, row := range result.Rows {
|
||||
rowData := make(map[string]interface{}, len(result.Cols))
|
||||
for i, data := range row.(*queryresult.RowResult).Data {
|
||||
columnName := leafData.Columns[i].Name
|
||||
rowData[columnName] = data
|
||||
}
|
||||
|
||||
leafData.Rows[rowIdx] = rowData
|
||||
}
|
||||
return leafData
|
||||
}
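LeafData is what each panel's query result looks like once it lands in a snapshot: a list of column definitions plus rows keyed by column name. Here is a small sketch of that JSON shape, with a simplified columnDef standing in for queryresult.ColumnDef and invented row values.

package main

import (
	"encoding/json"
	"fmt"
)

// simplified stand-ins for queryresult.ColumnDef and LeafData
type columnDef struct {
	Name     string `json:"name"`
	DataType string `json:"data_type"`
}

type leafData struct {
	Columns []*columnDef             `json:"columns"`
	Rows    []map[string]interface{} `json:"rows"`
}

func main() {
	d := leafData{
		Columns: []*columnDef{{Name: "region", DataType: "text"}, {Name: "count", DataType: "integer"}},
		Rows: []map[string]interface{}{
			{"region": "us-east-1", "count": 12},
			{"region": "eu-west-1", "count": 3},
		},
	}
	out, _ := json.MarshalIndent(d, "", "  ")
	fmt.Println(string(out))
}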
|
||||
@@ -1,78 +0,0 @@
|
||||
package dashboardtypes
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"sync"
|
||||
|
||||
"github.com/turbot/go-kit/helpers"
|
||||
"github.com/turbot/steampipe/pkg/steampipeconfig/modconfig"
|
||||
)
|
||||
|
||||
// ResolvedRuntimeDependency is a wrapper for RuntimeDependency which contains the resolved value
|
||||
// we must wrap it so that we do not mutate the underlying workspace data when resolving dependency values
|
||||
type ResolvedRuntimeDependency struct {
|
||||
Dependency *modconfig.RuntimeDependency
|
||||
valueLock sync.Mutex
|
||||
Value any
|
||||
// the name of the run which publishes this dependency
|
||||
publisherName string
|
||||
valueChannel chan *ResolvedRuntimeDependencyValue
|
||||
}
|
||||
|
||||
func NewResolvedRuntimeDependency(dep *modconfig.RuntimeDependency, valueChannel chan *ResolvedRuntimeDependencyValue, publisherName string) *ResolvedRuntimeDependency {
|
||||
return &ResolvedRuntimeDependency{
|
||||
Dependency: dep,
|
||||
valueChannel: valueChannel,
|
||||
publisherName: publisherName,
|
||||
}
|
||||
}
|
||||
|
||||
// ScopedName returns a unique name for the dependency by prepending the publisher name
|
||||
// this is used to uniquely identify which `with` is used - for the snapshot data
|
||||
func (d *ResolvedRuntimeDependency) ScopedName() string {
|
||||
return fmt.Sprintf("%s.%s", d.publisherName, d.Dependency.SourceResourceName())
|
||||
}
|
||||
|
||||
func (d *ResolvedRuntimeDependency) IsResolved() bool {
|
||||
d.valueLock.Lock()
|
||||
defer d.valueLock.Unlock()
|
||||
|
||||
return d.hasValue()
|
||||
}
|
||||
|
||||
func (d *ResolvedRuntimeDependency) Resolve() error {
|
||||
d.valueLock.Lock()
|
||||
defer d.valueLock.Unlock()
|
||||
|
||||
log.Printf("[TRACE] ResolvedRuntimeDependency Resolve dep %s chan %p", d.Dependency.PropertyPath, d.valueChannel)
|
||||
|
||||
// if we are already resolved, do nothing
|
||||
if d.hasValue() {
|
||||
return nil
|
||||
}
|
||||
|
||||
// wait for value
|
||||
val := <-d.valueChannel
|
||||
|
||||
d.Value = val.Value
|
||||
|
||||
// TACTICAL if the desired value is an array, wrap in an array
|
||||
if d.Dependency.IsArray {
|
||||
d.Value = helpers.AnySliceToTypedSlice([]any{d.Value})
|
||||
}
|
||||
|
||||
if val.Error != nil {
|
||||
return val.Error
|
||||
}
|
||||
|
||||
// we should have a non nil value now
|
||||
if !d.hasValue() {
|
||||
return fmt.Errorf("nil value recevied for runtime dependency %s", d.Dependency.String())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *ResolvedRuntimeDependency) hasValue() bool {
|
||||
return !helpers.IsNil(d.Value)
|
||||
}
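Resolve blocks on valueChannel until whichever run publishes this dependency sends a value (or an error), and only then is Value populated. Below is a cut-down sketch of the same handshake, with local stand-in types instead of the modconfig/dashboardtypes ones.

package main

import (
	"fmt"
	"sync"
)

type resolvedValue struct {
	Value any
	Error error
}

type dependency struct {
	mu    sync.Mutex
	value any
	ch    chan *resolvedValue
}

// resolve waits for the publishing run to send a value (or an error)
func (d *dependency) resolve() error {
	d.mu.Lock()
	defer d.mu.Unlock()
	if d.value != nil { // already resolved - nothing to do
		return nil
	}
	val := <-d.ch
	if val.Error != nil {
		return val.Error
	}
	d.value = val.Value
	return nil
}

func main() {
	dep := &dependency{ch: make(chan *resolvedValue, 1)}

	// the publishing run sends its result when it completes
	go func() { dep.ch <- &resolvedValue{Value: "vpc-123"} }()

	if err := dep.resolve(); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("resolved value:", dep.value)
}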
|
||||
@@ -1,11 +0,0 @@
|
||||
package dashboardtypes
|
||||
|
||||
//type WithResult struct {
|
||||
// *LeafData
|
||||
// Error error
|
||||
//}
|
||||
|
||||
type ResolvedRuntimeDependencyValue struct {
|
||||
Value any
|
||||
Error error
|
||||
}
|
||||
@@ -1,20 +0,0 @@
|
||||
package dashboardtypes
|
||||
|
||||
type RunStatus string
|
||||
|
||||
const (
|
||||
RunInitialized RunStatus = "initialized"
|
||||
RunBlocked RunStatus = "blocked"
|
||||
RunRunning RunStatus = "running"
|
||||
RunComplete RunStatus = "complete"
|
||||
RunError RunStatus = "error"
|
||||
RunCanceled RunStatus = "canceled"
|
||||
)
|
||||
|
||||
func (s RunStatus) IsError() bool {
|
||||
return s == RunError || s == RunCanceled
|
||||
}
|
||||
|
||||
func (s RunStatus) IsFinished() bool {
|
||||
return s == RunComplete || s.IsError()
|
||||
}
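IsFinished treats the two error-ish states as terminal alongside RunComplete, which is what callers polling a run's status rely on. A tiny sketch that just mirrors the declarations above to show the resulting truth table:

package main

import "fmt"

type RunStatus string

const (
	RunRunning  RunStatus = "running"
	RunComplete RunStatus = "complete"
	RunError    RunStatus = "error"
	RunCanceled RunStatus = "canceled"
)

func (s RunStatus) IsError() bool    { return s == RunError || s == RunCanceled }
func (s RunStatus) IsFinished() bool { return s == RunComplete || s.IsError() }

func main() {
	for _, s := range []RunStatus{RunRunning, RunComplete, RunCanceled} {
		// a run is finished once it is complete, errored or canceled
		fmt.Printf("%-9s finished=%v error=%v\n", s, s.IsFinished(), s.IsError())
	}
}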
|
||||
@@ -1,75 +0,0 @@
|
||||
package dashboardtypes
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"time"
|
||||
|
||||
steampipecloud "github.com/turbot/steampipe-cloud-sdk-go"
|
||||
)
|
||||
|
||||
var SteampipeSnapshotSchemaVersion int64 = 20221222
|
||||
|
||||
type SteampipeSnapshot struct {
|
||||
SchemaVersion string `json:"schema_version"`
|
||||
Panels map[string]SnapshotPanel `json:"panels"`
|
||||
Inputs map[string]interface{} `json:"inputs"`
|
||||
Variables map[string]string `json:"variables"`
|
||||
SearchPath []string `json:"search_path"`
|
||||
StartTime time.Time `json:"start_time"`
|
||||
EndTime time.Time `json:"end_time"`
|
||||
Layout *SnapshotTreeNode `json:"layout"`
|
||||
FileNameRoot string `json:"-"`
|
||||
Title string `json:"-"`
|
||||
}
|
||||
|
||||
// IsExportSourceData implements ExportSourceData
|
||||
func (*SteampipeSnapshot) IsExportSourceData() {}
|
||||
|
||||
func (s *SteampipeSnapshot) AsCloudSnapshot() (*steampipecloud.WorkspaceSnapshotData, error) {
|
||||
jsonbytes, err := json.Marshal(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
res := &steampipecloud.WorkspaceSnapshotData{}
|
||||
if err := json.Unmarshal(jsonbytes, res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (s *SteampipeSnapshot) AsStrippedJson(indent bool) ([]byte, error) {
|
||||
res, err := s.AsCloudSnapshot()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = StripSnapshot(res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if indent {
|
||||
return json.MarshalIndent(res, "", " ")
|
||||
}
|
||||
return json.Marshal(res)
|
||||
}
|
||||
|
||||
func StripSnapshot(snapshot *steampipecloud.WorkspaceSnapshotData) error {
|
||||
propertiesToStrip := []string{
|
||||
"sql",
|
||||
"source_definition",
|
||||
"documentation",
|
||||
"search_path",
|
||||
"search_path_prefix"}
|
||||
for _, p := range snapshot.Panels {
|
||||
panel := p.(map[string]any)
|
||||
properties, _ := panel["properties"].(map[string]any)
|
||||
for _, property := range propertiesToStrip {
|
||||
// look both at top level and under properties
|
||||
delete(panel, property)
|
||||
if properties != nil {
|
||||
delete(properties, property)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
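StripSnapshot walks every panel and deletes the heavyweight properties (sql, source_definition and so on) both at the top level and under properties before the snapshot is marshalled. Here is a hedged sketch of that stripping step applied to a single invented panel map.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// a panel as it might appear in a snapshot, before stripping (contents invented)
	panel := map[string]any{
		"name":   "dashboard.example.chart",
		"status": "complete",
		"properties": map[string]any{
			"type": "bar",
			"sql":  "select region, count(*) from resources group by region",
		},
		"sql": "select region, count(*) from resources group by region",
	}

	// mirror the stripping logic: delete each property at the top level and under properties
	for _, prop := range []string{"sql", "source_definition", "documentation", "search_path", "search_path_prefix"} {
		delete(panel, prop)
		if props, ok := panel["properties"].(map[string]any); ok {
			delete(props, prop)
		}
	}

	out, _ := json.MarshalIndent(panel, "", "  ")
	fmt.Println(string(out))
}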
|
||||
@@ -1,7 +0,0 @@
|
||||
package dashboardtypes
|
||||
|
||||
// SnapshotPanel is an interface implemented by all nodes which are to be included in the Snapshot Panels map
|
||||
// this consists of all 'Run' types - LeafRun, DashboardRun, etc.
|
||||
type SnapshotPanel interface {
|
||||
IsSnapshotPanel()
|
||||
}
|
||||
@@ -1,8 +0,0 @@
|
||||
package dashboardtypes
|
||||
|
||||
// SnapshotTreeNode is a struct used to store the dashboard structure in the snapshot
|
||||
type SnapshotTreeNode struct {
|
||||
Name string `json:"name"`
|
||||
Children []*SnapshotTreeNode `json:"children,omitempty"`
|
||||
NodeType string `json:"panel_type"`
|
||||
}
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/jackc/pgx/v5"
|
||||
"github.com/jackc/pgx/v5/pgconn"
|
||||
"github.com/jackc/pgx/v5/pgxpool"
|
||||
"github.com/spf13/viper"
|
||||
@@ -59,30 +58,17 @@ type DbClient struct {
|
||||
onConnectionCallback DbConnectionCallback
|
||||
}
|
||||
|
||||
func NewDbClient(ctx context.Context, connectionString string, onConnectionCallback DbConnectionCallback, opts ...ClientOption) (_ *DbClient, err error) {
|
||||
func NewDbClient(ctx context.Context, connectionString string, opts ...ClientOption) (_ *DbClient, err error) {
|
||||
utils.LogTime("db_client.NewDbClient start")
|
||||
defer utils.LogTime("db_client.NewDbClient end")
|
||||
|
||||
wg := &sync.WaitGroup{}
|
||||
// wrap onConnectionCallback to use wait group
|
||||
var wrappedOnConnectionCallback DbConnectionCallback
|
||||
if onConnectionCallback != nil {
|
||||
wrappedOnConnectionCallback = func(ctx context.Context, conn *pgx.Conn) error {
|
||||
wg.Add(1)
|
||||
defer wg.Done()
|
||||
return onConnectionCallback(ctx, conn)
|
||||
}
|
||||
}
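Wrapping onConnectionCallback so that every invocation registers with a WaitGroup lets the client later wait for all in-flight connection initialisations to drain. Below is the pattern in isolation, with a simplified callback signature instead of the real pgx one (the extra "started" group only exists to keep this toy example race-free).

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	onConnect := func(connID int) error {
		time.Sleep(10 * time.Millisecond) // pretend to prepare the session
		fmt.Println("initialised connection", connID)
		return nil
	}

	var inFlight sync.WaitGroup // tracks callbacks that are still running
	var started sync.WaitGroup  // only needed to make this toy example race-free

	// wrap the callback so every invocation is tracked by the wait group
	wrapped := func(connID int) error {
		inFlight.Add(1)
		started.Done()
		defer inFlight.Done()
		return onConnect(connID)
	}

	for i := 1; i <= 3; i++ {
		started.Add(1)
		go func(id int) { _ = wrapped(id) }(i)
	}

	started.Wait()  // every callback has registered itself...
	inFlight.Wait() // ...so it is now safe to wait for them all to finish
	fmt.Println("all connection callbacks complete")
}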
|
||||
|
||||
client := &DbClient{
|
||||
// a weighted semaphore to control the maximum number of parallel
|
||||
// initializations under way
|
||||
parallelSessionInitLock: semaphore.NewWeighted(constants.MaxParallelClientInits),
|
||||
sessions: make(map[uint32]*db_common.DatabaseSession),
|
||||
sessionsMutex: &sync.Mutex{},
|
||||
// store the callback
|
||||
onConnectionCallback: wrappedOnConnectionCallback,
|
||||
connectionString: connectionString,
|
||||
connectionString: connectionString,
|
||||
}
|
||||
|
||||
defer func() {
|
||||
|
||||
@@ -167,8 +167,8 @@ func startDatabaseInLocation(ctx context.Context, location string) (*pgRunningIn
|
||||
// NOTE: If quoted, the application name includes the quotes. Worried about
|
||||
// having spaces in the APPNAME, but leaving it unquoted since currently
|
||||
// the APPNAME is hardcoded to be steampipe.
|
||||
"-c", fmt.Sprintf("application_name=%s", constants.AppName),
|
||||
"-c", fmt.Sprintf("cluster_name=%s", constants.AppName),
|
||||
"-c", fmt.Sprintf("application_name=%s", app_specific.AppName),
|
||||
"-c", fmt.Sprintf("cluster_name=%s", app_specific.AppName),
|
||||
|
||||
// Data Directory
|
||||
"-D", dataLocation,
|
||||
|
||||
@@ -1,27 +0,0 @@
|
||||
package db_local
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ColumnTag is a struct used to display column info in introspection tables
|
||||
type ColumnTag struct {
|
||||
Column string
|
||||
// the introspected go type
|
||||
ColumnType string
|
||||
}
|
||||
|
||||
func newColumnTag(field reflect.StructField) (*ColumnTag, bool) {
|
||||
columnTag, ok := field.Tag.Lookup(TagColumn)
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
split := strings.Split(columnTag, ",")
|
||||
if len(split) != 2 {
|
||||
return nil, false
|
||||
}
|
||||
column := split[0]
|
||||
columnType := split[1]
|
||||
return &ColumnTag{column, columnType}, true
|
||||
}
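newColumnTag expects struct fields tagged as column:"<name>,<postgres type>" and ignores anything else. A short sketch of reading that tag convention from a hypothetical resource struct; the struct and its fields are invented for illustration.

package main

import (
	"fmt"
	"reflect"
	"strings"
)

// a hypothetical resource using the same tag convention as the introspection code
type exampleControl struct {
	Title       *string  `column:"title,text"`
	Severity    *string  `column:"severity,text"`
	Tags        []string `column:"tags,jsonb"`
	internalRef string   // no column tag, so it is skipped
}

func main() {
	t := reflect.TypeOf(exampleControl{})
	for i := 0; i < t.NumField(); i++ {
		tag, ok := t.Field(i).Tag.Lookup("column")
		if !ok {
			continue
		}
		// the tag value is "<column name>,<column type>"
		parts := strings.Split(tag, ",")
		if len(parts) != 2 {
			continue
		}
		fmt.Printf("column %-8s type %s\n", parts[0], parts[1])
	}
}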
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/turbot/pipe-fittings/app_specific"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
@@ -356,8 +357,8 @@ func startServiceForInstall(port int) (*psutils.Process, error) {
|
||||
// NOTE: If quoted, the application name includes the quotes. Worried about
|
||||
// having spaces in the APPNAME, but leaving it unquoted since currently
|
||||
// the APPNAME is hardcoded to be steampipe.
|
||||
"-c", fmt.Sprintf("application_name=%s", constants.AppName),
|
||||
"-c", fmt.Sprintf("cluster_name=%s", constants.AppName),
|
||||
"-c", fmt.Sprintf("application_name=%s", app_specific.AppName),
|
||||
"-c", fmt.Sprintf("cluster_name=%s", app_specific.AppName),
|
||||
|
||||
// log directory
|
||||
"-c", fmt.Sprintf("log_directory=%s", filepaths.EnsureLogDir()),
|
||||
|
||||
@@ -1,355 +0,0 @@
|
||||
package db_local
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/jackc/pgx/v5"
|
||||
"github.com/spf13/viper"
|
||||
"github.com/turbot/go-kit/helpers"
|
||||
typeHelpers "github.com/turbot/go-kit/types"
|
||||
"github.com/turbot/pipe-fittings/hclhelpers"
|
||||
"github.com/turbot/pipe-fittings/utils"
|
||||
"github.com/turbot/steampipe/pkg/constants"
|
||||
"github.com/turbot/steampipe/pkg/db/db_common"
|
||||
"github.com/turbot/steampipe/pkg/steampipeconfig/modconfig"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
// TagColumn is the tag used to specify the column name and type in the introspection tables
|
||||
const TagColumn = "column"
|
||||
|
||||
func CreateIntrospectionTables(ctx context.Context, workspaceResources *modconfig.ResourceMaps, tx pgx.Tx) error {
|
||||
// get the sql for columns which every table has
|
||||
commonColumnSql := getColumnDefinitions(modconfig.ResourceMetadata{})
|
||||
|
||||
// convert to lowercase to avoid case sensitivity
|
||||
switch strings.ToLower(viper.GetString(constants.ArgIntrospection)) {
|
||||
case constants.IntrospectionInfo:
|
||||
return populateAllIntrospectionTables(ctx, workspaceResources, tx, commonColumnSql)
|
||||
case constants.IntrospectionControl:
|
||||
return populateControlIntrospectionTables(ctx, workspaceResources, tx, commonColumnSql)
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func populateAllIntrospectionTables(ctx context.Context, workspaceResources *modconfig.ResourceMaps, tx pgx.Tx, commonColumnSql []string) error {
|
||||
utils.LogTime("db.CreateIntrospectionTables start")
|
||||
defer utils.LogTime("db.CreateIntrospectionTables end")
|
||||
|
||||
// get the create sql for each table type
|
||||
createSql := getCreateTablesSql(commonColumnSql)
|
||||
|
||||
// now get sql to populate the tables
|
||||
insertSql := getTableInsertSql(workspaceResources)
|
||||
sql := []string{createSql, insertSql}
|
||||
|
||||
_, err := tx.Exec(ctx, strings.Join(sql, "\n"))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create introspection tables: %v", err)
|
||||
}
|
||||
// return context error - this enables calling code to respond to cancellation
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
func populateControlIntrospectionTables(ctx context.Context, workspaceResources *modconfig.ResourceMaps, tx pgx.Tx, commonColumnSql []string) error {
|
||||
utils.LogTime("db.CreateIntrospectionTables start")
|
||||
defer utils.LogTime("db.CreateIntrospectionTables end")
|
||||
|
||||
// get the create sql for control and benchmark tables
|
||||
createSql := getCreateControlTablesSql(commonColumnSql)
|
||||
// now get sql to populate the control and benchmark tables
|
||||
insertSql := getControlTableInsertSql(workspaceResources)
|
||||
sql := []string{createSql, insertSql}
|
||||
|
||||
_, err := tx.Exec(ctx, strings.Join(sql, "\n"))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create introspection tables: %v", err)
|
||||
}
|
||||
|
||||
// return context error - this enables calling code to respond to cancellation
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
func getCreateTablesSql(commonColumnSql []string) string {
|
||||
var createSql []string
|
||||
createSql = append(createSql, getTableCreateSqlForResource(&modconfig.Control{}, constants.IntrospectionTableControl, commonColumnSql))
|
||||
createSql = append(createSql, getTableCreateSqlForResource(&modconfig.Query{}, constants.IntrospectionTableQuery, commonColumnSql))
|
||||
createSql = append(createSql, getTableCreateSqlForResource(&modconfig.Benchmark{}, constants.IntrospectionTableBenchmark, commonColumnSql))
|
||||
createSql = append(createSql, getTableCreateSqlForResource(&modconfig.Mod{}, constants.IntrospectionTableMod, commonColumnSql))
|
||||
createSql = append(createSql, getTableCreateSqlForResource(&modconfig.Variable{}, constants.IntrospectionTableVariable, commonColumnSql))
|
||||
createSql = append(createSql, getTableCreateSqlForResource(&modconfig.Dashboard{}, constants.IntrospectionTableDashboard, commonColumnSql))
|
||||
createSql = append(createSql, getTableCreateSqlForResource(&modconfig.DashboardContainer{}, constants.IntrospectionTableDashboardContainer, commonColumnSql))
|
||||
createSql = append(createSql, getTableCreateSqlForResource(&modconfig.DashboardCard{}, constants.IntrospectionTableDashboardCard, commonColumnSql))
|
||||
createSql = append(createSql, getTableCreateSqlForResource(&modconfig.DashboardChart{}, constants.IntrospectionTableDashboardChart, commonColumnSql))
|
||||
createSql = append(createSql, getTableCreateSqlForResource(&modconfig.DashboardFlow{}, constants.IntrospectionTableDashboardFlow, commonColumnSql))
|
||||
createSql = append(createSql, getTableCreateSqlForResource(&modconfig.DashboardGraph{}, constants.IntrospectionTableDashboardGraph, commonColumnSql))
|
||||
createSql = append(createSql, getTableCreateSqlForResource(&modconfig.DashboardHierarchy{}, constants.IntrospectionTableDashboardHierarchy, commonColumnSql))
|
||||
createSql = append(createSql, getTableCreateSqlForResource(&modconfig.DashboardImage{}, constants.IntrospectionTableDashboardImage, commonColumnSql))
|
||||
createSql = append(createSql, getTableCreateSqlForResource(&modconfig.DashboardInput{}, constants.IntrospectionTableDashboardInput, commonColumnSql))
|
||||
createSql = append(createSql, getTableCreateSqlForResource(&modconfig.DashboardTable{}, constants.IntrospectionTableDashboardTable, commonColumnSql))
|
||||
createSql = append(createSql, getTableCreateSqlForResource(&modconfig.DashboardText{}, constants.IntrospectionTableDashboardText, commonColumnSql))
|
||||
createSql = append(createSql, getTableCreateSqlForResource(&modconfig.ResourceReference{}, constants.IntrospectionTableReference, commonColumnSql))
|
||||
return strings.Join(createSql, "\n")
|
||||
}
|
||||
|
||||
func getTableInsertSql(workspaceResources *modconfig.ResourceMaps) string {
|
||||
var insertSql []string
|
||||
|
||||
for _, control := range workspaceResources.Controls {
|
||||
insertSql = append(insertSql, getTableInsertSqlForResource(control, constants.IntrospectionTableControl))
|
||||
}
|
||||
for _, query := range workspaceResources.Queries {
|
||||
insertSql = append(insertSql, getTableInsertSqlForResource(query, constants.IntrospectionTableQuery))
|
||||
}
|
||||
for _, benchmark := range workspaceResources.Benchmarks {
|
||||
insertSql = append(insertSql, getTableInsertSqlForResource(benchmark, constants.IntrospectionTableBenchmark))
|
||||
}
|
||||
for _, mod := range workspaceResources.Mods {
|
||||
if !mod.IsDefaultMod() {
|
||||
insertSql = append(insertSql, getTableInsertSqlForResource(mod, constants.IntrospectionTableMod))
|
||||
}
|
||||
}
|
||||
for _, variable := range workspaceResources.Variables {
|
||||
insertSql = append(insertSql, getTableInsertSqlForResource(variable, constants.IntrospectionTableVariable))
|
||||
}
|
||||
for _, dashboard := range workspaceResources.Dashboards {
|
||||
insertSql = append(insertSql, getTableInsertSqlForResource(dashboard, constants.IntrospectionTableDashboard))
|
||||
}
|
||||
for _, container := range workspaceResources.DashboardContainers {
|
||||
insertSql = append(insertSql, getTableInsertSqlForResource(container, constants.IntrospectionTableDashboardContainer))
|
||||
}
|
||||
for _, card := range workspaceResources.DashboardCards {
|
||||
insertSql = append(insertSql, getTableInsertSqlForResource(card, constants.IntrospectionTableDashboardCard))
|
||||
}
|
||||
for _, chart := range workspaceResources.DashboardCharts {
|
||||
insertSql = append(insertSql, getTableInsertSqlForResource(chart, constants.IntrospectionTableDashboardChart))
|
||||
}
|
||||
for _, flow := range workspaceResources.DashboardFlows {
|
||||
insertSql = append(insertSql, getTableInsertSqlForResource(flow, constants.IntrospectionTableDashboardFlow))
|
||||
}
|
||||
for _, graph := range workspaceResources.DashboardGraphs {
|
||||
insertSql = append(insertSql, getTableInsertSqlForResource(graph, constants.IntrospectionTableDashboardGraph))
|
||||
}
|
||||
for _, hierarchy := range workspaceResources.DashboardHierarchies {
|
||||
insertSql = append(insertSql, getTableInsertSqlForResource(hierarchy, constants.IntrospectionTableDashboardHierarchy))
|
||||
}
|
||||
for _, image := range workspaceResources.DashboardImages {
|
||||
insertSql = append(insertSql, getTableInsertSqlForResource(image, constants.IntrospectionTableDashboardImage))
|
||||
}
|
||||
for _, dashboardInputs := range workspaceResources.DashboardInputs {
|
||||
for _, input := range dashboardInputs {
|
||||
insertSql = append(insertSql, getTableInsertSqlForResource(input, constants.IntrospectionTableDashboardInput))
|
||||
}
|
||||
}
|
||||
for _, input := range workspaceResources.GlobalDashboardInputs {
|
||||
insertSql = append(insertSql, getTableInsertSqlForResource(input, constants.IntrospectionTableDashboardInput))
|
||||
}
|
||||
for _, table := range workspaceResources.DashboardTables {
|
||||
insertSql = append(insertSql, getTableInsertSqlForResource(table, constants.IntrospectionTableDashboardTable))
|
||||
}
|
||||
for _, text := range workspaceResources.DashboardTexts {
|
||||
insertSql = append(insertSql, getTableInsertSqlForResource(text, constants.IntrospectionTableDashboardText))
|
||||
}
|
||||
for _, reference := range workspaceResources.References {
|
||||
insertSql = append(insertSql, getTableInsertSqlForResource(reference, constants.IntrospectionTableReference))
|
||||
}
|
||||
|
||||
return strings.Join(insertSql, "\n")
|
||||
}
|
||||
|
||||
// reflect on the `column` tag for the given resource and any nested structs
|
||||
// to build the introspection table creation sql
|
||||
// NOTE: ensure the object passed to this is a pointer, as otherwise the interface type casts will return false
|
||||
func getTableCreateSqlForResource(s interface{}, tableName string, commonColumnSql []string) string {
|
||||
columnDefinitions := append(commonColumnSql, getColumnDefinitions(s)...)
|
||||
if qp, ok := s.(modconfig.QueryProvider); ok {
|
||||
columnDefinitions = append(columnDefinitions, getColumnDefinitions(qp.GetQueryProviderImpl())...)
|
||||
}
|
||||
if mti, ok := s.(modconfig.ModTreeItem); ok {
|
||||
columnDefinitions = append(columnDefinitions, getColumnDefinitions(mti.GetModTreeItemImpl())...)
|
||||
}
|
||||
if hr, ok := s.(modconfig.HclResource); ok {
|
||||
columnDefinitions = append(columnDefinitions, getColumnDefinitions(hr.GetHclResourceImpl())...)
|
||||
}
|
||||
|
||||
// Query cannot define 'query' as a property.
|
||||
// So for a steampipe_query table, we will exclude the query column.
|
||||
// Here we are removing the column named query from the 'columnDefinitions' slice.
|
||||
if tableName == "steampipe_query" {
|
||||
// find the 'query' column definition in the slice
|
||||
for i, col := range columnDefinitions {
|
||||
if col == " query text" {
|
||||
// remove it from the 'columnDefinitions' slice
|
||||
columnDefinitions = utils.RemoveElementFromSlice(columnDefinitions, i)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
tableSql := fmt.Sprintf(`create temp table %s (
|
||||
%s
|
||||
);`, tableName, strings.Join(columnDefinitions, ",\n"))
|
||||
return tableSql
|
||||
}
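Put together, the tag reflection above yields a create statement whose columns are the shared metadata columns plus each tagged field. A hedged sketch of assembling such a statement follows; the column names are illustrative, not the real steampipe_control schema.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// columns shared by every introspection table, plus resource-specific tagged columns (illustrative)
	commonColumns := []string{"  resource_name text", "  file_name text", "  start_line_number integer"}
	controlColumns := []string{"  title text", "  severity text", "  tags jsonb"}

	columnDefinitions := append(commonColumns, controlColumns...)
	tableSql := fmt.Sprintf("create temp table %s (\n%s\n);",
		"steampipe_control_example", strings.Join(columnDefinitions, ",\n"))

	fmt.Println(tableSql)
}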
|
||||
|
||||
func getCreateControlTablesSql(commonColumnSql []string) string {
|
||||
var createSql []string
|
||||
createSql = append(createSql, getTableCreateSqlForResource(&modconfig.Control{}, constants.IntrospectionTableControl, commonColumnSql))
|
||||
createSql = append(createSql, getTableCreateSqlForResource(&modconfig.Benchmark{}, constants.IntrospectionTableBenchmark, commonColumnSql))
|
||||
return strings.Join(createSql, "\n")
|
||||
}
|
||||
|
||||
func getControlTableInsertSql(workspaceResources *modconfig.ResourceMaps) string {
|
||||
var insertSql []string
|
||||
|
||||
for _, control := range workspaceResources.Controls {
|
||||
insertSql = append(insertSql, getTableInsertSqlForResource(control, constants.IntrospectionTableControl))
|
||||
}
|
||||
for _, benchmark := range workspaceResources.Benchmarks {
|
||||
insertSql = append(insertSql, getTableInsertSqlForResource(benchmark, constants.IntrospectionTableBenchmark))
|
||||
}
|
||||
|
||||
return strings.Join(insertSql, "\n")
|
||||
}
|
||||
|
||||
// getColumnDefinitions returns the sql column definitions for tagged properties of the item
|
||||
func getColumnDefinitions(item interface{}) []string {
|
||||
t := reflect.TypeOf(item)
|
||||
if t.Kind() == reflect.Pointer {
|
||||
t = t.Elem()
|
||||
}
|
||||
var columnDef []string
|
||||
val := reflect.ValueOf(item)
|
||||
if val.Kind() == reflect.Pointer {
|
||||
val = val.Elem()
|
||||
}
|
||||
for i := 0; i < val.NumField(); i++ {
|
||||
fieldName := val.Type().Field(i).Name
|
||||
field, _ := t.FieldByName(fieldName)
|
||||
columnTag, ok := newColumnTag(field)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
columnDef = append(columnDef, fmt.Sprintf(" %s %s", columnTag.Column, columnTag.ColumnType))
|
||||
}
|
||||
return columnDef
|
||||
}
|
||||
|
||||
func getTableInsertSqlForResource(item any, tableName string) string {
|
||||
// for each item there is core reflection data (i.e. reflection data which all items have)
|
||||
// and item specific reflection data
|
||||
// get the core reflection data values
|
||||
var valuesCore, columnsCore []string
|
||||
if rwm, ok := item.(modconfig.ResourceWithMetadata); ok {
|
||||
valuesCore, columnsCore = getColumnValues(rwm.GetMetadata())
|
||||
}
|
||||
|
||||
// get item specific reflection data values from the item
|
||||
valuesItem, columnsItem := getColumnValues(item)
|
||||
columns := append(columnsCore, columnsItem...)
|
||||
values := append(valuesCore, valuesItem...)
|
||||
|
||||
// get properties from embedded structs
|
||||
if qp, ok := item.(modconfig.QueryProvider); ok {
|
||||
valuesItem, columnsItem = getColumnValues(qp.GetQueryProviderImpl())
|
||||
columns = append(columns, columnsItem...)
|
||||
values = append(values, valuesItem...)
|
||||
}
|
||||
if mti, ok := item.(modconfig.ModTreeItem); ok {
|
||||
valuesItem, columnsItem = getColumnValues(mti.GetModTreeItemImpl())
|
||||
columns = append(columns, columnsItem...)
|
||||
values = append(values, valuesItem...)
|
||||
}
|
||||
if hr, ok := item.(modconfig.HclResource); ok {
|
||||
valuesItem, columnsItem = getColumnValues(hr.GetHclResourceImpl())
|
||||
columns = append(columns, columnsItem...)
|
||||
values = append(values, valuesItem...)
|
||||
}
|
||||
|
||||
insertSql := fmt.Sprintf(`insert into %s (%s) values(%s);`, tableName, strings.Join(columns, ","), strings.Join(values, ","))
|
||||
return insertSql
|
||||
}
|
||||
|
||||
// use reflection to evaluate the column names and values from item - return as 2 separate arrays
|
||||
func getColumnValues(item interface{}) ([]string, []string) {
|
||||
if item == nil {
|
||||
return nil, nil
|
||||
}
|
||||
var columns, values []string
|
||||
|
||||
// dereference item in case it is a pointer
|
||||
item = helpers.DereferencePointer(item)
|
||||
|
||||
val := reflect.ValueOf(helpers.DereferencePointer(item))
|
||||
t := reflect.TypeOf(item)
|
||||
|
||||
for i := 0; i < val.NumField(); i++ {
|
||||
fieldName := val.Type().Field(i).Name
|
||||
field, _ := t.FieldByName(fieldName)
|
||||
|
||||
columnTag, ok := newColumnTag(field)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
value, ok := helpers.GetFieldValueFromInterface(item, fieldName)
|
||||
|
||||
// all fields will be pointers
|
||||
value = helpers.DereferencePointer(value)
|
||||
if !ok || value == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// formatIntrospectionTableValue escapes values, and for json columns, converts them into escaped JSON
|
||||
// ignore JSON conversion errors - trust that array values read from hcl will be convertible
|
||||
formattedValue, _ := formatIntrospectionTableValue(value, columnTag)
|
||||
values = append(values, formattedValue)
|
||||
columns = append(columns, columnTag.Column)
|
||||
}
|
||||
return values, columns
|
||||
}
|
||||
|
||||
// convert the value into a postgres format value which can be used in an insert statement
|
||||
func formatIntrospectionTableValue(item interface{}, columnTag *ColumnTag) (string, error) {
|
||||
// special handling for cty.Type and cty.Value data
|
||||
switch t := item.(type) {
|
||||
// if the item is a cty value, we always represent it as json
|
||||
case cty.Value:
|
||||
if columnTag.ColumnType != "jsonb" {
|
||||
return "nil", fmt.Errorf("data for column %s is of type cty.Value so column type should be 'jsonb' but is actually %s", columnTag.Column, columnTag.ColumnType)
|
||||
}
|
||||
str, err := hclhelpers.CtyToJSON(t)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return db_common.PgEscapeString(str), nil
|
||||
case cty.Type:
|
||||
// if the item is a cty type, we always represent it as text (its friendly name)
|
||||
if columnTag.ColumnType != "text" {
|
||||
return "nil", fmt.Errorf("data for column %s is of type cty.Type so column type should be 'text' but is actually %s", columnTag.Column, columnTag.ColumnType)
|
||||
}
|
||||
return db_common.PgEscapeString(t.FriendlyName()), nil
|
||||
}
|
||||
|
||||
switch columnTag.ColumnType {
|
||||
case "jsonb":
|
||||
jsonBytes, err := json.Marshal(reflect.ValueOf(item).Interface())
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
res := db_common.PgEscapeString(string(jsonBytes))
|
||||
return res, nil
|
||||
case "integer", "numeric", "decimal", "boolean":
|
||||
return typeHelpers.ToString(item), nil
|
||||
default:
|
||||
// for string column, escape the data
|
||||
return db_common.PgEscapeString(typeHelpers.ToString(item)), nil
|
||||
}
|
||||
}
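The formatter renders jsonb columns as escaped JSON and quotes everything else that is not numeric or boolean, so each value can be spliced directly into the generated insert statement. A simplified sketch of those branches, with a local pgEscape standing in for db_common.PgEscapeString:

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// stand-in for db_common.PgEscapeString: quote the value for use as a SQL string literal
func pgEscape(s string) string {
	return "'" + strings.ReplaceAll(s, "'", "''") + "'"
}

// formatValue mimics the jsonb / numeric / text branches of the real formatter
func formatValue(value any, columnType string) (string, error) {
	switch columnType {
	case "jsonb":
		b, err := json.Marshal(value)
		if err != nil {
			return "", err
		}
		return pgEscape(string(b)), nil
	case "integer", "numeric", "decimal", "boolean":
		return fmt.Sprintf("%v", value), nil
	default:
		// string columns: escape the data
		return pgEscape(fmt.Sprintf("%v", value)), nil
	}
}

func main() {
	title, _ := formatValue("Bob's control", "text")
	tags, _ := formatValue(map[string]string{"service": "s3"}, "jsonb")
	count, _ := formatValue(42, "integer")
	fmt.Printf("insert into steampipe_control_example (title, tags, count) values(%s, %s, %s);\n",
		title, tags, count)
}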
|
||||
@@ -23,7 +23,7 @@ type LocalDbClient struct {
|
||||
}
|
||||
|
||||
// GetLocalClient starts service if needed and creates a new LocalDbClient
|
||||
func GetLocalClient(ctx context.Context, invoker constants.Invoker, onConnectionCallback db_client.DbConnectionCallback, opts ...db_client.ClientOption) (*LocalDbClient, error_helpers.ErrorAndWarnings) {
|
||||
func GetLocalClient(ctx context.Context, invoker constants.Invoker, opts ...db_client.ClientOption) (*LocalDbClient, error_helpers.ErrorAndWarnings) {
|
||||
utils.LogTime("db.GetLocalClient start")
|
||||
defer utils.LogTime("db.GetLocalClient end")
|
||||
|
||||
@@ -45,7 +45,7 @@ func GetLocalClient(ctx context.Context, invoker constants.Invoker, onConnection
|
||||
}
|
||||
|
||||
log.Printf("[INFO] newLocalClient")
|
||||
client, err := newLocalClient(ctx, invoker, onConnectionCallback, opts...)
|
||||
client, err := newLocalClient(ctx, invoker, opts...)
|
||||
if err != nil {
|
||||
ShutdownService(ctx, invoker)
|
||||
startResult.Error = err
|
||||
@@ -67,7 +67,7 @@ func GetLocalClient(ctx context.Context, invoker constants.Invoker, onConnection
|
||||
|
||||
// newLocalClient verifies that the local database instance is running and returns a LocalDbClient to interact with it
|
||||
// (This FAILS if local service is not running - use GetLocalClient to start service first)
|
||||
func newLocalClient(ctx context.Context, invoker constants.Invoker, onConnectionCallback db_client.DbConnectionCallback, opts ...db_client.ClientOption) (*LocalDbClient, error) {
|
||||
func newLocalClient(ctx context.Context, invoker constants.Invoker, opts ...db_client.ClientOption) (*LocalDbClient, error) {
|
||||
utils.LogTime("db.newLocalClient start")
|
||||
defer utils.LogTime("db.newLocalClient end")
|
||||
|
||||
@@ -75,7 +75,7 @@ func newLocalClient(ctx context.Context, invoker constants.Invoker, onConnection
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dbClient, err := db_client.NewDbClient(ctx, connString, onConnectionCallback, opts...)
|
||||
dbClient, err := db_client.NewDbClient(ctx, connString, opts...)
|
||||
if err != nil {
|
||||
log.Printf("[TRACE] error getting local client %s", err.Error())
|
||||
return nil, err
|
||||
|
||||
@@ -447,8 +447,8 @@ func createCmd(ctx context.Context, port int, listenAddresses []string) *exec.Cm
|
||||
// by this time, we are sure that the port is free to listen to
|
||||
"-p", fmt.Sprint(port),
|
||||
"-c", fmt.Sprintf("listen_addresses=%s", strings.Join(listenAddresses, ",")),
|
||||
"-c", fmt.Sprintf("application_name=%s", constants.AppName),
|
||||
"-c", fmt.Sprintf("cluster_name=%s", constants.AppName),
|
||||
"-c", fmt.Sprintf("application_name=%s", app_specific.AppName),
|
||||
"-c", fmt.Sprintf("cluster_name=%s", app_specific.AppName),
|
||||
|
||||
// log directory
|
||||
"-c", fmt.Sprintf("log_directory=%s", filepaths.EnsureLogDir()),
|
||||
@@ -667,7 +667,7 @@ func isSteampipePostgresProcess(ctx context.Context, cmdline []string) bool {
|
||||
}
|
||||
if strings.Contains(cmdline[0], "postgres") {
|
||||
// this is a postgres process - but is it a steampipe service?
|
||||
return helpers.StringSliceContains(cmdline, fmt.Sprintf("application_name=%s", constants.AppName))
|
||||
return helpers.StringSliceContains(cmdline, fmt.Sprintf("application_name=%s", app_specific.AppName))
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -1,41 +1,33 @@
|
||||
package export
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/turbot/steampipe/pkg/constants"
|
||||
"github.com/turbot/steampipe/pkg/dashboard/dashboardtypes"
|
||||
)
|
||||
|
||||
type SnapshotExporter struct {
|
||||
ExporterBase
|
||||
}
|
||||
|
||||
func (e *SnapshotExporter) Export(_ context.Context, input ExportSourceData, filePath string) error {
|
||||
snapshot, ok := input.(*dashboardtypes.SteampipeSnapshot)
|
||||
if !ok {
|
||||
return fmt.Errorf("SnapshotExporter input must be *dashboardtypes.SteampipeSnapshot")
|
||||
}
|
||||
snapshotBytes, err := snapshot.AsStrippedJson(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
res := strings.NewReader(fmt.Sprintf("%s\n", string(snapshotBytes)))
|
||||
|
||||
return Write(filePath, res)
|
||||
}
|
||||
|
||||
func (e *SnapshotExporter) FileExtension() string {
|
||||
return constants.SnapshotExtension
|
||||
}
|
||||
|
||||
func (e *SnapshotExporter) Name() string {
|
||||
return constants.OutputFormatSnapshot
|
||||
}
|
||||
|
||||
func (*SnapshotExporter) Alias() string {
|
||||
return "sps"
|
||||
}
|
||||
//
|
||||
//type SnapshotExporter struct {
|
||||
// ExporterBase
|
||||
//}
|
||||
//
|
||||
//func (e *SnapshotExporter) Export(_ context.Context, input ExportSourceData, filePath string) error {
|
||||
// snapshot, ok := input.(*dashboardtypes.SteampipeSnapshot)
|
||||
// if !ok {
|
||||
// return fmt.Errorf("SnapshotExporter input must be *dashboardtypes.SteampipeSnapshot")
|
||||
// }
|
||||
// snapshotBytes, err := snapshot.AsStrippedJson(false)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
//
|
||||
// res := strings.NewReader(fmt.Sprintf("%s\n", string(snapshotBytes)))
|
||||
//
|
||||
// return Write(filePath, res)
|
||||
//}
|
||||
//
|
||||
//func (e *SnapshotExporter) FileExtension() string {
|
||||
// return constants.SnapshotExtension
|
||||
//}
|
||||
//
|
||||
//func (e *SnapshotExporter) Name() string {
|
||||
// return constants.OutputFormatSnapshot
|
||||
//}
|
||||
//
|
||||
//func (*SnapshotExporter) Alias() string {
|
||||
// return "sps"
|
||||
//}
|
||||
|
||||
@@ -5,8 +5,8 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/viper"
|
||||
"github.com/turbot/pipe-fittings/constants"
|
||||
"github.com/turbot/steampipe/pkg/cloud"
|
||||
"github.com/turbot/steampipe/pkg/constants"
|
||||
"github.com/turbot/steampipe/pkg/error_helpers"
|
||||
"github.com/turbot/steampipe/pkg/steampipeconfig"
|
||||
)
|
||||
|
||||
@@ -3,32 +3,25 @@ package initialisation
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
error_helpers2 "github.com/turbot/pipe-fittings/error_helpers"
|
||||
"github.com/turbot/pipe-fittings/plugin"
|
||||
"github.com/turbot/steampipe/pkg/steampipeconfig"
|
||||
"github.com/turbot/pipe-fittings/app_specific"
|
||||
"github.com/turbot/pipe-fittings/error_helpers"
|
||||
"github.com/turbot/steampipe/pkg/constants"
|
||||
"log"
|
||||
|
||||
"github.com/jackc/pgx/v5"
|
||||
"github.com/spf13/viper"
|
||||
"github.com/turbot/go-kit/helpers"
|
||||
"github.com/turbot/steampipe-plugin-sdk/v5/sperr"
|
||||
pconstants "github.com/turbot/pipe-fittings/constants"
|
||||
"github.com/turbot/steampipe-plugin-sdk/v5/telemetry"
|
||||
"github.com/turbot/steampipe/pkg/constants"
|
||||
"github.com/turbot/steampipe/pkg/db/db_client"
|
||||
"github.com/turbot/steampipe/pkg/db/db_common"
|
||||
"github.com/turbot/steampipe/pkg/db/db_local"
|
||||
"github.com/turbot/steampipe/pkg/error_helpers"
|
||||
"github.com/turbot/steampipe/pkg/export"
|
||||
"github.com/turbot/steampipe/pkg/modinstaller"
|
||||
"github.com/turbot/steampipe/pkg/statushooks"
|
||||
"github.com/turbot/steampipe/pkg/steampipeconfig/modconfig"
|
||||
"github.com/turbot/steampipe/pkg/workspace"
|
||||
)
|
||||
|
||||
type InitData struct {
|
||||
Workspace *workspace.Workspace
|
||||
Client db_common.Client
|
||||
Result *db_common.InitResult
|
||||
Client db_common.Client
|
||||
Result *db_common.InitResult
|
||||
|
||||
ShutdownTelemetry func()
|
||||
ExportManager *export.Manager
|
||||
@@ -73,71 +66,26 @@ func (i *InitData) Init(ctx context.Context, invoker constants.Invoker, opts ...
|
||||
|
||||
log.Printf("[INFO] Initializing...")
|
||||
|
||||
// code after this depends on i.Workspace being defined - make sure that it is
|
||||
if i.Workspace == nil {
|
||||
i.Result.Error = sperr.WrapWithRootMessage(error_helpers.InvalidStateError, "InitData.Init called before setting up Workspace")
|
||||
return
|
||||
}
|
||||
|
||||
statushooks.SetStatus(ctx, "Initializing")
|
||||
|
||||
// initialise telemetry
|
||||
shutdownTelemetry, err := telemetry.Init(constants.AppName)
|
||||
shutdownTelemetry, err := telemetry.Init(app_specific.AppName)
|
||||
if err != nil {
|
||||
i.Result.AddWarnings(err.Error())
|
||||
} else {
|
||||
i.ShutdownTelemetry = shutdownTelemetry
|
||||
}
|
||||
|
||||
// install mod dependencies if needed
|
||||
if viper.GetBool(constants.ArgModInstall) {
|
||||
statushooks.SetStatus(ctx, "Installing workspace dependencies")
|
||||
log.Printf("[INFO] Installing workspace dependencies")
|
||||
|
||||
opts := modinstaller.NewInstallOpts(i.Workspace.Mod)
|
||||
// use force install so that errors are ignored during installation
|
||||
// (we are validating prereqs later)
|
||||
opts.Force = true
|
||||
_, err := modinstaller.InstallWorkspaceDependencies(ctx, opts)
|
||||
if err != nil {
|
||||
i.Result.Error = err
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// TODO #KAI what to do with cloud metadata
|
||||
// retrieve cloud metadata
|
||||
cloudMetadata, err := getCloudMetadata(ctx)
|
||||
if err != nil {
|
||||
i.Result.Error = err
|
||||
return
|
||||
}
|
||||
|
||||
// set cloud metadata (may be nil)
|
||||
i.Workspace.CloudMetadata = cloudMetadata
|
||||
|
||||
statushooks.SetStatus(ctx, "Checking for required plugins")
|
||||
log.Printf("[INFO] Checking for required plugins")
|
||||
pluginsInstalled, err := plugin.GetInstalledPlugins(ctx, steampipeconfig.GlobalConfig.PluginVersions)
|
||||
if err != nil {
|
||||
i.Result.Error = err
|
||||
return
|
||||
}
|
||||
|
||||
// no need to validate local steampipe and plugin versions when connecting to a remote steampipe database
|
||||
// ArgConnectionString is empty when connecting to local database
|
||||
if connectionString := viper.GetString(constants.ArgConnectionString); connectionString == "" {
|
||||
// validate steampipe version and required plugin version
|
||||
validationWarnings := validateModRequirementsRecursively(i.Workspace.Mod, pluginsInstalled)
|
||||
i.Result.AddWarnings(validationWarnings...)
|
||||
}
|
||||
|
||||
// if introspection tables are enabled, setup the session data callback
|
||||
var ensureSessionData db_client.DbConnectionCallback
|
||||
if viper.GetString(constants.ArgIntrospection) != constants.IntrospectionNone {
|
||||
ensureSessionData = func(ctx context.Context, conn *pgx.Conn) error {
|
||||
return workspace.EnsureSessionData(ctx, i.Workspace.GetResourceMaps(), conn)
|
||||
}
|
||||
}
|
||||
//cloudMetadata, err := getCloudMetadata(ctx)
|
||||
//if err != nil {
|
||||
// i.Result.Error = err
|
||||
// return
|
||||
//}
|
||||
//
|
||||
//// set cloud metadata (may be nil)
|
||||
//i.Workspace.CloudMetadata = cloudMetadata
|
||||
|
||||
// get a client
|
||||
// add a message rendering function to the context - this is used for the fdw update message and
|
||||
@@ -148,7 +96,7 @@ func (i *InitData) Init(ctx context.Context, invoker constants.Invoker, opts ...
|
||||
|
||||
statushooks.SetStatus(ctx, "Connecting to steampipe database")
|
||||
log.Printf("[INFO] Connecting to steampipe database")
|
||||
client, errorsAndWarnings := GetDbClient(getClientCtx, invoker, ensureSessionData, opts...)
|
||||
client, errorsAndWarnings := GetDbClient(getClientCtx, invoker, opts...)
|
||||
if errorsAndWarnings.Error != nil {
|
||||
i.Result.Error = errorsAndWarnings.Error
|
||||
return
|
||||
@@ -166,42 +114,18 @@ func (i *InitData) Init(ctx context.Context, invoker constants.Invoker, opts ...
|
||||
i.Client = client
|
||||
}
|
||||
|
||||
func validateModRequirementsRecursively(mod *modconfig.Mod, pluginVersionMap map[string]*plugin.PluginVersionString) []string {
|
||||
var validationErrors []string
|
||||
|
||||
// validate this mod
|
||||
for _, err := range mod.ValidateRequirements(pluginVersionMap) {
|
||||
validationErrors = append(validationErrors, err.Error())
|
||||
}
|
||||
|
||||
// validate dependent mods
|
||||
for childDependencyName, childMod := range mod.ResourceMaps.Mods {
|
||||
// TODO : The 'mod.DependencyName == childMod.DependencyName' check has to be done because
|
||||
// of a bug in the resource loading code which also puts the mod itself into the resource map
|
||||
// [https://github.com/turbot/steampipe/issues/3341]
|
||||
if childDependencyName == "local" || mod.DependencyName == childMod.DependencyName {
|
||||
// this is a reference to self - skip (otherwise we will end up with a recursion loop)
|
||||
continue
|
||||
}
|
||||
childValidationErrors := validateModRequirementsRecursively(childMod, pluginVersionMap)
|
||||
validationErrors = append(validationErrors, childValidationErrors...)
|
||||
}
|
||||
|
||||
return validationErrors
|
||||
}
|
||||
|
||||
// GetDbClient either creates a DB client using the configured connection string (if present) or creates a LocalDbClient
|
||||
func GetDbClient(ctx context.Context, invoker constants.Invoker, onConnectionCallback db_client.DbConnectionCallback, opts ...db_client.ClientOption) (db_common.Client, error_helpers2.ErrorAndWarnings) {
|
||||
if connectionString := viper.GetString(constants.ArgConnectionString); connectionString != "" {
|
||||
func GetDbClient(ctx context.Context, invoker constants.Invoker, opts ...db_client.ClientOption) (db_common.Client, error_helpers.ErrorAndWarnings) {
|
||||
if connectionString := viper.GetString(pconstants.ArgConnectionString); connectionString != "" {
|
||||
statushooks.SetStatus(ctx, "Connecting to remote Steampipe database")
|
||||
client, err := db_client.NewDbClient(ctx, connectionString, onConnectionCallback, opts...)
|
||||
return client, error_helpers2.NewErrorsAndWarning(err)
|
||||
client, err := db_client.NewDbClient(ctx, connectionString, opts...)
|
||||
return client, error_helpers.NewErrorsAndWarning(err)
|
||||
}
|
||||
|
||||
statushooks.SetStatus(ctx, "Starting local Steampipe database")
|
||||
log.Printf("[INFO] Starting local Steampipe database")
|
||||
|
||||
return db_local.GetLocalClient(ctx, invoker, onConnectionCallback, opts...)
|
||||
return db_local.GetLocalClient(ctx, invoker, opts...)
|
||||
}
|
||||
|
||||
func (i *InitData) Cleanup(ctx context.Context) {
|
||||
@@ -211,7 +135,4 @@ func (i *InitData) Cleanup(ctx context.Context) {
|
||||
if i.ShutdownTelemetry != nil {
|
||||
i.ShutdownTelemetry()
|
||||
}
|
||||
if i.Workspace != nil {
|
||||
i.Workspace.Close()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
pconstants "github.com/turbot/pipe-fittings/constants"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
@@ -91,7 +92,7 @@ func newInteractiveClient(ctx context.Context, initData *query.InitData, result
|
||||
interactiveBuffer: []string{},
|
||||
autocompleteOnEmpty: false,
|
||||
initResultChan: make(chan *db_common.InitResult, 1),
|
||||
highlighter: getHighlighter(viper.GetString(constants.ArgTheme)),
|
||||
highlighter: getHighlighter(viper.GetString(pconstants.ArgTheme)),
|
||||
suggestions: newAutocompleteSuggestions(),
|
||||
}
|
||||
|
||||
@@ -464,7 +465,7 @@ func (c *InteractiveClient) getQuery(ctx context.Context, line string) *modconfi
|
||||
}
|
||||
|
||||
// in case of a named query call with params, parse the where clause
|
||||
resolvedQuery, queryProvider, err := c.workspace().ResolveQueryAndArgsFromSQLString(queryString)
|
||||
resolvedQuery, err := query.ResolveQueryAndArgsFromSQLString(queryString)
|
||||
if err != nil {
|
||||
// if we fail to resolve:
|
||||
// - show error but do not return it so we stay in the prompt
|
||||
@@ -474,12 +475,11 @@ func (c *InteractiveClient) getQuery(ctx context.Context, line string) *modconfi
|
||||
error_helpers.ShowError(ctx, err)
|
||||
return nil
|
||||
}
|
||||
isNamedQuery := queryProvider != nil
|
||||
|
||||
// should we execute?
|
||||
// we will NOT execute if we are in multiline mode, there is no semi-colon
|
||||
// and it is NOT a metaquery or a named query
|
||||
if !c.shouldExecute(queryString, isNamedQuery) {
|
||||
if !c.shouldExecute(queryString) {
|
||||
// if we are not executing, do not store history
|
||||
historyEntry = ""
|
||||
// do not clear interactive buffer
|
||||
@@ -500,7 +500,7 @@ func (c *InteractiveClient) getQuery(ctx context.Context, line string) *modconfi
|
||||
return nil
|
||||
}
|
||||
// if this is a multiline query, update history entry
|
||||
if !isNamedQuery && len(strings.Split(resolvedQuery.ExecuteSQL, "\n")) > 1 {
|
||||
if len(strings.Split(resolvedQuery.ExecuteSQL, "\n")) > 1 {
|
||||
historyEntry = resolvedQuery.ExecuteSQL
|
||||
}
|
||||
|
||||
@@ -557,11 +557,7 @@ func (c *InteractiveClient) restartInteractiveSession() {
|
||||
c.ClosePrompt(c.afterClose)
|
||||
}
|
||||
|
||||
func (c *InteractiveClient) shouldExecute(line string, namedQuery bool) bool {
|
||||
if namedQuery {
|
||||
// execute named queries with no ';' even in multiline mode
|
||||
return true
|
||||
}
|
||||
func (c *InteractiveClient) shouldExecute(line string) bool {
|
||||
if !cmdconfig.Viper().GetBool(constants.ArgMultiLine) {
|
||||
// NOT multiline mode
|
||||
return true
|
||||
|
||||
@@ -3,7 +3,6 @@ package interactive
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/spf13/viper"
|
||||
"log"
|
||||
"strings"
|
||||
|
||||
@@ -13,7 +12,6 @@ import (
|
||||
"github.com/turbot/steampipe/pkg/constants"
|
||||
"github.com/turbot/steampipe/pkg/db/db_common"
|
||||
"github.com/turbot/steampipe/pkg/steampipeconfig"
|
||||
"github.com/turbot/steampipe/pkg/steampipeconfig/modconfig"
|
||||
)
|
||||
|
||||
func (c *InteractiveClient) initialiseSuggestions(ctx context.Context) error {
|
||||
@@ -27,7 +25,7 @@ func (c *InteractiveClient) initialiseSuggestions(ctx context.Context) error {
|
||||
|
||||
connectionStateMap, err := steampipeconfig.LoadConnectionState(ctx, conn.Conn(), steampipeconfig.WithWaitUntilLoading())
|
||||
if err != nil {
|
||||
c.initialiseSuggestionsLegacy()
|
||||
log.Printf("[WARN] could not load connection state: %v", err)
|
||||
//nolint:golint,nilerr // valid condition - not an error
|
||||
return nil
|
||||
}
|
||||
@@ -49,7 +47,7 @@ func (c *InteractiveClient) initialiseSchemaAndTableSuggestions(connectionStateM
|
||||
// unqualified table names
|
||||
// use lookup to avoid dupes from dynamic plugins
|
||||
// (this is needed as GetFirstSearchPathConnectionForPlugins will return ALL dynamic connections)
|
||||
var unqualifiedTablesToAdd = getIntrospectionTableSuggestions()
|
||||
var unqualifiedTablesToAdd = make(map[string]struct{})
|
||||
|
||||
// add connection state and rate limit
|
||||
unqualifiedTablesToAdd[constants.ConnectionTable] = struct{}{}
|
||||
@@ -106,83 +104,8 @@ func (c *InteractiveClient) initialiseSchemaAndTableSuggestions(connectionStateM
|
||||
}
|
||||
}
|
||||
|
||||
func getIntrospectionTableSuggestions() map[string]struct{} {
|
||||
res := make(map[string]struct{})
|
||||
switch strings.ToLower(viper.GetString(constants.ArgIntrospection)) {
|
||||
case constants.IntrospectionInfo:
|
||||
res[constants.IntrospectionTableQuery] = struct{}{}
|
||||
res[constants.IntrospectionTableControl] = struct{}{}
|
||||
res[constants.IntrospectionTableBenchmark] = struct{}{}
|
||||
res[constants.IntrospectionTableMod] = struct{}{}
|
||||
res[constants.IntrospectionTableDashboard] = struct{}{}
|
||||
res[constants.IntrospectionTableDashboardContainer] = struct{}{}
|
||||
res[constants.IntrospectionTableDashboardCard] = struct{}{}
|
||||
res[constants.IntrospectionTableDashboardChart] = struct{}{}
|
||||
res[constants.IntrospectionTableDashboardFlow] = struct{}{}
|
||||
res[constants.IntrospectionTableDashboardGraph] = struct{}{}
|
||||
res[constants.IntrospectionTableDashboardHierarchy] = struct{}{}
|
||||
res[constants.IntrospectionTableDashboardImage] = struct{}{}
|
||||
res[constants.IntrospectionTableDashboardInput] = struct{}{}
|
||||
res[constants.IntrospectionTableDashboardTable] = struct{}{}
|
||||
res[constants.IntrospectionTableDashboardText] = struct{}{}
|
||||
res[constants.IntrospectionTableVariable] = struct{}{}
|
||||
res[constants.IntrospectionTableReference] = struct{}{}
|
||||
case constants.IntrospectionControl:
|
||||
res[constants.IntrospectionTableControl] = struct{}{}
|
||||
res[constants.IntrospectionTableBenchmark] = struct{}{}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func (c *InteractiveClient) initialiseQuerySuggestions() {
|
||||
workspaceModName := c.initData.Workspace.Mod.Name()
|
||||
resourceFunc := func(item modconfig.HclResource) (continueWalking bool, err error) {
|
||||
continueWalking = true
|
||||
|
||||
// should we include this item
|
||||
qp, ok := item.(modconfig.QueryProvider)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
if qp.GetQuery() == nil && qp.GetSQL() == nil {
|
||||
return
|
||||
}
|
||||
rm := item.(modconfig.ResourceWithMetadata)
|
||||
if rm.IsAnonymous() {
|
||||
return
|
||||
}
|
||||
mod := qp.GetMod()
|
||||
isLocal := mod.Name() == workspaceModName
|
||||
itemType := item.BlockType()
|
||||
|
||||
// only include global inputs
|
||||
if itemType == modconfig.BlockTypeInput {
|
||||
if _, ok := c.initData.Workspace.Mod.ResourceMaps.GlobalDashboardInputs[item.Name()]; !ok {
|
||||
return
|
||||
}
|
||||
}
|
||||
// special case for query
|
||||
if itemType == modconfig.BlockTypeQuery {
|
||||
itemType = "named query"
|
||||
}
|
||||
if isLocal {
|
||||
suggestion := c.newSuggestion(itemType, qp.GetDescription(), qp.GetUnqualifiedName())
|
||||
c.suggestions.unqualifiedQueries = append(c.suggestions.unqualifiedQueries, suggestion)
|
||||
} else {
|
||||
suggestion := c.newSuggestion(itemType, qp.GetDescription(), qp.Name())
|
||||
c.suggestions.queriesByMod[mod.ShortName] = append(c.suggestions.queriesByMod[mod.ShortName], suggestion)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
c.workspace().GetResourceMaps().WalkResources(resourceFunc)
|
||||
|
||||
// populate mod suggestions
|
||||
for mod := range c.suggestions.queriesByMod {
|
||||
suggestion := c.newSuggestion("mod", "", mod)
|
||||
c.suggestions.mods = append(c.suggestions.mods, suggestion)
|
||||
}
|
||||
// TODO #KAI add sql files???
|
||||
}
|
||||
|
||||
func sanitiseTableName(strToEscape string) string {
|
||||
|
||||
@@ -1,128 +0,0 @@
|
||||
package interactive
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"github.com/c-bata/go-prompt"
|
||||
"github.com/turbot/go-kit/helpers"
|
||||
"github.com/turbot/steampipe/pkg/steampipeconfig/modconfig"
|
||||
"golang.org/x/exp/maps"
|
||||
)
|
||||
|
||||
func (c *InteractiveClient) initialiseSuggestionsLegacy() {
|
||||
c.initialiseQuerySuggestionsLegacy()
|
||||
c.initialiseTableSuggestionsLegacy()
|
||||
}
|
||||
|
||||
func (c *InteractiveClient) initialiseQuerySuggestionsLegacy() {
|
||||
var res []prompt.Suggest
|
||||
|
||||
workspaceModName := c.initData.Workspace.Mod.Name()
|
||||
resourceFunc := func(item modconfig.HclResource) (continueWalking bool, err error) {
|
||||
continueWalking = true
|
||||
|
||||
qp, ok := item.(modconfig.QueryProvider)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
modTreeItem, ok := item.(modconfig.ModTreeItem)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
if qp.GetQuery() == nil && qp.GetSQL() == nil {
|
||||
return
|
||||
}
|
||||
rm := item.(modconfig.ResourceWithMetadata)
|
||||
if rm.IsAnonymous() {
|
||||
return
|
||||
}
|
||||
isLocal := modTreeItem.GetMod().Name() == workspaceModName
|
||||
itemType := item.BlockType()
|
||||
// only include global inputs
|
||||
if itemType == modconfig.BlockTypeInput {
|
||||
if _, ok := c.initData.Workspace.Mod.ResourceMaps.GlobalDashboardInputs[item.Name()]; !ok {
|
||||
return
|
||||
}
|
||||
}
|
||||
// special case for query
|
||||
if itemType == modconfig.BlockTypeQuery {
|
||||
itemType = "named query"
|
||||
}
|
||||
name := qp.Name()
|
||||
if isLocal {
|
||||
name = qp.GetUnqualifiedName()
|
||||
}
|
||||
|
||||
res = append(res, c.newSuggestion(itemType, qp.GetDescription(), name))
|
||||
return
|
||||
}
|
||||
|
||||
c.workspace().GetResourceMaps().WalkResources(resourceFunc)
|
||||
|
||||
// sort the suggestions
|
||||
sort.Slice(res, func(i, j int) bool {
|
||||
return res[i].Text < res[j].Text
|
||||
})
|
||||
c.suggestions.unqualifiedQueries = res
|
||||
}
|
||||
|
||||
// initialiseTableSuggestionsLegacy builds a list of schema and table query suggestions
|
||||
func (c *InteractiveClient) initialiseTableSuggestionsLegacy() {
|
||||
|
||||
if c.schemaMetadata == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// schema names
|
||||
var schemasToAdd []string
|
||||
// unqualified table names - initialise to the introspection table names
|
||||
var unqualifiedTablesToAddMap = make(map[string]struct{})
|
||||
var unqualifiedTablesToAdd []string
|
||||
|
||||
// keep track of which plugins we have added unqualified tables for
|
||||
//pluginSchemaMap := map[string]bool{}
|
||||
|
||||
for schemaName, schemaDetails := range c.schemaMetadata.Schemas {
|
||||
// fully qualified table names
|
||||
var qualifiedTablesToAdd []string
|
||||
isTemporarySchema := schemaName == c.schemaMetadata.TemporarySchemaName
|
||||
|
||||
// add the schema into the list of schema
|
||||
if !isTemporarySchema {
|
||||
schemasToAdd = append(schemasToAdd, schemaName)
|
||||
}
|
||||
|
||||
// add qualified names of all tables
|
||||
for tableName := range schemaDetails {
|
||||
if !isTemporarySchema {
|
||||
|
||||
qualifiedTablesToAdd = append(qualifiedTablesToAdd, fmt.Sprintf("%s.%s", schemaName, sanitiseTableName(tableName)))
|
||||
|
||||
if helpers.StringSliceContains(c.client().GetRequiredSessionSearchPath(), schemaName) {
|
||||
unqualifiedTablesToAddMap[tableName] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sort.Strings(qualifiedTablesToAdd)
|
||||
var tableSuggestions []prompt.Suggest
|
||||
for _, t := range qualifiedTablesToAdd {
|
||||
tableSuggestions = append(tableSuggestions, prompt.Suggest{Text: t, Description: "Table", Output: sanitiseTableName(t)})
|
||||
}
|
||||
c.suggestions.tablesBySchema[schemaName] = tableSuggestions
|
||||
}
|
||||
|
||||
sort.Strings(schemasToAdd)
|
||||
for _, schema := range schemasToAdd {
|
||||
// we don't need to escape schema names, since schema names are derived from connection names
|
||||
// which are validated so that we don't end up with names which need it
|
||||
c.suggestions.schemas = append(c.suggestions.schemas, prompt.Suggest{Text: schema, Description: "Schema", Output: schema})
|
||||
}
|
||||
|
||||
unqualifiedTablesToAdd = maps.Keys(unqualifiedTablesToAddMap)
|
||||
sort.Strings(unqualifiedTablesToAdd)
|
||||
for _, table := range unqualifiedTablesToAdd {
|
||||
c.suggestions.unqualifiedTables = append(c.suggestions.unqualifiedTables, prompt.Suggest{Text: table, Description: "Table", Output: sanitiseTableName(table)})
|
||||
}
|
||||
}
|
||||
@@ -6,13 +6,10 @@ import (
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/viper"
|
||||
"github.com/turbot/go-kit/helpers"
|
||||
"github.com/turbot/steampipe/pkg/constants"
|
||||
"github.com/turbot/steampipe/pkg/db/db_common"
|
||||
"github.com/turbot/steampipe/pkg/error_helpers"
|
||||
"github.com/turbot/steampipe/pkg/statushooks"
|
||||
"github.com/turbot/steampipe/pkg/workspace"
|
||||
)
|
||||
|
||||
// init data has arrived, handle any errors/warnings/messages
|
||||
@@ -46,12 +43,6 @@ func (c *InteractiveClient) handleInitResult(ctx context.Context, initResult *db
|
||||
// initialise autocomplete suggestions
|
||||
//nolint:golint,errcheck // worst case is we won't have autocomplete - this is not a failure
|
||||
c.initialiseSuggestions(ctx)
|
||||
// tell the workspace to reset the prompt after displaying async filewatcher messages
|
||||
c.initData.Workspace.SetOnFileWatcherEventMessages(func() {
|
||||
//nolint:golint,errcheck // worst case is we won't have autocomplete - this is not a failure
|
||||
c.initialiseSuggestions(ctx)
|
||||
c.interactivePrompt.Render()
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
@@ -123,14 +114,7 @@ func (c *InteractiveClient) readInitDataStream(ctx context.Context) {
|
||||
log.Printf("[TRACE] SetupWatcher")
|
||||
|
||||
statushooks.SetStatus(ctx, "Start file watcher…")
|
||||
// start the workspace file watcher
|
||||
if viper.GetBool(constants.ArgWatch) {
|
||||
// provide an explicit error handler which re-renders the prompt after displaying the error
|
||||
if err := c.initData.Workspace.SetupWatcher(ctx, c.initData.Client, c.workspaceWatcherErrorHandler); err != nil {
|
||||
c.initData.Result.Error = err
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
statushooks.SetStatus(ctx, "Start notifications listener…")
|
||||
log.Printf("[TRACE] Start notifications listener")
|
||||
|
||||
@@ -170,14 +154,6 @@ func (c *InteractiveClient) waitForInitData(ctx context.Context) error {
|
||||
}
|
||||
}
|
||||
|
||||
// return the workspace, or nil if not yet initialised
|
||||
func (c *InteractiveClient) workspace() *workspace.Workspace {
|
||||
if c.initData == nil {
|
||||
return nil
|
||||
}
|
||||
return c.initData.Workspace
|
||||
}
|
||||
|
||||
// return the client, or nil if not yet initialised
|
||||
func (c *InteractiveClient) client() db_common.Client {
|
||||
if c.initData == nil {
|
||||
|
||||
@@ -1,107 +0,0 @@
|
||||
package modinstaller
|
||||
|
||||
import (
|
||||
"sort"
|
||||
)
|
||||
|
||||
type ChangeOperation int
|
||||
|
||||
const (
|
||||
Insert ChangeOperation = iota
|
||||
Delete
|
||||
Replace
|
||||
)
|
||||
|
||||
type Change struct {
|
||||
Content []byte
|
||||
Operation ChangeOperation
|
||||
OffsetStart int
|
||||
OffsetEnd int
|
||||
}
|
||||
|
||||
type ChangeSet []*Change
|
||||
|
||||
func EmptyChangeSet() ChangeSet { return ChangeSet{} }
|
||||
|
||||
// MergeChangeSet creates a ChangeSet by merging the given ChangeSets in order
|
||||
func MergeChangeSet(changeSets ...ChangeSet) ChangeSet {
|
||||
changeSet := ChangeSet{}
|
||||
for _, cs := range changeSets {
|
||||
changeSet = append(changeSet, cs...)
|
||||
}
|
||||
return changeSet
|
||||
}
|
||||
|
||||
// NewChangeSet creates a ChangeSet from the given changes
|
||||
func NewChangeSet(changes ...*Change) ChangeSet {
|
||||
return ChangeSet(changes)
|
||||
}
|
||||
|
||||
func (c ChangeSet) SortByOffset() {
|
||||
// sort the changes into descending order of byte offset
// this way, when a change is applied, even if its replacement
// does not have the exact same bytes, we don't lose the offset information
// of the changes preceding it
|
||||
sort.Slice(c, func(i, j int) bool {
|
||||
return c[i].OffsetStart > c[j].OffsetStart
|
||||
})
|
||||
}
|
||||
|
||||
type OperatorFunc func(*Change, []byte) []byte
|
||||
|
||||
type ByteSequence struct {
|
||||
operators map[ChangeOperation]OperatorFunc
|
||||
_underlying []byte
|
||||
}
|
||||
|
||||
func NewByteSequence(b []byte) *ByteSequence {
|
||||
byteSequence := new(ByteSequence)
|
||||
byteSequence._underlying = make([]byte, len(b))
|
||||
copy(byteSequence._underlying, b)
|
||||
|
||||
byteSequence.operators = map[ChangeOperation]OperatorFunc{
|
||||
Insert: insert,
|
||||
Delete: clear,
|
||||
Replace: replace,
|
||||
}
|
||||
|
||||
return byteSequence
|
||||
}
|
||||
|
||||
func (b *ByteSequence) ApplyChanges(changeSet ChangeSet) {
|
||||
changeSet.SortByOffset()
|
||||
for _, change := range changeSet {
|
||||
operation := change.Operation
|
||||
if operator, ok := b.operators[operation]; ok {
|
||||
b._underlying = operator(change, b._underlying)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Apply applies the given function on the byte sequence
|
||||
func (bseq *ByteSequence) Apply(apply func([]byte) []byte) {
|
||||
bseq._underlying = apply(bseq._underlying)
|
||||
}
|
||||
|
||||
// Bytes returns the current underlying byte sequence
|
||||
func (bseq *ByteSequence) Bytes() []byte {
|
||||
return bseq._underlying
|
||||
}
|
||||
|
||||
func clear(change *Change, source []byte) []byte {
|
||||
left := source[:change.OffsetStart]
|
||||
right := source[change.OffsetEnd:]
|
||||
return append(left, right...)
|
||||
}
|
||||
|
||||
func insert(change *Change, source []byte) []byte {
|
||||
left := source[:change.OffsetStart]
|
||||
right := source[change.OffsetStart:]
|
||||
// prepend the content before the right part
|
||||
right = append(change.Content, right...)
|
||||
return append(left, right...)
|
||||
}
|
||||
|
||||
func replace(change *Change, source []byte) []byte {
|
||||
return insert(change, clear(change, source))
|
||||
}
|
||||
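Taken together, Change, ChangeSet and ByteSequence implement simple offset-based byte patching. A minimal sketch of how they compose (same package as the code above, with fmt imported; the offsets and content are invented for illustration):

// Sketch only - demonstrates the patching helpers defined above.
func examplePatch() {
	src := []byte("mod \"m1\" {\n}\nmod \"m2\" {\n}\n")

	changes := NewChangeSet(
		// drop the second mod block (bytes 13..26 of src)
		&Change{Operation: Delete, OffsetStart: 13, OffsetEnd: 26},
		// prepend a comment line at the very start of the file
		&Change{Operation: Insert, OffsetStart: 0, Content: []byte("# generated\n")},
	)

	seq := NewByteSequence(src)
	// ApplyChanges sorts by descending offset, so the delete at offset 13 runs
	// before the insert at offset 0 and the earlier offset stays valid
	seq.ApplyChanges(changes)
	fmt.Printf("%s", seq.Bytes())
	// prints:
	// # generated
	// mod "m1" {
	// }
}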
@@ -1,68 +0,0 @@
|
||||
package modinstaller
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"github.com/Masterminds/semver/v3"
|
||||
"github.com/go-git/go-git/v5"
|
||||
"github.com/go-git/go-git/v5/config"
|
||||
"github.com/go-git/go-git/v5/storage/memory"
|
||||
)
|
||||
|
||||
func getGitUrl(modName string) string {
|
||||
return fmt.Sprintf("https://%s", modName)
|
||||
}
|
||||
|
||||
func getTags(repo string) ([]string, error) {
|
||||
// Create the remote with repository URL
|
||||
rem := git.NewRemote(memory.NewStorage(), &config.RemoteConfig{
|
||||
Name: "origin",
|
||||
URLs: []string{repo},
|
||||
})
|
||||
|
||||
// load remote references
|
||||
refs, err := rem.List(&git.ListOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// filters the references list and only keeps tags
|
||||
var tags []string
|
||||
for _, ref := range refs {
|
||||
if ref.Name().IsTag() {
|
||||
tags = append(tags, ref.Name().Short())
|
||||
}
|
||||
}
|
||||
|
||||
return tags, nil
|
||||
}
|
||||
|
||||
func getTagVersionsFromGit(repo string, includePrerelease bool) (semver.Collection, error) {
|
||||
tags, err := getTags(repo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
versions := make(semver.Collection, len(tags))
|
||||
// handle the index manually as we may not add all tags - we skip any tag we cannot parse as a version
|
||||
idx := 0
|
||||
for _, raw := range tags {
|
||||
v, err := semver.NewVersion(raw)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if !includePrerelease && (v.Metadata() != "" || v.Prerelease() != "") {
|
||||
continue
|
||||
}
|
||||
versions[idx] = v
|
||||
idx++
|
||||
}
|
||||
// shrink slice
|
||||
versions = versions[:idx]
|
||||
|
||||
// sort the versions in REVERSE order
|
||||
sort.Sort(sort.Reverse(versions))
|
||||
return versions, nil
|
||||
}
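A minimal usage sketch of the two helpers above; the mod name is only an example and a real run needs network access to the Git remote:

// Sketch only - list the stable release tags of a mod, newest first.
// Assumes it lives in the same file as above, so fmt is already imported.
func printLatestVersions() error {
	modName := "github.com/turbot/steampipe-mod-aws-compliance"
	versions, err := getTagVersionsFromGit(getGitUrl(modName), false)
	if err != nil {
		return err
	}
	// versions are reverse sorted, so versions[0] is the newest stable release
	for _, v := range versions {
		fmt.Println(v.Original())
	}
	return nil
}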
|
||||
@@ -1,16 +0,0 @@
|
||||
package modinstaller
|
||||
|
||||
import (
|
||||
"github.com/Masterminds/semver/v3"
|
||||
"github.com/turbot/steampipe/pkg/versionhelpers"
|
||||
)
|
||||
|
||||
func getVersionSatisfyingConstraint(constraint *versionhelpers.Constraints, availableVersions []*semver.Version) *semver.Version {
|
||||
// search the reverse sorted versions, finding the highest version which satisfies ALL constraints
|
||||
for _, version := range availableVersions {
|
||||
if constraint.Check(version) {
|
||||
return version
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
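The same first-match-wins idea, sketched directly against the Masterminds semver package (the constraint string and version list are invented; needs "fmt" and "github.com/Masterminds/semver/v3"):

// Sketch only - scan a reverse-sorted version list and return the first
// (i.e. highest) version satisfying the constraint, mirroring
// getVersionSatisfyingConstraint above.
func highestSatisfying(constraintStr string, sortedDesc []*semver.Version) (*semver.Version, error) {
	c, err := semver.NewConstraint(constraintStr)
	if err != nil {
		return nil, err
	}
	for _, v := range sortedDesc {
		if c.Check(v) {
			return v, nil
		}
	}
	return nil, fmt.Errorf("no version satisfies %q", constraintStr)
}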
|
||||
@@ -1,31 +0,0 @@
|
||||
package modinstaller
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/turbot/go-kit/helpers"
|
||||
"github.com/turbot/pipe-fittings/utils"
|
||||
"github.com/turbot/steampipe/pkg/error_helpers"
|
||||
)
|
||||
|
||||
func InstallWorkspaceDependencies(ctx context.Context, opts *InstallOpts) (_ *InstallData, err error) {
|
||||
utils.LogTime("cmd.InstallWorkspaceDependencies")
|
||||
defer func() {
|
||||
utils.LogTime("cmd.InstallWorkspaceDependencies end")
|
||||
if r := recover(); r != nil {
|
||||
error_helpers.ShowError(ctx, helpers.ToError(r))
|
||||
}
|
||||
}()
|
||||
|
||||
// install workspace dependencies
|
||||
installer, err := NewModInstaller(ctx, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := installer.InstallWorkspaceDependencies(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return installer.installData, nil
|
||||
}
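A hedged end-to-end sketch of driving the installer from code. The "name@constraint" argument format, the pre-loaded workspaceMod and ctx, and the viper state that NewInstallOpts reads (the active cobra command) are all assumed to be in place:

// Sketch only - install one extra dependency into the current workspace.
opts := NewInstallOpts(workspaceMod, "github.com/turbot/steampipe-mod-aws-compliance@^0.92")
installData, err := InstallWorkspaceDependencies(ctx, opts)
if err != nil {
	return err
}
// print the tree of dependencies added by this run
fmt.Println(installData.GetInstalledTree())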
|
||||
@@ -1,100 +0,0 @@
|
||||
package modinstaller
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/Masterminds/semver/v3"
|
||||
"github.com/turbot/steampipe/pkg/steampipeconfig/modconfig"
|
||||
"github.com/turbot/steampipe/pkg/steampipeconfig/versionmap"
|
||||
"github.com/turbot/steampipe/pkg/versionhelpers"
|
||||
"github.com/xlab/treeprint"
|
||||
)
|
||||
|
||||
type InstallData struct {
|
||||
// record of the full dependency tree
|
||||
Lock *versionmap.WorkspaceLock
|
||||
NewLock *versionmap.WorkspaceLock
|
||||
|
||||
// ALL the available versions for each dependency mod (we populate this in a lazy fashion)
|
||||
allAvailable versionmap.VersionListMap
|
||||
|
||||
// list of dependencies installed by recent install operation
|
||||
Installed versionmap.DependencyVersionMap
|
||||
// list of dependencies which have been upgraded
|
||||
Upgraded versionmap.DependencyVersionMap
|
||||
// list of dependencies which have been downgraded
|
||||
Downgraded versionmap.DependencyVersionMap
|
||||
// list of dependencies which have been uninstalled
|
||||
Uninstalled versionmap.DependencyVersionMap
|
||||
WorkspaceMod *modconfig.Mod
|
||||
}
|
||||
|
||||
func NewInstallData(workspaceLock *versionmap.WorkspaceLock, workspaceMod *modconfig.Mod) *InstallData {
|
||||
return &InstallData{
|
||||
Lock: workspaceLock,
|
||||
WorkspaceMod: workspaceMod,
|
||||
NewLock: versionmap.EmptyWorkspaceLock(workspaceLock),
|
||||
allAvailable: make(versionmap.VersionListMap),
|
||||
Installed: make(versionmap.DependencyVersionMap),
|
||||
Upgraded: make(versionmap.DependencyVersionMap),
|
||||
Downgraded: make(versionmap.DependencyVersionMap),
|
||||
Uninstalled: make(versionmap.DependencyVersionMap),
|
||||
}
|
||||
}
|
||||
|
||||
// onModInstalled is called when a dependency is satisfied by installing a mod version
|
||||
func (d *InstallData) onModInstalled(dependency *ResolvedModRef, modDef *modconfig.Mod, parent *modconfig.Mod) {
|
||||
parentPath := parent.GetInstallCacheKey()
|
||||
// get the constraint from the parent (it must be there)
|
||||
modVersionConstraint := parent.Require.GetModDependency(dependency.Name).Constraint.Original
|
||||
|
||||
// update lock
|
||||
d.NewLock.InstallCache.Add(dependency.Name, modDef.ShortName, modDef.Version, modVersionConstraint, parentPath)
|
||||
}
|
||||
|
||||
// addExisting is called when a dependency is satisfied by a mod which is already installed
|
||||
func (d *InstallData) addExisting(dependencyName string, existingDep *modconfig.Mod, constraint *versionhelpers.Constraints, parent *modconfig.Mod) {
|
||||
// update lock
|
||||
parentPath := parent.GetInstallCacheKey()
|
||||
d.NewLock.InstallCache.Add(dependencyName, existingDep.ShortName, existingDep.Version, constraint.Original, parentPath)
|
||||
}
|
||||
|
||||
// retrieve all available mod versions from our cache, or from Git if not yet cached
|
||||
func (d *InstallData) getAvailableModVersions(modName string, includePrerelease bool) ([]*semver.Version, error) {
|
||||
// have we already loaded the versions for this mod
|
||||
availableVersions, ok := d.allAvailable[modName]
|
||||
if ok {
|
||||
return availableVersions, nil
|
||||
}
|
||||
// so we have not cached this yet - retrieve from Git
|
||||
var err error
|
||||
availableVersions, err = getTagVersionsFromGit(getGitUrl(modName), includePrerelease)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not retrieve version data from Git URL '%s'", modName)
|
||||
}
|
||||
// update our cache
|
||||
d.allAvailable[modName] = availableVersions
|
||||
|
||||
return availableVersions, nil
|
||||
}
|
||||
|
||||
// update the lock with the NewLock and determine if any mods have been uninstalled
|
||||
func (d *InstallData) onInstallComplete() {
|
||||
d.Installed = d.NewLock.InstallCache.GetMissingFromOther(d.Lock.InstallCache)
|
||||
d.Uninstalled = d.Lock.InstallCache.GetMissingFromOther(d.NewLock.InstallCache)
|
||||
d.Upgraded = d.Lock.InstallCache.GetUpgradedInOther(d.NewLock.InstallCache)
|
||||
d.Downgraded = d.Lock.InstallCache.GetDowngradedInOther(d.NewLock.InstallCache)
|
||||
d.Lock = d.NewLock
|
||||
}
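The bookkeeping above boils down to set differences between the old and new lock. A stripped-down illustration of the idea using plain maps (DependencyVersionMap in the real code carries more detail):

// Sketch only - an illustrative stand-in for GetMissingFromOther: anything in
// newDeps but not in oldDeps was installed, and vice versa for uninstalled.
func diffDeps(oldDeps, newDeps map[string]string) (installed, uninstalled map[string]string) {
	installed = map[string]string{}
	uninstalled = map[string]string{}
	for name, version := range newDeps {
		if _, ok := oldDeps[name]; !ok {
			installed[name] = version
		}
	}
	for name, version := range oldDeps {
		if _, ok := newDeps[name]; !ok {
			uninstalled[name] = version
		}
	}
	return installed, uninstalled
}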
|
||||
|
||||
func (d *InstallData) GetUpdatedTree() treeprint.Tree {
|
||||
return d.Upgraded.GetDependencyTree(d.WorkspaceMod.GetInstallCacheKey())
|
||||
}
|
||||
|
||||
func (d *InstallData) GetInstalledTree() treeprint.Tree {
|
||||
return d.Installed.GetDependencyTree(d.WorkspaceMod.GetInstallCacheKey())
|
||||
}
|
||||
|
||||
func (d *InstallData) GetUninstalledTree() treeprint.Tree {
|
||||
return d.Uninstalled.GetDependencyTree(d.WorkspaceMod.GetInstallCacheKey())
|
||||
}
|
||||
@@ -1,28 +0,0 @@
|
||||
package modinstaller
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
"github.com/turbot/steampipe/pkg/constants"
|
||||
"github.com/turbot/steampipe/pkg/steampipeconfig/modconfig"
|
||||
)
|
||||
|
||||
type InstallOpts struct {
|
||||
WorkspaceMod *modconfig.Mod
|
||||
Command string
|
||||
ModArgs []string
|
||||
DryRun bool
|
||||
Force bool
|
||||
}
|
||||
|
||||
func NewInstallOpts(workspaceMod *modconfig.Mod, modsToInstall ...string) *InstallOpts {
|
||||
cmdName := viper.Get(constants.ConfigKeyActiveCommand).(*cobra.Command).Name()
|
||||
opts := &InstallOpts{
|
||||
WorkspaceMod: workspaceMod,
|
||||
DryRun: viper.GetBool(constants.ArgDryRun),
|
||||
Force: viper.GetBool(constants.ArgForce),
|
||||
ModArgs: modsToInstall,
|
||||
Command: cmdName,
|
||||
}
|
||||
return opts
|
||||
}
|
||||
@@ -1,599 +0,0 @@
|
||||
package modinstaller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/Masterminds/semver/v3"
|
||||
git "github.com/go-git/go-git/v5"
|
||||
"github.com/otiai10/copy"
|
||||
"github.com/spf13/viper"
|
||||
"github.com/turbot/pipe-fittings/plugin"
|
||||
"github.com/turbot/pipe-fittings/utils"
|
||||
"github.com/turbot/steampipe-plugin-sdk/v5/sperr"
|
||||
"github.com/turbot/steampipe/pkg/constants"
|
||||
"github.com/turbot/steampipe/pkg/error_helpers"
|
||||
"github.com/turbot/steampipe/pkg/filepaths"
|
||||
"github.com/turbot/steampipe/pkg/steampipeconfig"
|
||||
"github.com/turbot/steampipe/pkg/steampipeconfig/modconfig"
|
||||
"github.com/turbot/steampipe/pkg/steampipeconfig/parse"
|
||||
"github.com/turbot/steampipe/pkg/steampipeconfig/versionmap"
|
||||
)
|
||||
|
||||
type ModInstaller struct {
|
||||
installData *InstallData
|
||||
|
||||
// this will be updated as changes are made to dependencies
|
||||
workspaceMod *modconfig.Mod
|
||||
|
||||
// since changes are made to workspaceMod, we need a copy of the Require as is on disk
|
||||
// to be able to calculate changes
|
||||
oldRequire *modconfig.Require
|
||||
|
||||
// installed plugins
|
||||
installedPlugins map[string]*plugin.PluginVersionString
|
||||
|
||||
mods versionmap.VersionConstraintMap
|
||||
|
||||
// the final resting place of all dependency mods
|
||||
modsPath string
|
||||
// a shadow directory for installing mods
|
||||
// this is necessary to make mod installation transactional
|
||||
shadowDirPath string
|
||||
|
||||
workspacePath string
|
||||
|
||||
// what command is being run
|
||||
command string
|
||||
// is this a dry run - if so, no changes are written to disk
|
||||
dryRun bool
|
||||
// do we force install even if there are require errors
|
||||
force bool
|
||||
}
|
||||
|
||||
func NewModInstaller(ctx context.Context, opts *InstallOpts) (*ModInstaller, error) {
|
||||
if opts.WorkspaceMod == nil {
|
||||
return nil, sperr.New("no workspace mod passed to mod installer")
|
||||
}
|
||||
i := &ModInstaller{
|
||||
workspacePath: opts.WorkspaceMod.ModPath,
|
||||
workspaceMod: opts.WorkspaceMod,
|
||||
command: opts.Command,
|
||||
dryRun: opts.DryRun,
|
||||
force: opts.Force,
|
||||
}
|
||||
|
||||
if opts.WorkspaceMod.Require != nil {
|
||||
i.oldRequire = opts.WorkspaceMod.Require.Clone()
|
||||
}
|
||||
|
||||
if err := i.setModsPath(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
installedPlugins, err := plugin.GetInstalledPlugins(ctx, steampipeconfig.GlobalConfig.PluginVersions)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
i.installedPlugins = installedPlugins
|
||||
|
||||
// load lock file
|
||||
workspaceLock, err := versionmap.LoadWorkspaceLock(ctx, i.workspacePath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// create install data
|
||||
i.installData = NewInstallData(workspaceLock, i.workspaceMod)
|
||||
|
||||
// parse args to get the required mod versions
|
||||
requiredMods, err := i.GetRequiredModVersionsFromArgs(opts.ModArgs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
i.mods = requiredMods
|
||||
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func (i *ModInstaller) removeOldShadowDirectories() error {
|
||||
removeErrors := []error{}
|
||||
// get the parent of the 'mods' directory - all shadow directories are siblings of this
|
||||
parent := filepath.Dir(i.modsPath)
|
||||
entries, err := os.ReadDir(parent)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, dir := range entries {
|
||||
if dir.IsDir() && filepaths.IsModInstallShadowPath(dir.Name()) {
|
||||
err := os.RemoveAll(filepath.Join(parent, dir.Name()))
|
||||
if err != nil {
|
||||
removeErrors = append(removeErrors, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return error_helpers.CombineErrors(removeErrors...)
|
||||
}
|
||||
|
||||
func (i *ModInstaller) setModsPath() error {
|
||||
i.modsPath = filepaths.WorkspaceModPath(i.workspacePath)
|
||||
_ = i.removeOldShadowDirectories()
|
||||
i.shadowDirPath = filepaths.WorkspaceModShadowPath(i.workspacePath)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (i *ModInstaller) UninstallWorkspaceDependencies(ctx context.Context) error {
|
||||
workspaceMod := i.workspaceMod
|
||||
|
||||
// remove required dependencies from the mod file
|
||||
if len(i.mods) == 0 {
|
||||
workspaceMod.RemoveAllModDependencies()
|
||||
|
||||
} else {
|
||||
// verify all the mods specified in the args exist in the modfile
|
||||
workspaceMod.RemoveModDependencies(i.mods)
|
||||
}
|
||||
|
||||
// uninstall by calling Install
|
||||
if err := i.installMods(ctx, workspaceMod.Require.Mods, workspaceMod); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if workspaceMod.Require.Empty() {
|
||||
workspaceMod.Require = nil
|
||||
}
|
||||
|
||||
// if this is a dry run, return now
|
||||
if i.dryRun {
|
||||
log.Printf("[TRACE] UninstallWorkspaceDependencies - dry-run=true, returning before saving mod file and cache\n")
|
||||
return nil
|
||||
}
|
||||
|
||||
// write the lock file
|
||||
if err := i.installData.Lock.Save(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// now safe to save the mod file
|
||||
if err := i.updateModFile(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// tidy unused mods
|
||||
if viper.GetBool(constants.ArgPrune) {
|
||||
if _, err := i.Prune(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// InstallWorkspaceDependencies installs all dependencies of the workspace mod
|
||||
func (i *ModInstaller) InstallWorkspaceDependencies(ctx context.Context) (err error) {
|
||||
workspaceMod := i.workspaceMod
|
||||
defer func() {
|
||||
if err != nil && i.force {
|
||||
// suppress the error since this is a forced install
|
||||
log.Println("[TRACE] suppressing error in InstallWorkspaceDependencies because force is enabled", err)
|
||||
err = nil
|
||||
}
|
||||
// tidy unused mods
|
||||
// (put in defer so it still gets called in case of errors)
|
||||
if viper.GetBool(constants.ArgPrune) && !i.dryRun {
|
||||
// be sure not to overwrite an existing return error
|
||||
_, pruneErr := i.Prune()
|
||||
if pruneErr != nil && err == nil {
|
||||
err = pruneErr
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
if validationErrors := workspaceMod.ValidateRequirements(i.installedPlugins); len(validationErrors) > 0 {
|
||||
if !i.force {
|
||||
// if this is not a force install, return errors in validation
|
||||
return error_helpers.CombineErrors(validationErrors...)
|
||||
}
|
||||
// ignore if this is a force install
|
||||
// TODO: raise warnings for errors getting suppressed [https://github.com/turbot/steampipe/issues/3364]
|
||||
log.Println("[TRACE] suppressing mod validation error", validationErrors)
|
||||
}
|
||||
|
||||
// if mod args have been provided, add them to the workspace mod requires
|
||||
// (this will replace any existing dependencies of same name)
|
||||
if len(i.mods) > 0 {
|
||||
workspaceMod.AddModDependencies(i.mods)
|
||||
}
|
||||
|
||||
if err := i.installMods(ctx, workspaceMod.Require.Mods, workspaceMod); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// if this is a dry run, return now
|
||||
if i.dryRun {
|
||||
log.Printf("[TRACE] InstallWorkspaceDependencies - dry-run=true, returning before saving mod file and cache\n")
|
||||
return nil
|
||||
}
|
||||
|
||||
// write the lock file
|
||||
if err := i.installData.Lock.Save(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// now safe to save the mod file
|
||||
if err := i.updateModFile(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !workspaceMod.HasDependentMods() {
|
||||
// there are no dependencies - delete the cache
|
||||
i.installData.Lock.Delete()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (i *ModInstaller) GetModList() string {
|
||||
return i.installData.Lock.GetModList(i.workspaceMod.GetInstallCacheKey())
|
||||
}
|
||||
|
||||
// commitShadow recursively copies over the contents of the shadow directory
|
||||
// to the mods directory, replacing conflicts as it goes
|
||||
// (uses `os.Create(dest)` under the hood - which truncates the target)
|
||||
func (i *ModInstaller) commitShadow(ctx context.Context) error {
|
||||
if error_helpers.IsContextCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
}
|
||||
if _, err := os.Stat(i.shadowDirPath); os.IsNotExist(err) {
|
||||
// nothing to do here
|
||||
// there's no shadow directory to commit
|
||||
// this is not an error and may happen when install does not make any changes
|
||||
return nil
|
||||
}
|
||||
entries, err := os.ReadDir(i.shadowDirPath)
|
||||
if err != nil {
|
||||
return sperr.WrapWithRootMessage(err, "could not read shadow directory")
|
||||
}
|
||||
for _, entry := range entries {
|
||||
if !entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
source := filepath.Join(i.shadowDirPath, entry.Name())
|
||||
destination := filepath.Join(i.modsPath, entry.Name())
|
||||
log.Println("[TRACE] copying", source, destination)
|
||||
if err := copy.Copy(source, destination); err != nil {
|
||||
return sperr.WrapWithRootMessage(err, "could not commit shadow directory '%s'", entry.Name())
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (i *ModInstaller) shouldCommitShadow(ctx context.Context, installError error) bool {
|
||||
// no commit if this is a dry run
|
||||
if i.dryRun {
|
||||
return false
|
||||
}
|
||||
// commit if this is forced - even if there's errors
|
||||
return installError == nil || i.force
|
||||
}
|
||||
|
||||
func (i *ModInstaller) installMods(ctx context.Context, mods []*modconfig.ModVersionConstraint, parent *modconfig.Mod) (err error) {
|
||||
defer func() {
|
||||
var commitErr error
|
||||
if i.shouldCommitShadow(ctx, err) {
|
||||
commitErr = i.commitShadow(ctx)
|
||||
}
|
||||
|
||||
// if this was forced, we need to suppress the install error
|
||||
// otherwise the calling code will fail
|
||||
if i.force {
|
||||
err = nil
|
||||
}
|
||||
|
||||
// ensure we return any commit error
|
||||
if commitErr != nil {
|
||||
err = commitErr
|
||||
}
|
||||
|
||||
// force remove the shadow directory - we can ignore any error here, since
|
||||
// these directories get cleaned up before any install session
|
||||
os.RemoveAll(i.shadowDirPath)
|
||||
}()
|
||||
|
||||
var errors []error
|
||||
for _, requiredModVersion := range mods {
|
||||
modToUse, err := i.getCurrentlyInstalledVersionToUse(ctx, requiredModVersion, parent, i.updating())
|
||||
if err != nil {
|
||||
errors = append(errors, err)
|
||||
continue
|
||||
}
|
||||
|
||||
// if the mod is not installed or needs updating, OR if this is an update command,
|
||||
// pass shouldUpdate=true into installModDependencesRecursively
|
||||
// this ensures that we update any dependencies which have updates available
|
||||
shouldUpdate := modToUse == nil || i.updating()
|
||||
if err := i.installModDependencesRecursively(ctx, requiredModVersion, modToUse, parent, shouldUpdate); err != nil {
|
||||
errors = append(errors, err)
|
||||
}
|
||||
}
|
||||
|
||||
// update the lock to be the new lock, and record any uninstalled mods
|
||||
i.installData.onInstallComplete()
|
||||
|
||||
return i.buildInstallError(errors)
|
||||
}
|
||||
|
||||
func (i *ModInstaller) buildInstallError(errors []error) error {
|
||||
if len(errors) == 0 {
|
||||
return nil
|
||||
}
|
||||
verb := "install"
|
||||
if i.updating() {
|
||||
verb = "update"
|
||||
}
|
||||
prefix := fmt.Sprintf("%d %s failed to %s", len(errors), utils.Pluralize("dependency", len(errors)), verb)
|
||||
err := error_helpers.CombineErrorsWithPrefix(prefix, errors...)
|
||||
return err
|
||||
}
|
||||
|
||||
func (i *ModInstaller) installModDependencesRecursively(ctx context.Context, requiredModVersion *modconfig.ModVersionConstraint, dependencyMod *modconfig.Mod, parent *modconfig.Mod, shouldUpdate bool) error {
|
||||
if error_helpers.IsContextCanceled(ctx) {
|
||||
// short circuit if the execution context has been cancelled
|
||||
return ctx.Err()
|
||||
}
|
||||
// get available versions for this mod
|
||||
includePrerelease := requiredModVersion.Constraint.IsPrerelease()
|
||||
availableVersions, err := i.installData.getAvailableModVersions(requiredModVersion.Name, includePrerelease)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var errors []error
|
||||
|
||||
if dependencyMod == nil {
|
||||
// get a resolved mod ref that satisfies the version constraints
|
||||
resolvedRef, err := i.getModRefSatisfyingConstraints(requiredModVersion, availableVersions)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// install the mod
|
||||
dependencyMod, err = i.install(ctx, resolvedRef, parent)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
validationErrors := dependencyMod.ValidateRequirements(i.installedPlugins)
|
||||
errors = append(errors, validationErrors...)
|
||||
} else {
|
||||
// update the install data
|
||||
i.installData.addExisting(requiredModVersion.Name, dependencyMod, requiredModVersion.Constraint, parent)
|
||||
log.Printf("[TRACE] not installing %s with version constraint %s as version %s is already installed", requiredModVersion.Name, requiredModVersion.Constraint.Original, dependencyMod.Version)
|
||||
}
|
||||
|
||||
// to get here we have the dependency mod - either we installed it or it was already installed
|
||||
// recursively install its dependencies
|
||||
for _, childDependency := range dependencyMod.Require.Mods {
|
||||
childDependencyMod, err := i.getCurrentlyInstalledVersionToUse(ctx, childDependency, dependencyMod, shouldUpdate)
|
||||
if err != nil {
|
||||
errors = append(errors, err)
|
||||
continue
|
||||
}
|
||||
if err := i.installModDependencesRecursively(ctx, childDependency, childDependencyMod, dependencyMod, shouldUpdate); err != nil {
|
||||
errors = append(errors, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
return error_helpers.CombineErrorsWithPrefix(fmt.Sprintf("%d child %s failed to install", len(errors), utils.Pluralize("dependency", len(errors))), errors...)
|
||||
}
|
||||
|
||||
func (i *ModInstaller) getCurrentlyInstalledVersionToUse(ctx context.Context, requiredModVersion *modconfig.ModVersionConstraint, parent *modconfig.Mod, forceUpdate bool) (*modconfig.Mod, error) {
|
||||
// do we have an installed version of this mod matching the required mod constraint
|
||||
installedVersion, err := i.installData.Lock.GetLockedModVersion(requiredModVersion, parent)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if installedVersion == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// can we update this
|
||||
canUpdate, err := i.canUpdateMod(installedVersion, requiredModVersion, forceUpdate)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
}
|
||||
if canUpdate {
|
||||
// return nil mod to indicate we should update
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// load the existing mod and return
|
||||
return i.loadDependencyMod(ctx, installedVersion)
|
||||
}
|
||||
|
||||
// loadDependencyMod tries to load the mod definition from the shadow directory
|
||||
// and falls back to the 'mods' directory of the root mod
|
||||
func (i *ModInstaller) loadDependencyMod(ctx context.Context, modVersion *versionmap.ResolvedVersionConstraint) (*modconfig.Mod, error) {
|
||||
// construct the dependency path - this is the relative path of the dependency we are installing
|
||||
dependencyPath := modVersion.DependencyPath()
|
||||
|
||||
// first try loading from the shadow dir
|
||||
modDefinition, err := i.loadDependencyModFromRoot(ctx, i.shadowDirPath, dependencyPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// failed to load from shadow dir, try mods dir
|
||||
if modDefinition == nil {
|
||||
modDefinition, err = i.loadDependencyModFromRoot(ctx, i.modsPath, dependencyPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// if we still failed, give up
|
||||
if modDefinition == nil {
|
||||
return nil, fmt.Errorf("could not find dependency mod '%s'", dependencyPath)
|
||||
}
|
||||
|
||||
// set the DependencyName, DependencyPath and Version properties on the mod
|
||||
if err := i.setModDependencyConfig(modDefinition, dependencyPath); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return modDefinition, nil
|
||||
}
|
||||
|
||||
func (i *ModInstaller) loadDependencyModFromRoot(ctx context.Context, modInstallRoot string, dependencyPath string) (*modconfig.Mod, error) {
|
||||
log.Printf("[TRACE] loadDependencyModFromRoot: trying to load %s from root %s", dependencyPath, modInstallRoot)
|
||||
|
||||
modPath := path.Join(modInstallRoot, dependencyPath)
|
||||
modDefinition, err := parse.LoadModfile(modPath)
|
||||
if err != nil {
|
||||
return nil, sperr.WrapWithMessage(err, "failed to load mod definition for %s from %s", dependencyPath, modInstallRoot)
|
||||
}
|
||||
return modDefinition, nil
|
||||
}
|
||||
|
||||
// determine if we should update this mod, and if so whether there is an update available
|
||||
func (i *ModInstaller) canUpdateMod(installedVersion *versionmap.ResolvedVersionConstraint, requiredModVersion *modconfig.ModVersionConstraint, forceUpdate bool) (bool, error) {
|
||||
// so should we update?
|
||||
// if forceUpdate is set, or if the installed version no longer satisfies the required version constraint, check for an update
|
||||
isSatisfied, errs := requiredModVersion.Constraint.Validate(installedVersion.Version)
|
||||
if len(errs) > 0 {
|
||||
return false, error_helpers.CombineErrors(errs...)
|
||||
}
|
||||
if forceUpdate || !isSatisfied {
|
||||
// get available versions for this mod
|
||||
includePrerelease := requiredModVersion.Constraint.IsPrerelease()
|
||||
availableVersions, err := i.installData.getAvailableModVersions(requiredModVersion.Name, includePrerelease)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return i.updateAvailable(requiredModVersion, installedVersion.Version, availableVersions)
|
||||
}
|
||||
return false, nil
|
||||
|
||||
}
|
||||
|
||||
// determine whether there is a newer mod version available which satisfies the dependency version constraint
|
||||
func (i *ModInstaller) updateAvailable(requiredVersion *modconfig.ModVersionConstraint, currentVersion *semver.Version, availableVersions []*semver.Version) (bool, error) {
|
||||
latestVersion, err := i.getModRefSatisfyingConstraints(requiredVersion, availableVersions)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if latestVersion.Version.GreaterThan(currentVersion) {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// get the most recent available mod version which satisfies the version constraint
|
||||
func (i *ModInstaller) getModRefSatisfyingConstraints(modVersion *modconfig.ModVersionConstraint, availableVersions []*semver.Version) (*ResolvedModRef, error) {
|
||||
// find a version which satisfies the version constraint
|
||||
var version = getVersionSatisfyingConstraint(modVersion.Constraint, availableVersions)
|
||||
if version == nil {
|
||||
return nil, fmt.Errorf("no version of %s found satisfying version constraint: %s", modVersion.Name, modVersion.Constraint.Original)
|
||||
}
|
||||
|
||||
return NewResolvedModRef(modVersion, version)
|
||||
}
|
||||
|
||||
// install a mod
|
||||
func (i *ModInstaller) install(ctx context.Context, dependency *ResolvedModRef, parent *modconfig.Mod) (_ *modconfig.Mod, err error) {
|
||||
var modDef *modconfig.Mod
|
||||
// get the temp location to install the mod to
|
||||
dependencyPath := dependency.DependencyPath()
|
||||
destPath := i.getDependencyShadowPath(dependencyPath)
|
||||
|
||||
defer func() {
|
||||
if err == nil {
|
||||
i.installData.onModInstalled(dependency, modDef, parent)
|
||||
}
|
||||
}()
|
||||
// if the target path exists, use the existing file
|
||||
// if it does not exist (the usual case), install it
|
||||
if _, err := os.Stat(destPath); os.IsNotExist(err) {
|
||||
log.Println("[TRACE] installing", dependencyPath, "in", destPath)
|
||||
if err := i.installFromGit(dependency, destPath); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// now load the installed mod and return it
|
||||
modDef, err = parse.LoadModfile(destPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if modDef == nil {
|
||||
return nil, fmt.Errorf("'%s' has no mod definition file", dependencyPath)
|
||||
}
|
||||
|
||||
if !i.dryRun {
|
||||
// now the mod is installed in its final location, set mod dependency path
|
||||
if err := i.setModDependencyConfig(modDef, dependencyPath); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return modDef, nil
|
||||
}
|
||||
|
||||
func (i *ModInstaller) installFromGit(dependency *ResolvedModRef, installPath string) error {
|
||||
// get the mod from git
|
||||
gitUrl := getGitUrl(dependency.Name)
|
||||
log.Println("[TRACE] >>> cloning", gitUrl, dependency.GitReference)
|
||||
_, err := git.PlainClone(installPath,
|
||||
false,
|
||||
&git.CloneOptions{
|
||||
URL: gitUrl,
|
||||
ReferenceName: dependency.GitReference,
|
||||
Depth: 1,
|
||||
SingleBranch: true,
|
||||
})
|
||||
if err != nil {
|
||||
return sperr.WrapWithMessage(err, "failed to clone mod '%s' from git", dependency.Name)
|
||||
}
|
||||
// verify the cloned repo contains a valid modfile
|
||||
return i.verifyModFile(dependency, installPath)
|
||||
}
|
||||
|
||||
// build the path of the final install location for this dependency
|
||||
func (i *ModInstaller) getDependencyDestPath(dependencyFullName string) string {
|
||||
return filepath.Join(i.modsPath, dependencyFullName)
|
||||
}
|
||||
|
||||
// build the path of the temporary (shadow) location to copy this dependency to
|
||||
func (i *ModInstaller) getDependencyShadowPath(dependencyFullName string) string {
|
||||
return filepath.Join(i.shadowDirPath, dependencyFullName)
|
||||
}
|
||||
|
||||
// set the mod dependency path
|
||||
func (i *ModInstaller) setModDependencyConfig(mod *modconfig.Mod, dependencyPath string) error {
|
||||
return mod.SetDependencyConfig(dependencyPath)
|
||||
}
|
||||
|
||||
func (i *ModInstaller) updating() bool {
|
||||
return i.command == "update"
|
||||
}
|
||||
|
||||
func (i *ModInstaller) uninstalling() bool {
|
||||
return i.command == "uninstall"
|
||||
}
|
||||
|
||||
func (i *ModInstaller) verifyModFile(dependency *ResolvedModRef, installPath string) error {
|
||||
for _, modFilePath := range filepaths.ModFilePaths(installPath) {
|
||||
_, err := os.Stat(modFilePath)
|
||||
if err == nil {
|
||||
// found the modfile
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return sperr.New("mod '%s' does not contain a valid mod file", dependency.Name)
|
||||
}
|
||||
@@ -1,63 +0,0 @@
|
||||
package modinstaller
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/turbot/steampipe/pkg/error_helpers"
|
||||
"github.com/turbot/steampipe/pkg/steampipeconfig/modconfig"
|
||||
"github.com/turbot/steampipe/pkg/steampipeconfig/versionmap"
|
||||
)
|
||||
|
||||
func (i *ModInstaller) GetRequiredModVersionsFromArgs(modsArgs []string) (versionmap.VersionConstraintMap, error) {
|
||||
var errors []error
|
||||
mods := make(versionmap.VersionConstraintMap, len(modsArgs))
|
||||
for _, modArg := range modsArgs {
|
||||
// create mod version from arg
|
||||
modVersion, err := modconfig.NewModVersionConstraint(modArg)
|
||||
if err != nil {
|
||||
errors = append(errors, err)
|
||||
continue
|
||||
}
|
||||
// if we are updating there are a few checks we need to make
|
||||
if i.updating() {
|
||||
modVersion, err = i.getUpdateVersion(modArg, modVersion)
|
||||
if err != nil {
|
||||
errors = append(errors, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
if i.uninstalling() {
|
||||
// it is not valid to specify a mod version for uninstall
|
||||
if modVersion.HasVersion() {
|
||||
errors = append(errors, fmt.Errorf("invalid arg '%s' - cannot specify a version when uninstalling", modArg))
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
mods[modVersion.Name] = modVersion
|
||||
}
|
||||
if len(errors) > 0 {
|
||||
return nil, error_helpers.CombineErrors(errors...)
|
||||
}
|
||||
return mods, nil
|
||||
}
|
||||
|
||||
func (i *ModInstaller) getUpdateVersion(modArg string, modVersion *modconfig.ModVersionConstraint) (*modconfig.ModVersionConstraint, error) {
|
||||
// verify the mod is already installed
|
||||
if i.installData.Lock.GetMod(modVersion.Name, i.workspaceMod) == nil {
|
||||
return nil, fmt.Errorf("cannot update '%s' as it is not installed", modArg)
|
||||
}
|
||||
|
||||
// find the current dependency with this mod name
|
||||
// - this is what we will be using, to ensure we keep the same version constraint
|
||||
currentDependency := i.workspaceMod.GetModDependency(modVersion.Name)
|
||||
if currentDependency == nil {
|
||||
return nil, fmt.Errorf("cannot update '%s' as it is not a dependency of this workspace", modArg)
|
||||
}
|
||||
|
||||
// it is not valid to specify a mod version - we will set the constraint from the modfile
|
||||
if modVersion.HasVersion() {
|
||||
return nil, fmt.Errorf("invalid arg '%s' - cannot specify a version when updating", modArg)
|
||||
}
|
||||
return currentDependency, nil
|
||||
}
|
||||
@@ -1,42 +0,0 @@
|
||||
package modinstaller
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/turbot/steampipe/pkg/steampipeconfig/modconfig"
|
||||
"github.com/turbot/steampipe/pkg/steampipeconfig/versionmap"
|
||||
)
|
||||
|
||||
func (i *ModInstaller) Prune() (versionmap.VersionListMap, error) {
|
||||
unusedMods := i.installData.Lock.GetUnreferencedMods()
|
||||
// now delete any mod folders which are not in the lock file
|
||||
for name, versions := range unusedMods {
|
||||
for _, version := range versions {
|
||||
depPath := i.getDependencyDestPath(modconfig.BuildModDependencyPath(name, version))
|
||||
if err := i.deleteDependencyItem(depPath); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return unusedMods, nil
|
||||
}
|
||||
|
||||
func (i *ModInstaller) deleteDependencyItem(depPath string) error {
|
||||
if err := os.RemoveAll(depPath); err != nil {
|
||||
return err
|
||||
}
|
||||
return i.deleteEmptyFolderTree(filepath.Dir(depPath))
|
||||
|
||||
}
|
||||
|
||||
func (i *ModInstaller) deleteEmptyFolderTree(folderPath string) error {
|
||||
// if the parent folder is empty, delete it
|
||||
err := os.Remove(folderPath)
|
||||
if err == nil {
|
||||
parent := filepath.Dir(folderPath)
|
||||
return i.deleteEmptyFolderTree(parent)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1,213 +0,0 @@
|
||||
package modinstaller
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/hashicorp/hcl/v2/hclwrite"
|
||||
"github.com/turbot/steampipe/pkg/steampipeconfig/modconfig"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
// updates the 'require' block in 'mod.sp'
|
||||
func (i *ModInstaller) updateModFile() error {
|
||||
contents, err := i.loadModFileBytes()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
oldRequire := i.oldRequire
|
||||
newRequire := i.workspaceMod.Require
|
||||
|
||||
// substitute nil requires with empty requires
// so that we don't have to do nil checks everywhere
// from here on out - a nil require is treated as empty
|
||||
|
||||
if oldRequire == nil {
|
||||
// use an empty require as the old requirements
|
||||
oldRequire = modconfig.NewRequire()
|
||||
}
|
||||
if newRequire == nil {
|
||||
// use a stub require instance
|
||||
newRequire = modconfig.NewRequire()
|
||||
}
|
||||
|
||||
changes := EmptyChangeSet()
|
||||
|
||||
if i.shouldDeleteRequireBlock(oldRequire, newRequire) {
|
||||
changes = i.buildChangeSetForRequireDelete(oldRequire, newRequire)
|
||||
} else if i.shouldCreateRequireBlock(oldRequire, newRequire) {
|
||||
changes = i.buildChangeSetForRequireCreate(oldRequire, newRequire)
|
||||
} else if !newRequire.Empty() && !oldRequire.Empty() {
|
||||
changes = i.calculateChangeSet(oldRequire, newRequire)
|
||||
}
|
||||
|
||||
if len(changes) == 0 {
|
||||
// nothing to do here
|
||||
return nil
|
||||
}
|
||||
|
||||
contents.ApplyChanges(changes)
|
||||
contents.Apply(hclwrite.Format)
|
||||
|
||||
return os.WriteFile(i.workspaceMod.FilePath(), contents.Bytes(), 0644)
|
||||
}
|
||||
|
||||
// loads the contents of the mod.sp file and wraps it with a thin wrapper
|
||||
// to assist in byte sequence manipulation
|
||||
func (i *ModInstaller) loadModFileBytes() (*ByteSequence, error) {
|
||||
modFileBytes, err := os.ReadFile(i.workspaceMod.FilePath())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewByteSequence(modFileBytes), nil
|
||||
}
|
||||
|
||||
func (i *ModInstaller) shouldDeleteRequireBlock(oldRequire *modconfig.Require, newRequire *modconfig.Require) bool {
|
||||
return newRequire.Empty() && !oldRequire.Empty()
|
||||
}
|
||||
|
||||
func (i *ModInstaller) shouldCreateRequireBlock(oldRequire *modconfig.Require, newRequire *modconfig.Require) bool {
|
||||
return !newRequire.Empty() && oldRequire.Empty()
|
||||
}
|
||||
|
||||
func (i *ModInstaller) buildChangeSetForRequireDelete(oldRequire *modconfig.Require, newRequire *modconfig.Require) ChangeSet {
|
||||
return NewChangeSet(&Change{
|
||||
Operation: Delete,
|
||||
OffsetStart: oldRequire.TypeRange.Start.Byte,
|
||||
OffsetEnd: oldRequire.DeclRange.End.Byte,
|
||||
})
|
||||
}
|
||||
|
||||
func (i *ModInstaller) buildChangeSetForRequireCreate(oldRequire *modconfig.Require, newRequire *modconfig.Require) ChangeSet {
|
||||
// the new require is not empty but the old one is -
// add a new require block containing the new dependencies
// by generating the HCL string to insert
|
||||
f := hclwrite.NewEmptyFile()
|
||||
|
||||
var body *hclwrite.Body
|
||||
var insertOffset int
|
||||
|
||||
if oldRequire.TypeRange.Start.Byte != 0 {
|
||||
// this means that there is a require block
|
||||
// but it is probably empty
|
||||
body = f.Body()
|
||||
insertOffset = oldRequire.TypeRange.End.Byte - 1
|
||||
} else {
|
||||
// we don't have a require block at all
|
||||
// let's create one to append to
|
||||
body = f.Body().AppendNewBlock("require", nil).Body()
|
||||
insertOffset = i.workspaceMod.DeclRange.End.Byte - 1
|
||||
}
|
||||
|
||||
for _, mvc := range newRequire.Mods {
|
||||
newBlock := i.createNewModRequireBlock(mvc)
|
||||
body.AppendBlock(newBlock)
|
||||
}
|
||||
|
||||
// prefix and suffix with new lines
|
||||
// this is so that we can handle empty blocks
|
||||
// which do not have newlines
|
||||
buffer := bytes.NewBuffer([]byte{'\n'})
|
||||
buffer.Write(f.Bytes())
|
||||
buffer.WriteByte('\n')
|
||||
|
||||
return NewChangeSet(&Change{
|
||||
Operation: Insert,
|
||||
OffsetStart: insertOffset,
|
||||
Content: buffer.Bytes(),
|
||||
})
|
||||
}
|
||||
|
||||
func (i *ModInstaller) calculateChangeSet(oldRequire *modconfig.Require, newRequire *modconfig.Require) ChangeSet {
|
||||
if oldRequire.Empty() && newRequire.Empty() {
|
||||
// both are empty
|
||||
// nothing to do
|
||||
return EmptyChangeSet()
|
||||
}
|
||||
// calculate the changes
|
||||
uninstallChanges := i.calcChangesForUninstall(oldRequire, newRequire)
|
||||
installChanges := i.calcChangesForInstall(oldRequire, newRequire)
|
||||
updateChanges := i.calcChangesForUpdate(oldRequire, newRequire)
|
||||
|
||||
return MergeChangeSet(
|
||||
uninstallChanges,
|
||||
installChanges,
|
||||
updateChanges,
|
||||
)
|
||||
}
|
||||
|
||||
// creates a new "mod" block which can be written as part of the "require" block in mod.sp
|
||||
func (i *ModInstaller) createNewModRequireBlock(modVersion *modconfig.ModVersionConstraint) *hclwrite.Block {
|
||||
modRequireBlock := hclwrite.NewBlock("mod", []string{modVersion.Name})
|
||||
modRequireBlock.Body().SetAttributeValue("version", cty.StringVal(modVersion.VersionString))
|
||||
return modRequireBlock
|
||||
}
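For reference, a standalone sketch of what such a block renders to via hclwrite; the mod name and constraint are invented and the exact whitespace is up to hclwrite's formatter:

// Sketch only - render a single "mod" require block.
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2/hclwrite"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	f := hclwrite.NewEmptyFile()
	block := hclwrite.NewBlock("mod", []string{"github.com/turbot/steampipe-mod-aws-compliance"})
	block.Body().SetAttributeValue("version", cty.StringVal("^0.92"))
	f.Body().AppendBlock(block)

	fmt.Printf("%s", f.Bytes())
	// prints roughly:
	//   mod "github.com/turbot/steampipe-mod-aws-compliance" {
	//     version = "^0.92"
	//   }
}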
|
||||
|
||||
// calculates changes required in mod.sp to reflect uninstalls
|
||||
func (i *ModInstaller) calcChangesForUninstall(oldRequire *modconfig.Require, newRequire *modconfig.Require) ChangeSet {
|
||||
changes := ChangeSet{}
|
||||
for _, requiredMod := range oldRequire.Mods {
|
||||
// check if this mod is still a dependency
|
||||
if modInNew := newRequire.GetModDependency(requiredMod.Name); modInNew == nil {
|
||||
changes = append(changes, &Change{
|
||||
Operation: Delete,
|
||||
OffsetStart: requiredMod.DefRange.Start.Byte,
|
||||
OffsetEnd: requiredMod.BodyRange.End.Byte,
|
||||
})
|
||||
}
|
||||
}
|
||||
return changes
|
||||
}
|
||||
|
||||
// calculates changes required in mod.sp to reflect new installs
|
||||
func (i *ModInstaller) calcChangesForInstall(oldRequire *modconfig.Require, newRequire *modconfig.Require) ChangeSet {
|
||||
modsToAdd := []*modconfig.ModVersionConstraint{}
|
||||
for _, requiredMod := range newRequire.Mods {
|
||||
if modInOld := oldRequire.GetModDependency(requiredMod.Name); modInOld == nil {
|
||||
modsToAdd = append(modsToAdd, requiredMod)
|
||||
}
|
||||
}
|
||||
|
||||
if len(modsToAdd) == 0 {
|
||||
// an empty changeset
|
||||
return ChangeSet{}
|
||||
}
|
||||
|
||||
// create the HCL serialization for the mod blocks which needs to be placed
|
||||
// in the require block
|
||||
f := hclwrite.NewEmptyFile()
|
||||
rootBody := f.Body()
|
||||
for _, modToAdd := range modsToAdd {
|
||||
rootBody.AppendBlock(i.createNewModRequireBlock(modToAdd))
|
||||
}
|
||||
|
||||
return ChangeSet{
|
||||
&Change{
|
||||
Operation: Insert,
|
||||
OffsetStart: oldRequire.DeclRange.End.Byte - 1,
|
||||
Content: f.Bytes(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// calculates the changes required in mod.sp to reflect updates
|
||||
func (i *ModInstaller) calcChangesForUpdate(oldRequire *modconfig.Require, newRequire *modconfig.Require) ChangeSet {
|
||||
changes := ChangeSet{}
|
||||
for _, requiredMod := range oldRequire.Mods {
|
||||
modInUpdated := newRequire.GetModDependency(requiredMod.Name)
|
||||
if modInUpdated == nil {
|
||||
continue
|
||||
}
|
||||
if modInUpdated.VersionString != requiredMod.VersionString {
|
||||
changes = append(changes, &Change{
|
||||
Operation: Replace,
|
||||
OffsetStart: requiredMod.VersionRange.Start.Byte,
|
||||
OffsetEnd: requiredMod.VersionRange.End.Byte,
|
||||
Content: []byte(fmt.Sprintf("version = \"%s\"", modInUpdated.VersionString)),
|
||||
})
|
||||
}
|
||||
}
|
||||
return changes
|
||||
}
|
||||
@@ -1,18 +0,0 @@
|
||||
package modinstaller
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/Masterminds/semver/v3"
|
||||
)
|
||||
|
||||
func TestModInstaller(t *testing.T) {
|
||||
cs, err := semver.NewConstraint("^3")
|
||||
v, _ := semver.NewVersion("3.1")
|
||||
res := cs.Check(v)
|
||||
fmt.Println(res)
|
||||
|
||||
fmt.Println(cs)
|
||||
fmt.Println(err)
|
||||
}
|
||||
@@ -1,49 +0,0 @@
|
||||
package modinstaller
|
||||
|
||||
import (
|
||||
"github.com/Masterminds/semver/v3"
|
||||
"github.com/go-git/go-git/v5/plumbing"
|
||||
"github.com/turbot/steampipe/pkg/steampipeconfig/modconfig"
|
||||
"github.com/turbot/steampipe/pkg/versionhelpers"
|
||||
)
|
||||
|
||||
// ResolvedModRef is a struct to represent a resolved mod git reference
|
||||
type ResolvedModRef struct {
|
||||
// the FQN of the mod - also the Git URL of the mod repo
|
||||
Name string
|
||||
// the mod version
|
||||
Version *semver.Version
|
||||
// the version constraint
|
||||
Constraint *versionhelpers.Constraints
|
||||
// the Git branch/tag
|
||||
GitReference plumbing.ReferenceName
|
||||
// the file path for local mods
|
||||
FilePath string
|
||||
}
|
||||
|
||||
func NewResolvedModRef(requiredModVersion *modconfig.ModVersionConstraint, version *semver.Version) (*ResolvedModRef, error) {
|
||||
res := &ResolvedModRef{
|
||||
Name: requiredModVersion.Name,
|
||||
Version: version,
|
||||
Constraint: requiredModVersion.Constraint,
|
||||
// this may be an empty string
|
||||
FilePath: requiredModVersion.FilePath,
|
||||
}
|
||||
if res.FilePath == "" {
|
||||
res.setGitReference()
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (r *ResolvedModRef) setGitReference() {
|
||||
// TODO handle branches
|
||||
|
||||
// NOTE: use the original version string - this will be the tag name
|
||||
r.GitReference = plumbing.NewTagReferenceName(r.Version.Original())
|
||||
}
|
||||
|
||||
// DependencyPath returns name in the format <dependency name>@v<dependencyVersion>
|
||||
func (r *ResolvedModRef) DependencyPath() string {
|
||||
return modconfig.BuildModDependencyPath(r.Name, r.Version)
|
||||
}
|
||||
@@ -1,4 +0,0 @@
|
||||
control "c1"{
|
||||
description = "control 1"
|
||||
query = aws_compliance.query.acm_certificate_expires_30_days
|
||||
}
|
||||
7
pkg/modinstaller/testdata/mods/dep1/mod.sp
vendored
@@ -1,7 +0,0 @@
|
||||
mod "dep1" {
|
||||
require {
|
||||
mod "github.com/turbot/steampipe-mod-aws-compliance" {
|
||||
version = "0"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,4 +0,0 @@
|
||||
control "c1"{
|
||||
description = "control 1"
|
||||
query = m2.query.m2_q1
|
||||
}
|
||||
7
pkg/modinstaller/testdata/mods/dep2/mod.sp
vendored
@@ -1,7 +0,0 @@
|
||||
mod "dep2" {
|
||||
require {
|
||||
mod "github.com/kaidaguerre/steampipe-mod-m2" {
|
||||
version = "latest"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,4 +0,0 @@
|
||||
control "c1"{
|
||||
description = "control 1"
|
||||
query = m2.query.m2_q1
|
||||
}
|
||||
10
pkg/modinstaller/testdata/mods/dep3/mod.sp
vendored
@@ -1,10 +0,0 @@
|
||||
mod "dep3"{
|
||||
require {
|
||||
mod "github.com/kaidaguerre/steampipe-mod-m1" {
|
||||
version = "v1.*"
|
||||
}
|
||||
mod "github.com/kaidaguerre/steampipe-mod-m2" {
|
||||
version = "v3.1"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,4 +0,0 @@
|
||||
control "c1"{
|
||||
description = "control 1"
|
||||
query = m2.query.m2_q1
|
||||
}
|
||||
10
pkg/modinstaller/testdata/mods/dep4/mod.sp
vendored
@@ -1,10 +0,0 @@
mod "dep4" {
  require {
    mod "github.com/kaidaguerre/steampipe-mod-m1" {
      version = "v1.1"
    }
    mod "github.com/kaidaguerre/steampipe-mod-m2" {
      version = "v3.0"
    }
  }
}
@@ -1,4 +0,0 @@
control "c1" {
  description = "control 1"
  query       = m2.query.m2_q1
}
10
pkg/modinstaller/testdata/mods/dep5/mod.sp
vendored
@@ -1,10 +0,0 @@
mod "dep5" {
  require {
    mod "github.com/kaidaguerre/steampipe-mod-m1" {
      version = "v1.*"
    }
    mod "github.com/kaidaguerre/steampipe-mod-m2" {
      version = "v3.2"
    }
  }
}
@@ -1,4 +0,0 @@
control "c1" {
  description = "control 1"
  query       = m2.query.m2_q1
}
10
pkg/modinstaller/testdata/mods/dep6_x/mod.sp
vendored
@@ -1,10 +0,0 @@
mod "dep6" {
  require {
    mod "github.com/kaidaguerre/steampipe-mod-m1" {
      version = "v1.*"
    }
    mod "github.com/kaidaguerre/steampipe-mod-m2" {
      version = "v3.3"
    }
  }
}
@@ -1,4 +0,0 @@
control "c1" {
  description = "control 1"
  query       = m2.query.m2_q1
}
10
pkg/modinstaller/testdata/mods/dep7_x/mod.sp
vendored
@@ -1,10 +0,0 @@
mod "dep7" {
  require {
    mod "github.com/kaidaguerre/steampipe-mod-m1" {
      version = "v2.*"
    }
    mod "github.com/kaidaguerre/steampipe-mod-m2" {
      version = "v3.0"
    }
  }
}
@@ -1,4 +0,0 @@
control "c1" {
  description = "control 1"
  query       = m2.query.m2_q1
}
10
pkg/modinstaller/testdata/mods/dep8_x/mod.sp
vendored
@@ -1,10 +0,0 @@
mod "dep8" {
  require {
    mod "github.com/kaidaguerre/steampipe-mod-m1" {
      version = "v1.0"
    }
    mod "github.com/kaidaguerre/steampipe-mod-m2" {
      version = "v3.2"
    }
  }
}
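
Not part of this commit: a hedged sketch of how the numeric constraint strings used by the removed testdata mods ("v1.*", "v3.1", "0") behave under Masterminds semver; "latest" is not a semver range and is presumably resolved separately by the installer.

package main

import (
    "fmt"

    "github.com/Masterminds/semver/v3"
)

func main() {
    // constraint strings taken from the removed testdata mods
    for _, c := range []string{"v1.*", "v3.1", "0"} {
        constraint, err := semver.NewConstraint(c)
        if err != nil {
            // any form the library rejects is simply reported
            fmt.Println(c, "->", err)
            continue
        }
        v := semver.MustParse("v1.1.0")
        fmt.Printf("%s matches %s: %v\n", c, v, constraint.Check(v))
    }
}
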
@@ -1,3 +0,0 @@
select * from aws_account;
select * from aws_account;
select * from aws_account;
@@ -9,6 +9,8 @@ import (
|
||||
"github.com/hashicorp/hcl/v2/gohcl"
|
||||
"github.com/hashicorp/hcl/v2/hclsyntax"
|
||||
"github.com/turbot/pipe-fittings/hclhelpers"
|
||||
pmodconfig "github.com/turbot/pipe-fittings/modconfig"
|
||||
"github.com/turbot/pipe-fittings/parse"
|
||||
"github.com/turbot/steampipe/pkg/steampipeconfig/modconfig"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
"golang.org/x/exp/maps"
|
||||
@@ -54,44 +56,21 @@ func DecodeConnection(block *hcl.Block) (*modconfig.Connection, hcl.Diagnostics)
|
||||
connection.ConnectionNames = connections
|
||||
}
|
||||
|
||||
// check for nested options
|
||||
// blocks are not (currently) supported in connections
|
||||
for _, connectionBlock := range connectionContent.Blocks {
|
||||
switch connectionBlock.Type {
|
||||
case "options":
|
||||
// if we already found settings, fail
|
||||
opts, moreDiags := DecodeOptions(connectionBlock)
|
||||
if moreDiags.HasErrors() {
|
||||
diags = append(diags, moreDiags...)
|
||||
break
|
||||
}
|
||||
moreDiags = connection.SetOptions(opts, connectionBlock)
|
||||
if moreDiags.HasErrors() {
|
||||
diags = append(diags, moreDiags...)
|
||||
}
|
||||
|
||||
default:
|
||||
// this can never happen
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: fmt.Sprintf("connections do not support '%s' blocks", block.Type),
|
||||
Subject: hclhelpers.BlockRangePointer(connectionBlock),
|
||||
})
|
||||
}
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: fmt.Sprintf("connections do not support '%s' blocks", block.Type),
|
||||
Subject: hclhelpers.BlockRangePointer(connectionBlock),
|
||||
})
|
||||
}
|
||||
|
||||
// tactical - update when support for options blocks is removed
|
||||
// this needs updating to use a single block check
|
||||
// at present we do not support blocks for plugin specific connection config
|
||||
// so any blocks present in 'rest' are an error
|
||||
if hclBody, ok := rest.(*hclsyntax.Body); ok {
|
||||
for _, b := range hclBody.Blocks {
|
||||
if b.Type != "options" {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: fmt.Sprintf("connections do not support '%s' blocks", b.Type),
|
||||
Subject: hclhelpers.HclSyntaxBlockRangePointer(b),
|
||||
})
|
||||
}
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: fmt.Sprintf("connections do not support '%s' blocks", b.Type),
|
||||
Subject: hclhelpers.HclSyntaxBlockRangePointer(b),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -111,8 +90,8 @@ func decodeConnectionPluginProperty(connectionContent *hcl.BodyContent, connecti
|
||||
evalCtx := &hcl.EvalContext{Variables: make(map[string]cty.Value)}
|
||||
|
||||
diags := gohcl.DecodeExpression(connectionContent.Attributes["plugin"].Expr, evalCtx, &pluginName)
|
||||
res := newDecodeResult()
|
||||
res.handleDecodeDiags(diags)
|
||||
res := parse.NewDecodeResult()
|
||||
res.HandleDecodeDiags(diags)
|
||||
if res.Diags.HasErrors() {
|
||||
return res.Diags
|
||||
}
|
||||
@@ -140,7 +119,7 @@ func decodeConnectionPluginProperty(connectionContent *hcl.BodyContent, connecti
|
||||
return nil
|
||||
}
|
||||
|
||||
func getPluginInstanceFromDependency(dependencies []*modconfig.ResourceDependency) (string, bool) {
|
||||
func getPluginInstanceFromDependency(dependencies []*pmodconfig.ResourceDependency) (string, bool) {
|
||||
if len(dependencies) != 1 {
|
||||
return "", false
|
||||
}
|
||||
27
pkg/parse/schema.go
Normal file
@@ -0,0 +1,27 @@
package parse

import "github.com/hashicorp/hcl/v2"

var ConnectionBlockSchema = &hcl.BodySchema{
    Attributes: []hcl.AttributeSchema{
        {
            Name:     "plugin",
            Required: true,
        },
        {
            Name: "type",
        },
        {
            Name: "connections",
        },
        {
            Name: "import_schema",
        },
    },
    Blocks: []hcl.BlockHeaderSchema{
        {
            Type:       "options",
            LabelNames: []string{"type"},
        },
    },
}
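
Not part of this commit: a minimal sketch of splitting a connection body against ConnectionBlockSchema with hcl PartialContent, assuming the new pkg/parse package is importable as shown; DecodeConnection above performs the real decode.

package main

import (
    "fmt"
    "log"

    "github.com/hashicorp/hcl/v2/hclparse"
    "github.com/turbot/steampipe/pkg/parse"
)

func main() {
    // the body of a connection block, as it would appear inside connection "x" { ... }
    src := []byte(`
plugin      = "aws"
connections = ["aws_dev1", "aws_dev2"]
`)
    f, diags := hclparse.NewParser().ParseHCL(src, "connection_body.hcl")
    if diags.HasErrors() {
        log.Fatal(diags)
    }

    // PartialContent splits the body into the attributes/blocks the schema knows
    // about and a 'rest' body that would hold any plugin-specific config
    content, rest, diags := f.Body.PartialContent(parse.ConnectionBlockSchema)
    if diags.HasErrors() {
        log.Fatal(diags)
    }
    fmt.Println(len(content.Attributes), rest != nil) // 2 true
}
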
@@ -3,18 +3,19 @@ package query
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/turbot/steampipe/pkg/error_helpers"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/viper"
|
||||
"github.com/turbot/steampipe/pkg/constants"
|
||||
"github.com/turbot/steampipe/pkg/db/db_client"
|
||||
"github.com/turbot/steampipe/pkg/error_helpers"
|
||||
"github.com/turbot/steampipe/pkg/export"
|
||||
"github.com/turbot/steampipe/pkg/initialisation"
|
||||
"github.com/turbot/steampipe/pkg/statushooks"
|
||||
"github.com/turbot/steampipe/pkg/steampipeconfig/modconfig"
|
||||
"github.com/turbot/steampipe/pkg/workspace"
|
||||
)
|
||||
|
||||
type InitData struct {
|
||||
@@ -34,40 +35,18 @@ func NewInitData(ctx context.Context, args []string) *InitData {
|
||||
InitData: *initialisation.NewInitData(),
|
||||
Loaded: make(chan struct{}),
|
||||
}
|
||||
// for interactive mode - do the home directory modfile check before init
|
||||
if viper.GetBool(constants.ConfigKeyInteractive) {
|
||||
path := viper.GetString(constants.ArgModLocation)
|
||||
modFilePath, _ := workspace.FindModFilePath(path)
|
||||
|
||||
// if the user cancels - no need to continue init
|
||||
if err := workspace.HomeDirectoryModfileCheck(ctx, filepath.Dir(modFilePath)); err != nil {
|
||||
i.Result.Error = err
|
||||
close(i.Loaded)
|
||||
return i
|
||||
}
|
||||
// home dir modfile already done - set the viper config
|
||||
viper.Set(constants.ConfigKeyBypassHomeDirModfileWarning, true)
|
||||
}
|
||||
|
||||
statushooks.SetStatus(ctx, "Loading workspace")
|
||||
|
||||
// load workspace variables synchronously
|
||||
w, inputVariables, errAndWarnings := workspace.LoadWorkspaceVars(ctx)
|
||||
if errAndWarnings.GetError() != nil {
|
||||
i.Result.Error = fmt.Errorf("failed to load workspace: %s", error_helpers.HandleCancelError(errAndWarnings.GetError()).Error())
|
||||
return i
|
||||
}
|
||||
|
||||
i.Result.AddWarnings(errAndWarnings.Warnings...)
|
||||
i.Workspace = w
|
||||
|
||||
go i.init(ctx, inputVariables, args)
|
||||
go i.init(ctx, args)
|
||||
|
||||
return i
|
||||
}
|
||||
|
||||
func queryExporters() []export.Exporter {
|
||||
return []export.Exporter{&export.SnapshotExporter{}}
|
||||
// TODO #snapshot
|
||||
return nil
|
||||
//return []export.Exporter{&export.SnapshotExporter{}}
|
||||
}
|
||||
|
||||
func (i *InitData) Cancel() {
|
||||
@@ -97,7 +76,7 @@ func (i *InitData) Cleanup(ctx context.Context) {
|
||||
}
|
||||
}
|
||||
|
||||
func (i *InitData) init(ctx context.Context, inputVariables *modconfig.ModVariableMap, args []string) {
|
||||
func (i *InitData) init(ctx context.Context, args []string) {
|
||||
defer func() {
|
||||
close(i.Loaded)
|
||||
// clear the cancelInitialisation function
|
||||
@@ -115,21 +94,13 @@ func (i *InitData) init(ctx context.Context, inputVariables *modconfig.ModVariab
|
||||
}
|
||||
}
|
||||
|
||||
// load the workspace mod (this load is asynchronous as it is within the async init function)
|
||||
errAndWarnings := i.Workspace.LoadWorkspaceMod(ctx, inputVariables)
|
||||
i.Result.AddWarnings(errAndWarnings.Warnings...)
|
||||
if errAndWarnings.GetError() != nil {
|
||||
i.Result.Error = fmt.Errorf("failed to load workspace mod: %s", error_helpers.HandleCancelError(errAndWarnings.GetError()).Error())
|
||||
return
|
||||
}
|
||||
|
||||
// set max DB connections to 1
|
||||
viper.Set(constants.ArgMaxParallel, 1)
|
||||
|
||||
statushooks.SetStatus(ctx, "Resolving arguments")
|
||||
|
||||
// convert the query or sql file arg into an array of executable queries - check named queries in the current workspace
|
||||
resolvedQueries, err := i.Workspace.GetQueriesFromArgs(args)
|
||||
resolvedQueries, err := getQueriesFromArgs(args)
|
||||
if err != nil {
|
||||
i.Result.Error = err
|
||||
return
|
||||
@@ -156,3 +127,83 @@ func (i *InitData) init(ctx context.Context, inputVariables *modconfig.ModVariab
|
||||
}),
|
||||
)
|
||||
}
|
||||
|
||||
// getQueriesFromArgs retrieves queries from args
|
||||
//
|
||||
// For each arg check if it is a named query or a file, before falling back to treating it as sql
|
||||
func getQueriesFromArgs(args []string) ([]*modconfig.ResolvedQuery, error) {
|
||||
|
||||
var queries = make([]*modconfig.ResolvedQuery, len(args))
|
||||
for idx, arg := range args {
|
||||
resolvedQuery, err := ResolveQueryAndArgsFromSQLString(arg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(resolvedQuery.ExecuteSQL) > 0 {
|
||||
// default name to the query text
|
||||
resolvedQuery.Name = resolvedQuery.ExecuteSQL
|
||||
|
||||
queries[idx] = resolvedQuery
|
||||
}
|
||||
}
|
||||
return queries, nil
|
||||
}
|
||||
|
||||
// ResolveQueryAndArgsFromSQLString attempts to resolve 'arg' to a query and query args
|
||||
func ResolveQueryAndArgsFromSQLString(sqlString string) (*modconfig.ResolvedQuery, error) {
|
||||
var err error
|
||||
|
||||
// 1) is this a file?
|
||||
// get absolute filename
|
||||
filePath, err := filepath.Abs(sqlString)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s", err.Error())
|
||||
}
|
||||
fileQuery, fileExists, err := getQueryFromFile(filePath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s", err.Error())
|
||||
}
|
||||
if fileExists {
|
||||
if fileQuery.ExecuteSQL == "" {
|
||||
error_helpers.ShowWarning(fmt.Sprintf("file '%s' does not contain any data", filePath))
|
||||
// (just return the empty query - it will be filtered above)
|
||||
}
|
||||
return fileQuery, nil
|
||||
}
|
||||
// the argument cannot be resolved as an existing file
|
||||
// if it has a .sql suffix (i.e. we believe the user meant to specify a file) return a file not found error
|
||||
if strings.HasSuffix(strings.ToLower(sqlString), ".sql") {
|
||||
return nil, fmt.Errorf("file '%s' does not exist", filePath)
|
||||
}
|
||||
|
||||
// 2) just use the query string as is and assume it is valid SQL
|
||||
return &modconfig.ResolvedQuery{RawSQL: sqlString, ExecuteSQL: sqlString}, nil
|
||||
}
|
||||
|
||||
// try to treat the input string as a file name and if it exists, return its contents
|
||||
func getQueryFromFile(input string) (*modconfig.ResolvedQuery, bool, error) {
|
||||
// get absolute filename
|
||||
path, err := filepath.Abs(input)
|
||||
if err != nil {
|
||||
//nolint:golint,nilerr // if this gives any error, return not exist
|
||||
return nil, false, nil
|
||||
}
|
||||
|
||||
// does it exist?
|
||||
if _, err := os.Stat(path); err != nil {
|
||||
//nolint:golint,nilerr // if this gives any error, return not exist (we may get a not found or a path too long for example)
|
||||
return nil, false, nil
|
||||
}
|
||||
|
||||
// read file
|
||||
fileBytes, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, true, err
|
||||
}
|
||||
|
||||
res := &modconfig.ResolvedQuery{
|
||||
RawSQL: string(fileBytes),
|
||||
ExecuteSQL: string(fileBytes),
|
||||
}
|
||||
return res, true, nil
|
||||
}
|
||||
|
||||
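
Not part of this commit: a hedged usage sketch of ResolveQueryAndArgsFromSQLString, assuming the package import path github.com/turbot/steampipe/pkg/query; a plain SQL string that is not an existing file is used as-is, while a missing .sql path is reported as an error.

package main

import (
    "fmt"
    "log"

    "github.com/turbot/steampipe/pkg/query"
)

func main() {
    // a plain SQL string that does not resolve to an existing file is used as-is
    q, err := query.ResolveQueryAndArgsFromSQLString("select * from aws_account")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(q.ExecuteSQL) // select * from aws_account

    // a path ending in .sql that does not exist is reported as an error
    if _, err := query.ResolveQueryAndArgsFromSQLString("missing.sql"); err != nil {
        fmt.Println(err) // file '<abs path>/missing.sql' does not exist
    }
}
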
@@ -1,122 +0,0 @@
|
||||
package snapshot
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/turbot/steampipe/pkg/dashboard/dashboardtypes"
|
||||
"github.com/turbot/steampipe/pkg/steampipeconfig/modconfig"
|
||||
)
|
||||
|
||||
// DashboardContainerRun is a struct representing a container run
|
||||
type DashboardContainerRun struct {
|
||||
DashboardParentImpl
|
||||
|
||||
dashboardNode *modconfig.DashboardContainer
|
||||
}
|
||||
|
||||
func (r *DashboardContainerRun) AsTreeNode() *dashboardtypes.SnapshotTreeNode {
|
||||
res := &dashboardtypes.SnapshotTreeNode{
|
||||
Name: r.Name,
|
||||
NodeType: r.NodeType,
|
||||
Children: make([]*dashboardtypes.SnapshotTreeNode, len(r.children)),
|
||||
}
|
||||
for i, c := range r.children {
|
||||
res.Children[i] = c.AsTreeNode()
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func NewDashboardContainerRun(container *modconfig.DashboardContainer, parent dashboardtypes.DashboardParent, executionTree *DashboardExecutionTree) (*DashboardContainerRun, error) {
|
||||
children := container.GetChildren()
|
||||
|
||||
r := &DashboardContainerRun{dashboardNode: container}
|
||||
// create NewDashboardTreeRunImpl
|
||||
// (we must create after creating the run as it requires a ref to the run)
|
||||
r.DashboardParentImpl = newDashboardParentImpl(container, parent, r, executionTree)
|
||||
|
||||
if container.Title != nil {
|
||||
r.Title = *container.Title
|
||||
}
|
||||
|
||||
if container.Width != nil {
|
||||
r.Width = *container.Width
|
||||
}
|
||||
r.childCompleteChan = make(chan dashboardtypes.DashboardTreeRun, len(children))
|
||||
for _, child := range children {
|
||||
var childRun dashboardtypes.DashboardTreeRun
|
||||
//nolint:golint // using a global var here to maintain parity with definition of childRun
|
||||
var err error
|
||||
switch i := child.(type) {
|
||||
case *modconfig.DashboardContainer:
|
||||
childRun, err = NewDashboardContainerRun(i, r, executionTree)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case *modconfig.Dashboard:
|
||||
childRun, err = NewDashboardRun(i, r, executionTree)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
default:
|
||||
// ensure this item is a DashboardLeafNode
|
||||
leafNode, ok := i.(modconfig.DashboardLeafNode)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("child %s does not implement DashboardLeafNode", i.Name())
|
||||
}
|
||||
|
||||
childRun, err = NewLeafRun(leafNode, r, executionTree)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// should never happen - container children must be either container or counter
|
||||
if childRun == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// if our child has not completed, we have not completed
|
||||
if childRun.GetRunStatus() == dashboardtypes.RunInitialized {
|
||||
r.Status = dashboardtypes.RunInitialized
|
||||
}
|
||||
r.children = append(r.children, childRun)
|
||||
}
|
||||
// add r into execution tree
|
||||
executionTree.runs[r.Name] = r
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// Initialise implements DashboardTreeRun
|
||||
func (r *DashboardContainerRun) Initialise(ctx context.Context) {
|
||||
// initialise our children
|
||||
if err := r.initialiseChildren(ctx); err != nil {
|
||||
r.SetError(ctx, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Execute implements DashboardTreeRun
|
||||
// execute all children and wait for them to complete
|
||||
func (r *DashboardContainerRun) Execute(ctx context.Context) {
|
||||
// execute all children asynchronously
|
||||
r.executeChildrenAsync(ctx)
|
||||
|
||||
// try to set status as running (will be set to blocked if any children are blocked)
|
||||
r.setRunning(ctx)
|
||||
|
||||
// wait for children to complete
|
||||
err := <-r.waitForChildrenAsync(ctx)
|
||||
if err == nil {
|
||||
log.Printf("[TRACE] %s Execute waitForChildrenAsync returned success", r.Name)
|
||||
// set complete status on dashboard
|
||||
r.SetComplete(ctx)
|
||||
} else {
|
||||
log.Printf("[TRACE] %s Execute waitForChildrenAsync returned err %s", r.Name, err.Error())
|
||||
r.SetError(ctx, err)
|
||||
}
|
||||
}
|
||||
|
||||
// IsSnapshotPanel implements SnapshotPanel
|
||||
func (*DashboardContainerRun) IsSnapshotPanel() {}
|
||||
@@ -1,315 +0,0 @@
|
||||
package snapshot
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/turbot/pipe-fittings/utils"
|
||||
"github.com/turbot/steampipe/pkg/connection_sync"
|
||||
"github.com/turbot/steampipe/pkg/dashboard/dashboardevents"
|
||||
"github.com/turbot/steampipe/pkg/dashboard/dashboardtypes"
|
||||
"github.com/turbot/steampipe/pkg/db/db_common"
|
||||
"github.com/turbot/steampipe/pkg/steampipeconfig/modconfig"
|
||||
"github.com/turbot/steampipe/pkg/workspace"
|
||||
"golang.org/x/exp/maps"
|
||||
)
|
||||
|
||||
// DashboardExecutionTree is a structure representing the control result hierarchy
|
||||
type DashboardExecutionTree struct {
|
||||
Root dashboardtypes.DashboardTreeRun
|
||||
|
||||
dashboardName string
|
||||
sessionId string
|
||||
client db_common.Client
|
||||
// map of executing runs, keyed by full name
|
||||
runs map[string]dashboardtypes.DashboardTreeRun
|
||||
workspace *workspace.Workspace
|
||||
runComplete chan dashboardtypes.DashboardTreeRun
|
||||
|
||||
// map of subscribers to notify when an input value changes
|
||||
cancel context.CancelFunc
|
||||
inputLock sync.Mutex
|
||||
inputValues map[string]any
|
||||
id string
|
||||
}
|
||||
|
||||
func NewDashboardExecutionTree(rootName string, sessionId string, client db_common.Client, workspace *workspace.Workspace) (*DashboardExecutionTree, error) {
|
||||
// now populate the DashboardExecutionTree
|
||||
executionTree := &DashboardExecutionTree{
|
||||
dashboardName: rootName,
|
||||
sessionId: sessionId,
|
||||
client: client,
|
||||
runs: make(map[string]dashboardtypes.DashboardTreeRun),
|
||||
workspace: workspace,
|
||||
runComplete: make(chan dashboardtypes.DashboardTreeRun, 1),
|
||||
inputValues: make(map[string]any),
|
||||
}
|
||||
executionTree.id = fmt.Sprintf("%p", executionTree)
|
||||
|
||||
// create the root run node (either a report run or a counter run)
|
||||
root, err := executionTree.createRootItem(rootName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
executionTree.Root = root
|
||||
return executionTree, nil
|
||||
}
|
||||
|
||||
func (e *DashboardExecutionTree) createRootItem(rootName string) (dashboardtypes.DashboardTreeRun, error) {
|
||||
parsedName, err := modconfig.ParseResourceName(rootName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fullName, err := parsedName.ToFullName()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if parsedName.ItemType == "" {
|
||||
return nil, fmt.Errorf("root item is not valid named resource")
|
||||
}
|
||||
// if no mod is specified, assume the workspace mod
|
||||
if parsedName.Mod == "" {
|
||||
parsedName.Mod = e.workspace.Mod.ShortName
|
||||
rootName = fullName
|
||||
}
|
||||
switch parsedName.ItemType {
|
||||
case modconfig.BlockTypeQuery:
|
||||
// wrap in a table
|
||||
query, ok := e.workspace.GetResourceMaps().Queries[rootName]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("query '%s' does not exist in workspace", rootName)
|
||||
}
|
||||
// wrap this in a chart and a dashboard
|
||||
dashboard, err := modconfig.NewQueryDashboard(query)
|
||||
// TACTICAL - set the execution tree dashboard name from the query dashboard
|
||||
e.dashboardName = dashboard.Name()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewDashboardRun(dashboard, e, e)
|
||||
default:
|
||||
return nil, fmt.Errorf("reporting type %s cannot be executed as dashboard", parsedName.ItemType)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *DashboardExecutionTree) Execute(ctx context.Context) {
|
||||
startTime := time.Now()
|
||||
|
||||
searchPath := e.client.GetRequiredSessionSearchPath()
|
||||
|
||||
// store context
|
||||
cancelCtx, cancel := context.WithCancel(ctx)
|
||||
e.cancel = cancel
|
||||
workspace := e.workspace
|
||||
|
||||
// perform any necessary initialisation
|
||||
// (e.g. check run creates the control execution tree)
|
||||
e.Root.Initialise(cancelCtx)
|
||||
if e.Root.GetError() != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// TODO should we always wait even with non custom search path?
|
||||
// if there is a custom search path, wait until the first connection of each plugin has loaded
|
||||
if customSearchPath := e.client.GetCustomSearchPath(); customSearchPath != nil {
|
||||
if err := connection_sync.WaitForSearchPathSchemas(ctx, e.client, customSearchPath); err != nil {
|
||||
e.Root.SetError(ctx, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
panels := e.BuildSnapshotPanels()
|
||||
// build map of those variables referenced by the dashboard run
|
||||
referencedVariables := GetReferencedVariables(e.Root, e.workspace)
|
||||
|
||||
immutablePanels, err := utils.JsonCloneToMap(panels)
|
||||
if err != nil {
|
||||
e.SetError(ctx, err)
|
||||
return
|
||||
}
|
||||
workspace.PublishDashboardEvent(ctx, &dashboardevents.ExecutionStarted{
|
||||
Root: e.Root,
|
||||
Session: e.sessionId,
|
||||
ExecutionId: e.id,
|
||||
Panels: immutablePanels,
|
||||
Inputs: e.inputValues,
|
||||
Variables: referencedVariables,
|
||||
StartTime: startTime,
|
||||
})
|
||||
defer func() {
|
||||
|
||||
e := &dashboardevents.ExecutionComplete{
|
||||
Root: e.Root,
|
||||
Session: e.sessionId,
|
||||
ExecutionId: e.id,
|
||||
Panels: panels,
|
||||
Inputs: e.inputValues,
|
||||
Variables: referencedVariables,
|
||||
// search path elements are quoted (for consumption by postgres)
|
||||
// unquote them
|
||||
SearchPath: utils.UnquoteStringArray(searchPath),
|
||||
StartTime: startTime,
|
||||
EndTime: time.Now(),
|
||||
}
|
||||
workspace.PublishDashboardEvent(ctx, e)
|
||||
}()
|
||||
|
||||
log.Println("[TRACE]", "begin DashboardExecutionTree.Execute")
|
||||
defer log.Println("[TRACE]", "end DashboardExecutionTree.Execute")
|
||||
|
||||
if e.GetRunStatus().IsFinished() {
|
||||
// there must be no nodes to execute
|
||||
log.Println("[TRACE]", "execution tree already complete")
|
||||
return
|
||||
}
|
||||
|
||||
// execute synchronously
|
||||
e.Root.Execute(cancelCtx)
|
||||
}
|
||||
|
||||
// GetRunStatus returns the status of the Root run
|
||||
func (e *DashboardExecutionTree) GetRunStatus() dashboardtypes.RunStatus {
|
||||
return e.Root.GetRunStatus()
|
||||
}
|
||||
|
||||
// SetError sets the error on the Root run
|
||||
func (e *DashboardExecutionTree) SetError(ctx context.Context, err error) {
|
||||
e.Root.SetError(ctx, err)
|
||||
}
|
||||
|
||||
// GetName implements DashboardParent
|
||||
// use mod short name - this will be the root name for all child runs
|
||||
func (e *DashboardExecutionTree) GetName() string {
|
||||
return e.workspace.Mod.ShortName
|
||||
}
|
||||
|
||||
// GetParent implements DashboardTreeRun
|
||||
func (e *DashboardExecutionTree) GetParent() dashboardtypes.DashboardParent {
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetNodeType implements DashboardTreeRun
|
||||
func (*DashboardExecutionTree) GetNodeType() string {
|
||||
panic("should never call for DashboardExecutionTree")
|
||||
}
|
||||
|
||||
func (e *DashboardExecutionTree) SetInputValues(inputValues map[string]any) {
|
||||
log.Printf("[TRACE] SetInputValues")
|
||||
e.inputLock.Lock()
|
||||
defer e.inputLock.Unlock()
|
||||
|
||||
// we only support inputs if root is a dashboard (NOT a benchmark)
|
||||
runtimeDependencyPublisher, ok := e.Root.(RuntimeDependencyPublisher)
|
||||
if !ok {
|
||||
// should never happen
|
||||
log.Printf("[WARN] SetInputValues called but root Dashboard run is not a RuntimeDependencyPublisher: %s", e.Root.GetName())
|
||||
return
|
||||
}
|
||||
|
||||
for name, value := range inputValues {
|
||||
log.Printf("[TRACE] DashboardExecutionTree SetInput %s = %v", name, value)
|
||||
e.inputValues[name] = value
|
||||
// publish runtime dependency
|
||||
runtimeDependencyPublisher.PublishRuntimeDependencyValue(name, &dashboardtypes.ResolvedRuntimeDependencyValue{Value: value})
|
||||
}
|
||||
}
|
||||
|
||||
// ChildCompleteChan implements DashboardParent
|
||||
func (e *DashboardExecutionTree) ChildCompleteChan() chan dashboardtypes.DashboardTreeRun {
|
||||
return e.runComplete
|
||||
}
|
||||
|
||||
// ChildStatusChanged implements DashboardParent
|
||||
func (*DashboardExecutionTree) ChildStatusChanged(context.Context) {}
|
||||
|
||||
func (e *DashboardExecutionTree) Cancel() {
|
||||
// if we have not completed, and already have a cancel function - cancel
|
||||
if e.GetRunStatus().IsFinished() || e.cancel == nil {
|
||||
log.Printf("[TRACE] DashboardExecutionTree Cancel NOT cancelling status %s cancel func %p", e.GetRunStatus(), e.cancel)
|
||||
return
|
||||
}
|
||||
|
||||
log.Printf("[TRACE] DashboardExecutionTree Cancel - calling cancel")
|
||||
e.cancel()
|
||||
|
||||
// if there are any children, wait for the execution to complete
|
||||
if !e.Root.RunComplete() {
|
||||
<-e.runComplete
|
||||
}
|
||||
|
||||
log.Printf("[TRACE] DashboardExecutionTree Cancel - all children complete")
|
||||
}
|
||||
|
||||
func (e *DashboardExecutionTree) BuildSnapshotPanels() map[string]dashboardtypes.SnapshotPanel {
|
||||
// just build from e.runs
|
||||
res := map[string]dashboardtypes.SnapshotPanel{}
|
||||
|
||||
for name, run := range e.runs {
|
||||
res[name] = run.(dashboardtypes.SnapshotPanel)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// InputRuntimeDependencies returns the names of all inputs which are runtime dependencies
|
||||
func (e *DashboardExecutionTree) InputRuntimeDependencies() []string {
|
||||
var deps = map[string]struct{}{}
|
||||
for _, r := range e.runs {
|
||||
if leafRun, ok := r.(*LeafRun); ok {
|
||||
for _, r := range leafRun.runtimeDependencies {
|
||||
if r.Dependency.PropertyPath.ItemType == modconfig.BlockTypeInput {
|
||||
deps[r.Dependency.SourceResourceName()] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return maps.Keys(deps)
|
||||
}
|
||||
|
||||
// GetChildren implements DashboardParent
|
||||
func (e *DashboardExecutionTree) GetChildren() []dashboardtypes.DashboardTreeRun {
|
||||
return []dashboardtypes.DashboardTreeRun{e.Root}
|
||||
}
|
||||
|
||||
// ChildrenComplete implements DashboardParent
|
||||
func (e *DashboardExecutionTree) ChildrenComplete() bool {
|
||||
return e.Root.RunComplete()
|
||||
}
|
||||
|
||||
// Tactical: Empty implementations of DashboardParent functions
|
||||
// TODO remove need for this
|
||||
|
||||
func (e *DashboardExecutionTree) Initialise(ctx context.Context) {
|
||||
panic("should never call for DashboardExecutionTree")
|
||||
}
|
||||
|
||||
func (e *DashboardExecutionTree) GetTitle() string {
|
||||
panic("should never call for DashboardExecutionTree")
|
||||
}
|
||||
|
||||
func (e *DashboardExecutionTree) GetError() error {
|
||||
panic("should never call for DashboardExecutionTree")
|
||||
}
|
||||
|
||||
func (e *DashboardExecutionTree) SetComplete(ctx context.Context) {
|
||||
panic("should never call for DashboardExecutionTree")
|
||||
}
|
||||
|
||||
func (e *DashboardExecutionTree) RunComplete() bool {
|
||||
panic("should never call for DashboardExecutionTree")
|
||||
}
|
||||
|
||||
func (e *DashboardExecutionTree) GetInputsDependingOn(s string) []string {
|
||||
panic("should never call for DashboardExecutionTree")
|
||||
}
|
||||
|
||||
func (*DashboardExecutionTree) AsTreeNode() *dashboardtypes.SnapshotTreeNode {
|
||||
panic("should never call for DashboardExecutionTree")
|
||||
}
|
||||
|
||||
func (*DashboardExecutionTree) GetResource() modconfig.DashboardLeafNode {
|
||||
panic("should never call for DashboardExecutionTree")
|
||||
}
|
||||
@@ -1,170 +0,0 @@
|
||||
package snapshot
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"sync"
|
||||
|
||||
"github.com/turbot/steampipe/pkg/dashboard/dashboardtypes"
|
||||
"github.com/turbot/steampipe/pkg/error_helpers"
|
||||
"github.com/turbot/steampipe/pkg/steampipeconfig/modconfig"
|
||||
)
|
||||
|
||||
type DashboardParentImpl struct {
|
||||
DashboardTreeRunImpl
|
||||
children []dashboardtypes.DashboardTreeRun
|
||||
childCompleteChan chan dashboardtypes.DashboardTreeRun
|
||||
// are we blocked by a child run
|
||||
blockedByChild bool
|
||||
childStatusLock *sync.Mutex
|
||||
}
|
||||
|
||||
func newDashboardParentImpl(resource modconfig.DashboardLeafNode, parent dashboardtypes.DashboardParent, run dashboardtypes.DashboardTreeRun, executionTree *DashboardExecutionTree) DashboardParentImpl {
|
||||
return DashboardParentImpl{
|
||||
DashboardTreeRunImpl: NewDashboardTreeRunImpl(resource, parent, run, executionTree),
|
||||
childStatusLock: new(sync.Mutex),
|
||||
}
|
||||
}
|
||||
|
||||
func (r *DashboardParentImpl) initialiseChildren(ctx context.Context) error {
|
||||
var errors []error
|
||||
for _, child := range r.children {
|
||||
child.Initialise(ctx)
|
||||
|
||||
if err := child.GetError(); err != nil {
|
||||
errors = append(errors, err)
|
||||
}
|
||||
}
|
||||
|
||||
return error_helpers.CombineErrors(errors...)
|
||||
|
||||
}
|
||||
|
||||
// GetChildren implements DashboardTreeRun
|
||||
func (r *DashboardParentImpl) GetChildren() []dashboardtypes.DashboardTreeRun {
|
||||
return r.children
|
||||
}
|
||||
|
||||
// ChildrenComplete implements DashboardTreeRun
|
||||
func (r *DashboardParentImpl) ChildrenComplete() bool {
|
||||
for _, child := range r.children {
|
||||
if !child.RunComplete() {
|
||||
log.Printf("[TRACE] %s ChildrenComplete child %s NOT complete state %s", r.Name, child.GetName(), child.GetRunStatus())
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (r *DashboardParentImpl) ChildCompleteChan() chan dashboardtypes.DashboardTreeRun {
|
||||
return r.childCompleteChan
|
||||
}
|
||||
func (r *DashboardParentImpl) createChildCompleteChan() {
|
||||
// create buffered child complete chan
|
||||
if childCount := len(r.children); childCount > 0 {
|
||||
r.childCompleteChan = make(chan dashboardtypes.DashboardTreeRun, childCount)
|
||||
}
|
||||
}
|
||||
|
||||
// if this leaf run has children (including with runs) execute them asynchronously
|
||||
func (r *DashboardParentImpl) executeChildrenAsync(ctx context.Context) {
|
||||
for _, c := range r.children {
|
||||
go c.Execute(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
// if this leaf run has with runs execute them asynchronously
|
||||
func (r *DashboardParentImpl) executeWithsAsync(ctx context.Context) {
|
||||
for _, c := range r.children {
|
||||
if c.GetNodeType() == modconfig.BlockTypeWith {
|
||||
go c.Execute(ctx)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *DashboardParentImpl) waitForChildrenAsync(ctx context.Context) chan error {
|
||||
log.Printf("[TRACE] %s waitForChildrenAsync", r.Name)
|
||||
var doneChan = make(chan error)
|
||||
if len(r.children) == 0 {
|
||||
log.Printf("[TRACE] %s waitForChildrenAsync - no children so we're done", r.Name)
|
||||
// if there are no children, return a closed channel so we do not wait
|
||||
close(doneChan)
|
||||
return doneChan
|
||||
}
|
||||
|
||||
go func() {
|
||||
// wait for children to complete
|
||||
var errors []error
|
||||
for !(r.ChildrenComplete()) {
|
||||
completeChild := <-r.childCompleteChan
|
||||
log.Printf("[TRACE] %s waitForChildrenAsync got child complete for %s", r.Name, completeChild.GetName())
|
||||
if completeChild.GetRunStatus().IsError() {
|
||||
errors = append(errors, completeChild.GetError())
|
||||
log.Printf("[TRACE] %s child %s has error %v", r.Name, completeChild.GetName(), completeChild.GetError())
|
||||
}
|
||||
}
|
||||
|
||||
log.Printf("[TRACE] %s ALL children and withs complete, errors: %v", r.Name, errors)
|
||||
|
||||
// so all children have completed - check for errors
|
||||
// TODO [node_reuse] format better error https://github.com/turbot/steampipe/issues/2920
|
||||
err := error_helpers.CombineErrors(errors...)
|
||||
|
||||
// if context is cancelled, just return context cancellation error
|
||||
if ctx.Err() != nil {
|
||||
err = ctx.Err()
|
||||
}
|
||||
|
||||
doneChan <- err
|
||||
}()
|
||||
|
||||
return doneChan
|
||||
}
|
||||
|
||||
func (r *DashboardParentImpl) ChildStatusChanged(ctx context.Context) {
|
||||
// this function may be called asynchronously by children
|
||||
r.childStatusLock.Lock()
|
||||
defer r.childStatusLock.Unlock()
|
||||
|
||||
// if we are currently blocked by a child or we are currently in running state,
|
||||
// call setRunning() to determine whether any of our children are now blocked
|
||||
if r.blockedByChild || r.GetRunStatus() == dashboardtypes.RunRunning {
|
||||
log.Printf("[TRACE] %s ChildStatusChanged - calling setRunning to see if we are still running, status %s blockedByChild %v", r.Name, r.GetRunStatus(), r.blockedByChild)
|
||||
|
||||
// try setting our status to running again
|
||||
r.setRunning(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
// override DashboardTreeRunImpl) setStatus(
|
||||
func (r *DashboardParentImpl) setRunning(ctx context.Context) {
|
||||
// if the run is already complete (for example, canceled), do nothing
|
||||
if r.GetRunStatus().IsFinished() {
|
||||
log.Printf("[TRACE] %s setRunning - run already terminated - current state %s - NOT setting running", r.Name, r.GetRunStatus())
|
||||
return
|
||||
}
|
||||
|
||||
status := dashboardtypes.RunRunning
|
||||
// if we are trying to set status to running, check if any of our children are blocked,
|
||||
// and if so set our status to blocked
|
||||
|
||||
// if any children are blocked, we are blocked
|
||||
for _, c := range r.children {
|
||||
if c.GetRunStatus() == dashboardtypes.RunBlocked {
|
||||
status = dashboardtypes.RunBlocked
|
||||
r.blockedByChild = true
|
||||
break
|
||||
}
|
||||
// to get here, no children can be blocked - clear blockedByChild
|
||||
r.blockedByChild = false
|
||||
}
|
||||
|
||||
// set status if it has changed
|
||||
if status != r.GetRunStatus() {
|
||||
log.Printf("[TRACE] %s setRunning - setting state %s, blockedByChild %v", r.Name, status, r.blockedByChild)
|
||||
r.DashboardTreeRunImpl.setStatus(ctx, status)
|
||||
} else {
|
||||
log.Printf("[TRACE] %s setRunning - state unchanged %s, blockedByChild %v", r.Name, status, r.blockedByChild)
|
||||
}
|
||||
}
|
||||
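
Not part of this commit: a self-contained sketch of the completion pattern the removed DashboardParentImpl used - children run concurrently and report on a buffered channel sized to the child count, and the parent drains it until every child is done (mirroring createChildCompleteChan and waitForChildrenAsync).

package main

import (
    "fmt"
    "time"
)

type childRun struct {
    name string
    done chan<- string
}

func (c childRun) execute() {
    time.Sleep(10 * time.Millisecond) // stand-in for real work
    c.done <- c.name
}

func main() {
    names := []string{"chart", "table", "input"}
    // buffer sized to the child count so children never block when reporting
    done := make(chan string, len(names))

    for _, n := range names {
        c := childRun{name: n, done: done}
        go c.execute()
    }

    // wait for every child to report, as waitForChildrenAsync does
    for i := 0; i < len(names); i++ {
        fmt.Println("complete:", <-done)
    }
}
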
@@ -1,176 +0,0 @@
|
||||
package snapshot
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/turbot/steampipe/pkg/dashboard/dashboardtypes"
|
||||
"github.com/turbot/steampipe/pkg/steampipeconfig/modconfig"
|
||||
)
|
||||
|
||||
// DashboardRun is a struct representing a container run
|
||||
type DashboardRun struct {
|
||||
runtimeDependencyPublisherImpl
|
||||
|
||||
parent dashboardtypes.DashboardParent
|
||||
dashboard *modconfig.Dashboard
|
||||
}
|
||||
|
||||
func (r *DashboardRun) AsTreeNode() *dashboardtypes.SnapshotTreeNode {
|
||||
res := &dashboardtypes.SnapshotTreeNode{
|
||||
Name: r.Name,
|
||||
NodeType: r.NodeType,
|
||||
Children: make([]*dashboardtypes.SnapshotTreeNode, 0, len(r.children)),
|
||||
}
|
||||
|
||||
for _, c := range r.children {
|
||||
// NOTE: exclude with runs
|
||||
if c.GetNodeType() != modconfig.BlockTypeWith {
|
||||
res.Children = append(res.Children, c.AsTreeNode())
|
||||
}
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func NewDashboardRun(dashboard *modconfig.Dashboard, parent dashboardtypes.DashboardParent, executionTree *DashboardExecutionTree) (*DashboardRun, error) {
|
||||
r := &DashboardRun{
|
||||
parent: parent,
|
||||
dashboard: dashboard,
|
||||
}
|
||||
// create RuntimeDependencyPublisherImpl - this handles 'with' run creation and runtime dependency resolution
|
||||
// (we must create after creating the run as it requires a ref to the run)
|
||||
r.runtimeDependencyPublisherImpl = newRuntimeDependencyPublisherImpl(dashboard, parent, r, executionTree)
|
||||
// add r into execution tree BEFORE creating child runs or initialising runtime dependencies
|
||||
// - this is so child runs can find this dashboard run
|
||||
executionTree.runs[r.Name] = r
|
||||
|
||||
// set inputs map on RuntimeDependencyPublisherImpl BEFORE creating child runs
|
||||
r.inputs = dashboard.GetInputs()
|
||||
|
||||
// after setting inputs, init runtime dependencies. this creates with runs and adds them to our children
|
||||
err := r.initWiths()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = r.createChildRuns(executionTree)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// create buffered channel for children to report their completion
|
||||
r.createChildCompleteChan()
|
||||
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// Initialise implements DashboardTreeRun
|
||||
func (r *DashboardRun) Initialise(ctx context.Context) {
|
||||
// initialise our children
|
||||
if err := r.initialiseChildren(ctx); err != nil {
|
||||
r.SetError(ctx, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Execute implements DashboardTreeRun
|
||||
// execute all children and wait for them to complete
|
||||
func (r *DashboardRun) Execute(ctx context.Context) {
|
||||
r.executeChildrenAsync(ctx)
|
||||
|
||||
// try to set status as running (will be set to blocked if any children are blocked)
|
||||
r.setRunning(ctx)
|
||||
|
||||
// wait for children to complete
|
||||
err := <-r.waitForChildrenAsync(ctx)
|
||||
if err == nil {
|
||||
log.Printf("[TRACE] Execute run %s all children complete, success", r.Name)
|
||||
// set complete status on dashboard
|
||||
r.SetComplete(ctx)
|
||||
} else {
|
||||
log.Printf("[TRACE] Execute run %s all children complete, error: %s", r.Name, err.Error())
|
||||
r.SetError(ctx, err)
|
||||
}
|
||||
}
|
||||
|
||||
// IsSnapshotPanel implements SnapshotPanel
|
||||
func (*DashboardRun) IsSnapshotPanel() {}
|
||||
|
||||
// GetInput searches for an input with the given name
|
||||
func (r *DashboardRun) GetInput(name string) (*modconfig.DashboardInput, bool) {
|
||||
return r.dashboard.GetInput(name)
|
||||
}
|
||||
|
||||
// GetInputsDependingOn returns a list of DashboardInputs which have a runtime dependency on the given input
|
||||
func (r *DashboardRun) GetInputsDependingOn(changedInputName string) []string {
|
||||
var res []string
|
||||
for _, input := range r.dashboard.Inputs {
|
||||
if input.DependsOnInput(changedInputName) {
|
||||
res = append(res, input.UnqualifiedName)
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func (r *DashboardRun) createChildRuns(executionTree *DashboardExecutionTree) error {
|
||||
// ask our resource for its children
|
||||
children := r.dashboard.GetChildren()
|
||||
|
||||
for _, child := range children {
|
||||
var childRun dashboardtypes.DashboardTreeRun
|
||||
var err error
|
||||
switch i := child.(type) {
|
||||
case *modconfig.DashboardWith:
|
||||
// ignore as with runs are created by RuntimeDependencyPublisherImpl
|
||||
continue
|
||||
case *modconfig.Dashboard:
|
||||
childRun, err = NewDashboardRun(i, r, executionTree)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case *modconfig.DashboardContainer:
|
||||
childRun, err = NewDashboardContainerRun(i, r, executionTree)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case *modconfig.DashboardInput:
|
||||
// NOTE: clone the input to avoid mutating the original
|
||||
// TODO remove the need for this when we refactor input values resolution
|
||||
// TODO https://github.com/turbot/steampipe/issues/2864
|
||||
|
||||
// TACTICAL: as this is a runtime dependency, set the run name to the 'scoped name'
|
||||
// this is to match the name in the panel dependencies
|
||||
// TODO [node_reuse] consider naming https://github.com/turbot/steampipe/issues/2921
|
||||
inputRunName := fmt.Sprintf("%s.%s", r.DashboardName, i.UnqualifiedName)
|
||||
childRun, err = NewLeafRun(i.Clone(), r, executionTree, setName(inputRunName))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
default:
|
||||
// ensure this item is a DashboardLeafNode
|
||||
leafNode, ok := i.(modconfig.DashboardLeafNode)
|
||||
if !ok {
|
||||
return fmt.Errorf("child %s does not implement DashboardLeafNode", i.Name())
|
||||
}
|
||||
|
||||
childRun, err = NewLeafRun(leafNode, r, executionTree)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// should never happen - container children must be either container or counter
|
||||
if childRun == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// if our child has not completed, we have not completed
|
||||
if childRun.GetRunStatus() == dashboardtypes.RunInitialized {
|
||||
r.Status = dashboardtypes.RunInitialized
|
||||
}
|
||||
r.children = append(r.children, childRun)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1,171 +0,0 @@
|
||||
package snapshot
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
|
||||
"github.com/turbot/steampipe/pkg/dashboard/dashboardevents"
|
||||
"github.com/turbot/steampipe/pkg/dashboard/dashboardtypes"
|
||||
"github.com/turbot/steampipe/pkg/error_helpers"
|
||||
"github.com/turbot/steampipe/pkg/steampipeconfig/modconfig"
|
||||
)
|
||||
|
||||
type DashboardTreeRunImpl struct {
|
||||
DashboardName string `json:"dashboard"`
|
||||
Description string `json:"description,omitempty"`
|
||||
Display string `cty:"display" hcl:"display" json:"display,omitempty"`
|
||||
Documentation string `json:"documentation,omitempty"`
|
||||
ErrorString string `json:"error,omitempty"`
|
||||
Name string `json:"name"`
|
||||
NodeType string `json:"panel_type"`
|
||||
SourceDefinition string `json:"source_definition"`
|
||||
Status dashboardtypes.RunStatus `json:"status"`
|
||||
Tags map[string]string `json:"tags,omitempty"`
|
||||
Title string `json:"title,omitempty"`
|
||||
Type string `json:"display_type,omitempty"`
|
||||
Width int `json:"width,omitempty"`
|
||||
|
||||
err error
|
||||
parent dashboardtypes.DashboardParent
|
||||
executionTree *DashboardExecutionTree
|
||||
resource modconfig.DashboardLeafNode
|
||||
// store the top level run which embeds this struct
|
||||
// we need this for setStatus which serialises the run for the message payload
|
||||
run dashboardtypes.DashboardTreeRun
|
||||
}
|
||||
|
||||
func NewDashboardTreeRunImpl(resource modconfig.DashboardLeafNode, parent dashboardtypes.DashboardParent, run dashboardtypes.DashboardTreeRun, executionTree *DashboardExecutionTree) DashboardTreeRunImpl {
|
||||
// NOTE: we MUST declare children inline - therefore we cannot share children between runs in the tree
|
||||
// (if we supported the children property then we could reuse resources)
|
||||
// so FOR NOW it is safe to use the container name directly as the run name
|
||||
res := DashboardTreeRunImpl{
|
||||
Name: resource.Name(),
|
||||
Title: resource.GetTitle(),
|
||||
NodeType: resource.BlockType(),
|
||||
Width: resource.GetWidth(),
|
||||
Display: resource.GetDisplay(),
|
||||
Description: resource.GetDescription(),
|
||||
Documentation: resource.GetDocumentation(),
|
||||
Type: resource.GetType(),
|
||||
Tags: resource.GetTags(),
|
||||
SourceDefinition: resource.GetMetadata().SourceDefinition,
|
||||
|
||||
// set to complete, optimistically
|
||||
// if any children have SQL we will set this to DashboardRunReady instead
|
||||
Status: dashboardtypes.RunComplete,
|
||||
parent: parent,
|
||||
executionTree: executionTree,
|
||||
resource: resource,
|
||||
run: run,
|
||||
}
|
||||
|
||||
// TACTICAL if this run was created to create a snapshot output for a control run,
|
||||
// there will be no execution tree
|
||||
if executionTree != nil {
|
||||
res.DashboardName = executionTree.dashboardName
|
||||
} else {
|
||||
// there is no execution tree - use the resource name as the dashboard name
|
||||
res.DashboardName = resource.Name()
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// GetName implements DashboardTreeRun
|
||||
func (r *DashboardTreeRunImpl) GetName() string {
|
||||
return r.Name
|
||||
}
|
||||
|
||||
// GetRunStatus implements DashboardTreeRun
|
||||
func (r *DashboardTreeRunImpl) GetRunStatus() dashboardtypes.RunStatus {
|
||||
return r.Status
|
||||
}
|
||||
|
||||
// GetError implements DashboardTreeRun
|
||||
func (r *DashboardTreeRunImpl) GetError() error {
|
||||
return r.err
|
||||
}
|
||||
|
||||
// RunComplete implements DashboardTreeRun
|
||||
func (r *DashboardTreeRunImpl) RunComplete() bool {
|
||||
return r.Status.IsFinished()
|
||||
}
|
||||
|
||||
// GetInputsDependingOn implements DashboardTreeRun
|
||||
// defaults to nothing
|
||||
func (r *DashboardTreeRunImpl) GetInputsDependingOn(_ string) []string { return nil }
|
||||
|
||||
// GetParent implements DashboardTreeRun
|
||||
func (r *DashboardTreeRunImpl) GetParent() dashboardtypes.DashboardParent {
|
||||
return r.parent
|
||||
}
|
||||
|
||||
// GetTitle implements DashboardTreeRun
|
||||
func (r *DashboardTreeRunImpl) GetTitle() string {
|
||||
return r.Title
|
||||
}
|
||||
|
||||
// GetNodeType implements DashboardTreeRun
|
||||
func (r *DashboardTreeRunImpl) GetNodeType() string {
|
||||
return r.NodeType
|
||||
}
|
||||
|
||||
// Initialise implements DashboardTreeRun
|
||||
func (r *DashboardTreeRunImpl) Initialise(context.Context) {
|
||||
panic("must be implemented by child struct")
|
||||
}
|
||||
|
||||
// Execute implements DashboardTreeRun
|
||||
func (r *DashboardTreeRunImpl) Execute(ctx context.Context) {
|
||||
panic("must be implemented by child struct")
|
||||
}
|
||||
|
||||
// AsTreeNode implements DashboardTreeRun
|
||||
func (r *DashboardTreeRunImpl) AsTreeNode() *dashboardtypes.SnapshotTreeNode {
|
||||
panic("must be implemented by child struct")
|
||||
}
|
||||
|
||||
// GetResource implements DashboardTreeRun
|
||||
func (r *DashboardTreeRunImpl) GetResource() modconfig.DashboardLeafNode {
|
||||
return r.resource
|
||||
}
|
||||
|
||||
// SetError implements DashboardTreeRun
|
||||
func (r *DashboardTreeRunImpl) SetError(ctx context.Context, err error) {
|
||||
log.Printf("[TRACE] %s SetError err %v", r.Name, err)
|
||||
r.err = err
|
||||
// error type does not serialise to JSON so copy into a string
|
||||
r.ErrorString = err.Error()
|
||||
// set status (this sends update event)
|
||||
if error_helpers.IsContextCancelledError(err) {
|
||||
r.setStatus(ctx, dashboardtypes.RunCanceled)
|
||||
} else {
|
||||
r.setStatus(ctx, dashboardtypes.RunError)
|
||||
}
|
||||
// tell parent we are done
|
||||
r.notifyParentOfCompletion()
|
||||
}
|
||||
|
||||
// SetComplete implements DashboardTreeRun
|
||||
func (r *DashboardTreeRunImpl) SetComplete(ctx context.Context) {
|
||||
// set status (this sends update event)
|
||||
r.setStatus(ctx, dashboardtypes.RunComplete)
|
||||
// tell parent we are done
|
||||
r.notifyParentOfCompletion()
|
||||
}
|
||||
|
||||
func (r *DashboardTreeRunImpl) setStatus(ctx context.Context, status dashboardtypes.RunStatus) {
|
||||
r.Status = status
|
||||
// notify our parent that our status has changed
|
||||
r.parent.ChildStatusChanged(ctx)
|
||||
|
||||
// raise LeafNodeUpdated event
|
||||
// TODO [node_reuse] do this a different way https://github.com/turbot/steampipe/issues/2919
|
||||
// TACTICAL: pass the full run struct - 'r.run', rather than ourselves - so we serialize all properties
|
||||
e, _ := dashboardevents.NewLeafNodeUpdate(r.run, r.executionTree.sessionId, r.executionTree.id)
|
||||
r.executionTree.workspace.PublishDashboardEvent(ctx, e)
|
||||
|
||||
}
|
||||
|
||||
func (r *DashboardTreeRunImpl) notifyParentOfCompletion() {
|
||||
r.parent.ChildCompleteChan() <- r
|
||||
}
|
||||
@@ -1,223 +0,0 @@
|
||||
package snapshot
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
filehelpers "github.com/turbot/go-kit/files"
|
||||
"github.com/turbot/pipe-fittings/utils"
|
||||
"github.com/turbot/steampipe/pkg/dashboard/dashboardevents"
|
||||
"github.com/turbot/steampipe/pkg/dashboard/dashboardtypes"
|
||||
"github.com/turbot/steampipe/pkg/db/db_common"
|
||||
"github.com/turbot/steampipe/pkg/workspace"
|
||||
)
|
||||
|
||||
type DashboardExecutor struct {
|
||||
// map of executions, keyed by session id
|
||||
executions map[string]*DashboardExecutionTree
|
||||
executionLock sync.Mutex
|
||||
// is this an interactive execution
|
||||
// i.e. inputs may be specified _after_ execution starts
|
||||
// false when running a single dashboard in batch mode
|
||||
interactive bool
|
||||
}
|
||||
|
||||
func newDashboardExecutor() *DashboardExecutor {
|
||||
return &DashboardExecutor{
|
||||
executions: make(map[string]*DashboardExecutionTree),
|
||||
// default to interactive execution
|
||||
interactive: true,
|
||||
}
|
||||
}
|
||||
|
||||
var Executor = newDashboardExecutor()
|
||||
|
||||
func (e *DashboardExecutor) ExecuteDashboard(ctx context.Context, sessionId, dashboardName string, inputs map[string]any, workspace *workspace.Workspace, client db_common.Client) (err error) {
|
||||
var executionTree *DashboardExecutionTree
|
||||
defer func() {
|
||||
if err != nil && ctx.Err() != nil {
|
||||
err = ctx.Err()
|
||||
}
|
||||
// if there was an error executing, send an ExecutionError event
|
||||
if err != nil {
|
||||
errorEvent := &dashboardevents.ExecutionError{
|
||||
Error: err,
|
||||
Session: sessionId,
|
||||
Timestamp: time.Now(),
|
||||
}
|
||||
workspace.PublishDashboardEvent(ctx, errorEvent)
|
||||
}
|
||||
}()
|
||||
|
||||
// reset any existing executions for this session
|
||||
e.CancelExecutionForSession(ctx, sessionId)
|
||||
|
||||
// now create a new execution
|
||||
executionTree, err = NewDashboardExecutionTree(dashboardName, sessionId, client, workspace)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// if inputs must be provided before execution (i.e. this is a batch dashboard execution),
|
||||
// verify all required inputs are provided
|
||||
if err = e.validateInputs(executionTree, inputs); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// add to execution map
|
||||
e.setExecution(sessionId, executionTree)
|
||||
|
||||
// if inputs have been passed, set them first
|
||||
if len(inputs) > 0 {
|
||||
executionTree.SetInputValues(inputs)
|
||||
}
|
||||
|
||||
go executionTree.Execute(ctx)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// if inputs must be provided before execution (i.e. this is a batch dashboard execution),
|
||||
// verify all required inputs are provided
|
||||
func (e *DashboardExecutor) validateInputs(executionTree *DashboardExecutionTree, inputs map[string]any) error {
|
||||
if e.interactive {
|
||||
// interactive dashboard execution - no need to validate
|
||||
return nil
|
||||
}
|
||||
var missingInputs []string
|
||||
for _, inputName := range executionTree.InputRuntimeDependencies() {
|
||||
if _, ok := inputs[inputName]; !ok {
|
||||
missingInputs = append(missingInputs, inputName)
|
||||
}
|
||||
}
|
||||
if missingCount := len(missingInputs); missingCount > 0 {
|
||||
return fmt.Errorf("%s '%s' must be provided using '--dashboard-input name=value'", utils.Pluralize("input", missingCount), strings.Join(missingInputs, ","))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *DashboardExecutor) LoadSnapshot(ctx context.Context, sessionId, snapshotName string, w *workspace.Workspace) (map[string]any, error) {
|
||||
// find snapshot path in workspace
|
||||
snapshotPath, ok := w.GetResourceMaps().Snapshots[snapshotName]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("snapshot %s not found in %s (%s)", snapshotName, w.Mod.Name(), w.Path)
|
||||
}
|
||||
|
||||
if !filehelpers.FileExists(snapshotPath) {
|
||||
return nil, fmt.Errorf("snapshot %s not does not exist", snapshotPath)
|
||||
}
|
||||
|
||||
snapshotContent, err := os.ReadFile(snapshotPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// deserialize the snapshot as an interface map
|
||||
// we cannot deserialize into a SteampipeSnapshot struct
|
||||
// (without custom deserialisation code) as the Panels property is an interface
|
||||
snap := map[string]any{}
|
||||
|
||||
err = json.Unmarshal(snapshotContent, &snap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return snap, nil
|
||||
}
|
||||
|
||||
func (e *DashboardExecutor) OnInputChanged(ctx context.Context, sessionId string, inputs map[string]any, changedInput string) error {
|
||||
// find the execution
|
||||
executionTree, found := e.executions[sessionId]
|
||||
if !found {
|
||||
return fmt.Errorf("no dashboard running for session %s", sessionId)
|
||||
}
|
||||
|
||||
// get the previous value of this input
|
||||
inputPrevValue := executionTree.inputValues[changedInput]
|
||||
// first see if any other inputs rely on the one which was just changed
|
||||
clearedInputs := e.clearDependentInputs(executionTree.Root, changedInput, inputs)
|
||||
if len(clearedInputs) > 0 {
|
||||
event := &dashboardevents.InputValuesCleared{
|
||||
ClearedInputs: clearedInputs,
|
||||
Session: executionTree.sessionId,
|
||||
ExecutionId: executionTree.id,
|
||||
}
|
||||
executionTree.workspace.PublishDashboardEvent(ctx, event)
|
||||
}
|
||||
// if there are any dependent inputs, set their value to nil and send an event to the UI
|
||||
// if the dashboard run is complete, just re-execute
|
||||
if executionTree.GetRunStatus().IsFinished() || inputPrevValue != nil {
|
||||
return e.ExecuteDashboard(
|
||||
ctx,
|
||||
sessionId,
|
||||
executionTree.dashboardName,
|
||||
inputs,
|
||||
executionTree.workspace,
|
||||
executionTree.client)
|
||||
}
|
||||
|
||||
// set the inputs
|
||||
executionTree.SetInputValues(inputs)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *DashboardExecutor) clearDependentInputs(root dashboardtypes.DashboardTreeRun, changedInput string, inputs map[string]any) []string {
|
||||
dependentInputs := root.GetInputsDependingOn(changedInput)
|
||||
clearedInputs := dependentInputs
|
||||
if len(dependentInputs) > 0 {
|
||||
for _, inputName := range dependentInputs {
|
||||
if inputs[inputName] != nil {
|
||||
// clear the input value
|
||||
inputs[inputName] = nil
|
||||
childDependentInputs := e.clearDependentInputs(root, inputName, inputs)
|
||||
clearedInputs = append(clearedInputs, childDependentInputs...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return clearedInputs
|
||||
}
|
||||
|
||||
func (e *DashboardExecutor) CancelExecutionForSession(_ context.Context, sessionId string) {
|
||||
// find the execution
|
||||
executionTree, found := e.getExecution(sessionId)
|
||||
if !found {
|
||||
// nothing to do
|
||||
return
|
||||
}
|
||||
|
||||
// cancel if in progress
|
||||
executionTree.Cancel()
|
||||
// remove from execution tree
|
||||
e.removeExecution(sessionId)
|
||||
}
|
||||
|
||||
// find the execution for the given session id
|
||||
func (e *DashboardExecutor) getExecution(sessionId string) (*DashboardExecutionTree, bool) {
|
||||
e.executionLock.Lock()
|
||||
defer e.executionLock.Unlock()
|
||||
|
||||
executionTree, found := e.executions[sessionId]
|
||||
return executionTree, found
|
||||
}
|
||||
|
||||
func (e *DashboardExecutor) setExecution(sessionId string, executionTree *DashboardExecutionTree) {
|
||||
e.executionLock.Lock()
|
||||
defer e.executionLock.Unlock()
|
||||
|
||||
e.executions[sessionId] = executionTree
|
||||
}
|
||||
|
||||
func (e *DashboardExecutor) removeExecution(sessionId string) {
|
||||
e.executionLock.Lock()
|
||||
defer e.executionLock.Unlock()
|
||||
|
||||
delete(e.executions, sessionId)
|
||||
}
|
||||
@@ -1,220 +0,0 @@
|
||||
package snapshot
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
|
||||
"github.com/turbot/steampipe/pkg/dashboard/dashboardtypes"
|
||||
"github.com/turbot/steampipe/pkg/error_helpers"
|
||||
"github.com/turbot/steampipe/pkg/query/queryresult"
|
||||
"github.com/turbot/steampipe/pkg/statushooks"
|
||||
"github.com/turbot/steampipe/pkg/steampipeconfig/modconfig"
|
||||
"golang.org/x/exp/maps"
|
||||
)
|
||||
|
||||
// LeafRun is a struct representing the execution of a leaf dashboard node
|
||||
type LeafRun struct {
|
||||
// all RuntimeDependencySubscribers are also publishers as they have args/params
|
||||
RuntimeDependencySubscriberImpl
|
||||
Resource modconfig.DashboardLeafNode `json:"properties,omitempty"`
|
||||
|
||||
Data *dashboardtypes.LeafData `json:"data,omitempty"`
|
||||
TimingResult *queryresult.TimingResult `json:"-"`
|
||||
// function called when the run is complete
|
||||
// this property is populated for 'with' runs
|
||||
onComplete func()
|
||||
}
|
||||
|
||||
func (r *LeafRun) AsTreeNode() *dashboardtypes.SnapshotTreeNode {
|
||||
return &dashboardtypes.SnapshotTreeNode{
|
||||
Name: r.Name,
|
||||
NodeType: r.NodeType,
|
||||
}
|
||||
}
|
||||
|
||||
func NewLeafRun(resource modconfig.DashboardLeafNode, parent dashboardtypes.DashboardParent, executionTree *DashboardExecutionTree, opts ...LeafRunOption) (*LeafRun, error) {
|
||||
r := &LeafRun{
|
||||
Resource: resource,
|
||||
}
|
||||
|
||||
// create RuntimeDependencySubscriberImpl - this handles 'with' run creation and runtime dependency resolution
// (NOTE: we have to do this after creating the run as we need to pass a ref to the run)
|
||||
r.RuntimeDependencySubscriberImpl = *NewRuntimeDependencySubscriber(resource, parent, r, executionTree)
|
||||
|
||||
// apply options AFTER calling NewRuntimeDependencySubscriber
|
||||
for _, opt := range opts {
|
||||
opt(r)
|
||||
}
|
||||
|
||||
err := r.initRuntimeDependencies(executionTree)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
r.NodeType = resource.BlockType()
|
||||
|
||||
// if the node has no runtime dependencies, resolve the sql
|
||||
if !r.hasRuntimeDependencies() {
|
||||
if err := r.resolveSQLAndArgs(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
// add r into execution tree
|
||||
executionTree.runs[r.Name] = r
|
||||
|
||||
// if we have children (nodes/edges), create runs for them
|
||||
err = r.createChildRuns(executionTree)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// create buffered channel for children to report their completion
|
||||
r.createChildCompleteChan()
|
||||
|
||||
// populate the names of any withs we depend on
|
||||
r.setRuntimeDependencies()
|
||||
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func (r *LeafRun) createChildRuns(executionTree *DashboardExecutionTree) error {
|
||||
children := r.resource.GetChildren()
|
||||
if len(children) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
r.children = make([]dashboardtypes.DashboardTreeRun, len(children))
|
||||
var errors []error
|
||||
|
||||
for i, c := range children {
|
||||
var opts []LeafRunOption
|
||||
childRun, err := NewLeafRun(c.(modconfig.DashboardLeafNode), r, executionTree, opts...)
|
||||
if err != nil {
|
||||
errors = append(errors, err)
|
||||
continue
|
||||
}
|
||||
|
||||
r.children[i] = childRun
|
||||
}
|
||||
return error_helpers.CombineErrors(errors...)
|
||||
}
|
||||
|
||||
// Execute implements DashboardTreeRun
|
||||
func (r *LeafRun) Execute(ctx context.Context) {
|
||||
defer func() {
|
||||
// call our onComplete if we have one
|
||||
// (this is used to collect 'with' data and propagate errors)
|
||||
if r.onComplete != nil {
|
||||
r.onComplete()
|
||||
}
|
||||
}()
|
||||
|
||||
// if there is nothing to do, return
|
||||
if r.Status.IsFinished() {
|
||||
return
|
||||
}
|
||||
|
||||
log.Printf("[TRACE] LeafRun '%s' Execute()", r.resource.Name())
|
||||
|
||||
// to get here, we must be a query provider
|
||||
|
||||
// if we have children and with runs, start them asynchronously (they may block waiting for our runtime dependencies)
|
||||
r.executeChildrenAsync(ctx)
|
||||
|
||||
// start a goroutine to wait for children to complete
|
||||
doneChan := r.waitForChildrenAsync(ctx)
|
||||
|
||||
if err := r.evaluateRuntimeDependencies(ctx); err != nil {
|
||||
r.SetError(ctx, err)
|
||||
return
|
||||
}
|
||||
|
||||
// set status to running (this sends update event)
|
||||
// (if we have blocked children, this will be changed to blocked)
|
||||
r.setRunning(ctx)
|
||||
|
||||
// if we have sql to execute, do it now
|
||||
// (if we are only performing a base execution, do not run the query)
|
||||
if r.executeSQL != "" {
|
||||
if err := r.executeQuery(ctx); err != nil {
|
||||
r.SetError(ctx, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// wait for all children and withs
|
||||
err := <-doneChan
|
||||
if err == nil {
|
||||
log.Printf("[TRACE] %s children complete", r.resource.Name())
|
||||
|
||||
// aggregate our child data
|
||||
r.combineChildData()
|
||||
// set complete status on dashboard
|
||||
r.SetComplete(ctx)
|
||||
} else {
|
||||
log.Printf("[TRACE] %s children complete with error: %s", r.resource.Name(), err.Error())
|
||||
r.SetError(ctx, err)
|
||||
}
|
||||
}
|
||||
|
||||
// SetError implements DashboardTreeRun (override to set snapshothook status)
|
||||
func (r *LeafRun) SetError(ctx context.Context, err error) {
|
||||
// increment error count for snapshot hook
|
||||
statushooks.SnapshotError(ctx)
|
||||
r.DashboardTreeRunImpl.SetError(ctx, err)
|
||||
}
|
||||
|
||||
// SetComplete implements DashboardTreeRun (override to set snapshothook status)
|
||||
func (r *LeafRun) SetComplete(ctx context.Context) {
|
||||
// call snapshot hooks with progress
|
||||
statushooks.UpdateSnapshotProgress(ctx, 1)
|
||||
|
||||
r.DashboardTreeRunImpl.SetComplete(ctx)
|
||||
}
|
||||
|
||||
// IsSnapshotPanel implements SnapshotPanel
|
||||
func (*LeafRun) IsSnapshotPanel() {}
|
||||
|
||||
// if this leaf run has a query or sql, execute it now
|
||||
func (r *LeafRun) executeQuery(ctx context.Context) error {
|
||||
log.Printf("[TRACE] LeafRun '%s' SQL resolved, executing", r.resource.Name())
|
||||
|
||||
queryResult, err := r.executionTree.client.ExecuteSync(ctx, r.executeSQL, r.Args...)
|
||||
if err != nil {
|
||||
log.Printf("[TRACE] LeafRun '%s' query failed: %s", r.resource.Name(), err.Error())
|
||||
return err
|
||||
|
||||
}
|
||||
log.Printf("[TRACE] LeafRun '%s' complete", r.resource.Name())
|
||||
|
||||
r.Data = dashboardtypes.NewLeafData(queryResult)
|
||||
r.TimingResult = queryResult.TimingResult
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *LeafRun) combineChildData() {
|
||||
// we either have children OR a query
|
||||
// if there are no children, do nothing
|
||||
if len(r.children) == 0 {
|
||||
return
|
||||
}
|
||||
// create empty data to populate
|
||||
r.Data = &dashboardtypes.LeafData{}
|
||||
// build map of columns for the schema
|
||||
schemaMap := make(map[string]*queryresult.ColumnDef)
|
||||
for _, c := range r.children {
|
||||
childLeafRun := c.(*LeafRun)
|
||||
data := childLeafRun.Data
|
||||
// if there is no data or this is a 'with', skip
|
||||
if data == nil || childLeafRun.resource.BlockType() == modconfig.BlockTypeWith {
|
||||
continue
|
||||
}
|
||||
for _, s := range data.Columns {
|
||||
if _, ok := schemaMap[s.Name]; !ok {
|
||||
schemaMap[s.Name] = s
|
||||
}
|
||||
}
|
||||
r.Data.Rows = append(r.Data.Rows, data.Rows...)
|
||||
}
|
||||
r.Data.Columns = maps.Values(schemaMap)
|
||||
}
|
||||
@@ -1,9 +0,0 @@
package snapshot

type LeafRunOption = func(target *LeafRun)

func setName(name string) LeafRunOption {
return func(target *LeafRun) {
target.Name = name
}
}
@@ -1,51 +0,0 @@
|
||||
package snapshot
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/turbot/steampipe/pkg/dashboard/dashboardtypes"
|
||||
"github.com/turbot/steampipe/pkg/steampipeconfig/modconfig"
|
||||
"github.com/turbot/steampipe/pkg/workspace"
|
||||
)
|
||||
|
||||
// GetReferencedVariables builds map of variables values containing only those mod variables which are referenced
|
||||
// NOTE: we refer to variables in dependency mods in the format which is valid for an SPVARS file, i.e.
|
||||
// <mod>.<var-name>
|
||||
// the VariableValues map will contain these variables with the name format <mod>.var.<var-name>,
|
||||
// so we must convert the name
|
||||
func GetReferencedVariables(root dashboardtypes.DashboardTreeRun, w *workspace.Workspace) map[string]string {
|
||||
var referencedVariables = make(map[string]string)
|
||||
|
||||
addReferencedVars := func(refs []*modconfig.ResourceReference) {
|
||||
for _, ref := range refs {
|
||||
parts := strings.Split(ref.To, ".")
|
||||
if len(parts) == 2 && parts[0] == "var" {
|
||||
varName := parts[1]
|
||||
varValueName := varName
|
||||
// NOTE: if the ref is NOT for the workspace mod, then use the qualified variable name
|
||||
// (e.g. aws_insights.var.v1)
|
||||
if refMod := ref.GetMetadata().ModName; refMod != w.Mod.ShortName {
|
||||
varValueName = fmt.Sprintf("%s.var.%s", refMod, varName)
|
||||
varName = fmt.Sprintf("%s.%s", refMod, varName)
|
||||
}
|
||||
referencedVariables[varName] = w.VariableValues[varValueName]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch r := root.(type) {
|
||||
case *DashboardRun:
|
||||
//nolint:errcheck // we don't care about errors here, since the callback does not return an error
|
||||
r.dashboard.WalkResources(
|
||||
func(resource modconfig.HclResource) (bool, error) {
|
||||
if resourceWithMetadata, ok := resource.(modconfig.ResourceWithMetadata); ok {
|
||||
addReferencedVars(resourceWithMetadata.GetReferences())
|
||||
}
|
||||
return true, nil
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
return referencedVariables
|
||||
}
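To make the name conversion above concrete, here is a hypothetical sketch (mod and variable names invented, not part of this commit) showing the two key formats for a variable referenced from a dependency mod:

package main

import "fmt"

func main() {
    // a variable v1 referenced from dependency mod aws_insights,
    // in a workspace whose mod short name is my_mod
    refMod, workspaceMod, varName := "aws_insights", "my_mod", "v1"
    varValueName := varName
    if refMod != workspaceMod {
        varValueName = fmt.Sprintf("%s.var.%s", refMod, varName) // lookup key in w.VariableValues
        varName = fmt.Sprintf("%s.%s", refMod, varName)          // key used in the returned map
    }
    fmt.Println(varName, "<-", varValueName) // aws_insights.v1 <- aws_insights.var.v1
}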
|
||||
@@ -1,14 +0,0 @@
package snapshot

import (
"github.com/turbot/steampipe/pkg/dashboard/dashboardtypes"
"github.com/turbot/steampipe/pkg/steampipeconfig/modconfig"
)

type RuntimeDependencyPublisher interface {
dashboardtypes.DashboardTreeRun
ProvidesRuntimeDependency(dependency *modconfig.RuntimeDependency) bool
SubscribeToRuntimeDependency(name string, opts ...RuntimeDependencyPublishOption) chan *dashboardtypes.ResolvedRuntimeDependencyValue
PublishRuntimeDependencyValue(name string, result *dashboardtypes.ResolvedRuntimeDependencyValue)
GetWithRuns() map[string]*LeafRun
}
@@ -1,294 +0,0 @@
|
||||
package snapshot
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"github.com/turbot/pipe-fittings/utils"
|
||||
"github.com/turbot/steampipe/pkg/dashboard/dashboardtypes"
|
||||
"github.com/turbot/steampipe/pkg/steampipeconfig/modconfig"
|
||||
)
|
||||
|
||||
type runtimeDependencyPublisherImpl struct {
|
||||
DashboardParentImpl
|
||||
Args []any `json:"args,omitempty"`
|
||||
Params []*modconfig.ParamDef `json:"params,omitempty"`
|
||||
subscriptions map[string][]*RuntimeDependencyPublishTarget
|
||||
withValueMutex *sync.Mutex
|
||||
withRuns map[string]*LeafRun
|
||||
inputs map[string]*modconfig.DashboardInput
|
||||
}
|
||||
|
||||
func newRuntimeDependencyPublisherImpl(resource modconfig.DashboardLeafNode, parent dashboardtypes.DashboardParent, run dashboardtypes.DashboardTreeRun, executionTree *DashboardExecutionTree) runtimeDependencyPublisherImpl {
|
||||
b := runtimeDependencyPublisherImpl{
|
||||
DashboardParentImpl: newDashboardParentImpl(resource, parent, run, executionTree),
|
||||
subscriptions: make(map[string][]*RuntimeDependencyPublishTarget),
|
||||
inputs: make(map[string]*modconfig.DashboardInput),
|
||||
withRuns: make(map[string]*LeafRun),
|
||||
withValueMutex: new(sync.Mutex),
|
||||
}
|
||||
// if the resource is a query provider, get params and set status
|
||||
if queryProvider, ok := resource.(modconfig.QueryProvider); ok {
|
||||
// get params
|
||||
b.Params = queryProvider.GetParams()
|
||||
if queryProvider.RequiresExecution(queryProvider) || len(queryProvider.GetChildren()) > 0 {
|
||||
b.Status = dashboardtypes.RunInitialized
|
||||
}
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
func (p *runtimeDependencyPublisherImpl) Initialise(context.Context) {}
|
||||
|
||||
func (p *runtimeDependencyPublisherImpl) Execute(context.Context) {
|
||||
panic("must be implemented by child struct")
|
||||
}
|
||||
|
||||
func (p *runtimeDependencyPublisherImpl) AsTreeNode() *dashboardtypes.SnapshotTreeNode {
|
||||
panic("must be implemented by child struct")
|
||||
}
|
||||
|
||||
func (p *runtimeDependencyPublisherImpl) GetName() string {
|
||||
return p.Name
|
||||
}
|
||||
|
||||
func (p *runtimeDependencyPublisherImpl) ProvidesRuntimeDependency(dependency *modconfig.RuntimeDependency) bool {
|
||||
resourceName := dependency.SourceResourceName()
|
||||
switch dependency.PropertyPath.ItemType {
|
||||
case modconfig.BlockTypeWith:
|
||||
// we cannot use withRuns here because, if withs have dependencies on each other,
|
||||
// this function may be called before all runs have been added
|
||||
// instead, look directly at the underlying resource withs
|
||||
if wp, ok := p.resource.(modconfig.WithProvider); ok {
|
||||
for _, w := range wp.GetWiths() {
|
||||
if w.UnqualifiedName == resourceName {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
case modconfig.BlockTypeInput:
|
||||
return p.inputs[resourceName] != nil
|
||||
case modconfig.BlockTypeParam:
|
||||
for _, p := range p.Params {
|
||||
// check short name not resource name (which is unqualified name)
|
||||
if p.ShortName == dependency.PropertyPath.Name {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (p *runtimeDependencyPublisherImpl) SubscribeToRuntimeDependency(name string, opts ...RuntimeDependencyPublishOption) chan *dashboardtypes.ResolvedRuntimeDependencyValue {
|
||||
target := &RuntimeDependencyPublishTarget{
|
||||
// make a channel (buffer to avoid potential sync issues)
|
||||
channel: make(chan *dashboardtypes.ResolvedRuntimeDependencyValue, 1),
|
||||
}
|
||||
for _, o := range opts {
|
||||
o(target)
|
||||
}
|
||||
log.Printf("[TRACE] SubscribeToRuntimeDependency %s", name)
|
||||
|
||||
// subscribe, passing a function which invokes getWithValue to resolve the required with value
|
||||
p.subscriptions[name] = append(p.subscriptions[name], target)
|
||||
return target.channel
|
||||
}
|
||||
|
||||
func (p *runtimeDependencyPublisherImpl) PublishRuntimeDependencyValue(name string, result *dashboardtypes.ResolvedRuntimeDependencyValue) {
|
||||
for _, target := range p.subscriptions[name] {
|
||||
if target.transform != nil {
|
||||
// careful not to mutate result which may be reused
|
||||
target.channel <- target.transform(result)
|
||||
} else {
|
||||
target.channel <- result
|
||||
}
|
||||
close(target.channel)
|
||||
}
|
||||
// clear subscriptions
|
||||
delete(p.subscriptions, name)
|
||||
}
|
||||
|
||||
func (p *runtimeDependencyPublisherImpl) GetWithRuns() map[string]*LeafRun {
|
||||
return p.withRuns
|
||||
}
|
||||
|
||||
func (p *runtimeDependencyPublisherImpl) initWiths() error {
|
||||
// if the resource is a runtime dependency provider, create with runs and resolve dependencies
|
||||
wp, ok := p.resource.(modconfig.WithProvider)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
// if we have with blocks, create runs for them
|
||||
// BEFORE creating child runs, and before adding runtime dependencies
|
||||
err := p.createWithRuns(wp.GetWiths(), p.executionTree)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// getWithValue accepts the raw with result (dashboardtypes.LeafData) and the property path, and extracts the appropriate data
|
||||
func (p *runtimeDependencyPublisherImpl) getWithValue(name string, result *dashboardtypes.LeafData, path *modconfig.ParsedPropertyPath) (any, error) {
|
||||
// get the set of rows which will be used to generate the return value
|
||||
rows := result.Rows
|
||||
/*
|
||||
You can
|
||||
reference the whole table with:
|
||||
with.stuff1
|
||||
this is equivalent to:
|
||||
with.stuff1.rows
|
||||
and
|
||||
with.stuff1.rows[*]
|
||||
|
||||
Rows is a list, and you can index it to get a single row:
|
||||
with.stuff1.rows[0]
|
||||
or splat it to get all rows:
|
||||
with.stuff1.rows[*]
|
||||
Each row, in turn, contains all the columns, so you can get a single column of a single row:
|
||||
with.stuff1.rows[0].a
|
||||
if you splat the row, then you can get an array of a single column from all rows. This would be passed to sql as an array:
|
||||
with.stuff1.rows[*].a
|
||||
*/
|
||||
|
||||
// with.stuff1 -> PropertyPath will be ""
|
||||
// with.stuff1.rows -> PropertyPath will be "rows"
|
||||
// with.stuff1.rows[*] -> PropertyPath will be "rows.*"
|
||||
// with.stuff1.rows[0] -> PropertyPath will be "rows.0"
|
||||
// with.stuff1.rows[0].a -> PropertyPath will be "rows.0.a"
|
||||
const rowsSegment = 0
|
||||
const rowsIdxSegment = 1
|
||||
const columnSegment = 2
|
||||
|
||||
// second path section MUST be "rows"
|
||||
if len(path.PropertyPath) > rowsSegment && path.PropertyPath[rowsSegment] != "rows" || len(path.PropertyPath) > (columnSegment+1) {
|
||||
return nil, fmt.Errorf("reference to with '%s' has invalid property path '%s'", name, path.Original)
|
||||
}
|
||||
|
||||
// if no row is specified assume all
|
||||
rowIdxStr := "*"
|
||||
if len(path.PropertyPath) > rowsIdxSegment {
|
||||
// so there is a 3rd part - this will be the row idx (or '*')
|
||||
rowIdxStr = path.PropertyPath[rowsIdxSegment]
|
||||
}
|
||||
var column string
|
||||
|
||||
// is a column specified?
|
||||
if len(path.PropertyPath) > columnSegment {
|
||||
column = path.PropertyPath[columnSegment]
|
||||
} else {
|
||||
if len(result.Columns) > 1 {
|
||||
// we do not support returning all columns (yet)
|
||||
return nil, fmt.Errorf("reference to with '%s' is returning more than one column - not supported", name)
|
||||
}
|
||||
column = result.Columns[0].Name
|
||||
}
|
||||
|
||||
if rowIdxStr == "*" {
|
||||
return columnValuesFromRows(column, rows)
|
||||
}
|
||||
|
||||
rowIdx, err := strconv.Atoi(rowIdxStr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reference to with '%s' has invalid property path '%s' - cannot parse row idx '%s'", name, path.Original, rowIdxStr)
|
||||
}
|
||||
|
||||
// do we have the requested row
|
||||
if rowCount := len(rows); rowIdx >= rowCount {
|
||||
return nil, fmt.Errorf("reference to with '%s' has invalid row index '%d' - %d %s were returned", name, rowIdx, rowCount, utils.Pluralize("row", rowCount))
|
||||
}
|
||||
// so we are returning a single row
|
||||
row := rows[rowIdx]
|
||||
return row[column], nil
|
||||
}
|
||||
|
||||
func columnValuesFromRows(column string, rows []map[string]any) (any, error) {
|
||||
if column == "" {
|
||||
return nil, fmt.Errorf("columnValuesFromRows failed - no column specified")
|
||||
}
|
||||
var res = make([]any, len(rows))
|
||||
for i, row := range rows {
|
||||
var ok bool
|
||||
res[i], ok = row[column]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("column %s does not exist", column)
|
||||
}
|
||||
}
|
||||
return res, nil
|
||||
}
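A small illustration of the property-path semantics described in getWithValue, using invented row data (assuming a 'with' named stuff1 returning columns a and b); this is a sketch, not part of the commit:

rows := []map[string]any{
    {"a": "one", "b": 1},
    {"a": "two", "b": 2},
}
all, _ := columnValuesFromRows("a", rows) // with.stuff1.rows[*].a -> ["one", "two"]
single := rows[0]["a"]                    // with.stuff1.rows[0].a -> "one"
_, _ = all, single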
|
||||
|
||||
func (p *runtimeDependencyPublisherImpl) setWithValue(w *LeafRun) {
|
||||
p.withValueMutex.Lock()
|
||||
defer p.withValueMutex.Unlock()
|
||||
|
||||
name := w.resource.GetUnqualifiedName()
|
||||
// if there was an error, w.Data will be nil and w.error will be non-nil
|
||||
result := &dashboardtypes.ResolvedRuntimeDependencyValue{Error: w.err}
|
||||
|
||||
if w.err == nil {
|
||||
populateData(w.Data, result)
|
||||
}
|
||||
p.PublishRuntimeDependencyValue(name, result)
|
||||
}
|
||||
|
||||
func populateData(withData *dashboardtypes.LeafData, result *dashboardtypes.ResolvedRuntimeDependencyValue) {
|
||||
result.Value = withData
|
||||
// TACTICAL - if there are any JSON columns, convert them back to a JSON string
|
||||
var jsonColumns []string
|
||||
for _, c := range withData.Columns {
|
||||
if c.DataType == "JSONB" || c.DataType == "JSON" {
|
||||
jsonColumns = append(jsonColumns, c.Name)
|
||||
}
|
||||
}
|
||||
// now convert any json values into a json string
|
||||
for _, c := range jsonColumns {
|
||||
for _, row := range withData.Rows {
|
||||
jsonBytes, err := json.Marshal(row[c])
|
||||
if err != nil {
|
||||
// publish result with the error
|
||||
result.Error = err
|
||||
result.Value = nil
|
||||
return
|
||||
}
|
||||
row[c] = string(jsonBytes)
|
||||
}
|
||||
}
|
||||
}
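For illustration, the JSONB round-trip populateData performs on a single row might look like this (example value invented):

row := map[string]any{"tags": map[string]any{"env": "prod"}}
if jsonBytes, err := json.Marshal(row["tags"]); err == nil {
    row["tags"] = string(jsonBytes) // {"env":"prod"} - now usable as a SQL arg
}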
|
||||
|
||||
func (p *runtimeDependencyPublisherImpl) createWithRuns(withs []*modconfig.DashboardWith, executionTree *DashboardExecutionTree) error {
|
||||
for _, w := range withs {
|
||||
// NOTE: set the name of the run to be the scoped name
|
||||
withRunName := fmt.Sprintf("%s.%s", p.GetName(), w.UnqualifiedName)
|
||||
withRun, err := NewLeafRun(w, p, executionTree, setName(withRunName))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// set an onComplete function to populate 'with' data
|
||||
withRun.onComplete = func() { p.setWithValue(withRun) }
|
||||
|
||||
p.withRuns[w.UnqualifiedName] = withRun
|
||||
p.children = append(p.children, withRun)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// called when the args are resolved - if anyone is subscribing to the args value, publish
|
||||
func (p *runtimeDependencyPublisherImpl) argsResolved(args []any) {
|
||||
// use params to get the param name for each arg and then look for a subscriber
|
||||
for i, param := range p.Params {
|
||||
if i == len(args) {
|
||||
return
|
||||
}
|
||||
// do we have a subscription for this param
|
||||
if _, ok := p.subscriptions[param.UnqualifiedName]; ok {
|
||||
p.PublishRuntimeDependencyValue(param.UnqualifiedName, &dashboardtypes.ResolvedRuntimeDependencyValue{Value: args[i]})
|
||||
}
|
||||
}
|
||||
log.Printf("[TRACE] %s: argsResolved", p.Name)
|
||||
}
|
||||
@@ -1,13 +0,0 @@
package snapshot

import (
"github.com/turbot/steampipe/pkg/dashboard/dashboardtypes"
)

type RuntimeDependencyPublishOption = func(target *RuntimeDependencyPublishTarget)

func WithTransform(transform func(*dashboardtypes.ResolvedRuntimeDependencyValue) *dashboardtypes.ResolvedRuntimeDependencyValue) RuntimeDependencyPublishOption {
return func(c *RuntimeDependencyPublishTarget) {
c.transform = transform
}
}
@@ -1,10 +0,0 @@
package snapshot

import (
"github.com/turbot/steampipe/pkg/dashboard/dashboardtypes"
)

type RuntimeDependencyPublishTarget struct {
transform func(*dashboardtypes.ResolvedRuntimeDependencyValue) *dashboardtypes.ResolvedRuntimeDependencyValue
channel chan *dashboardtypes.ResolvedRuntimeDependencyValue
}
@@ -1,6 +0,0 @@
package snapshot

type RuntimeDependencySubscriber interface {
RuntimeDependencyPublisher
GetBaseDependencySubscriber() RuntimeDependencySubscriber
}
@@ -1,461 +0,0 @@
|
||||
package snapshot
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"sync"
|
||||
|
||||
"github.com/turbot/go-kit/helpers"
|
||||
typehelpers "github.com/turbot/go-kit/types"
|
||||
"github.com/turbot/steampipe/pkg/dashboard/dashboardtypes"
|
||||
"github.com/turbot/steampipe/pkg/error_helpers"
|
||||
"github.com/turbot/steampipe/pkg/steampipeconfig/modconfig"
|
||||
"golang.org/x/exp/maps"
|
||||
)
|
||||
|
||||
type RuntimeDependencySubscriberImpl struct {
|
||||
// all RuntimeDependencySubscribers are also publishers as they have args/params
|
||||
runtimeDependencyPublisherImpl
|
||||
// if the underlying resource has a base resource, create a RuntimeDependencySubscriberImpl instance to handle
|
||||
// generation and publication of runtime dependencies from the base resource
|
||||
baseDependencySubscriber *RuntimeDependencySubscriberImpl
|
||||
// map of runtime dependencies, keyed by dependency long name
|
||||
runtimeDependencies map[string]*dashboardtypes.ResolvedRuntimeDependency
|
||||
RawSQL string `json:"sql,omitempty"`
|
||||
executeSQL string
|
||||
// a list of the (scoped) names of any runtime dependencies that we rely on
|
||||
RuntimeDependencyNames []string `json:"dependencies,omitempty"`
|
||||
}
|
||||
|
||||
func NewRuntimeDependencySubscriber(resource modconfig.DashboardLeafNode, parent dashboardtypes.DashboardParent, run dashboardtypes.DashboardTreeRun, executionTree *DashboardExecutionTree) *RuntimeDependencySubscriberImpl {
|
||||
b := &RuntimeDependencySubscriberImpl{
|
||||
runtimeDependencies: make(map[string]*dashboardtypes.ResolvedRuntimeDependency),
|
||||
}
|
||||
|
||||
// create RuntimeDependencyPublisherImpl
|
||||
// (we must create this after creating the run as it requires a ref to the run)
|
||||
b.runtimeDependencyPublisherImpl = newRuntimeDependencyPublisherImpl(resource, parent, run, executionTree)
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// GetBaseDependencySubscriber implements RuntimeDependencySubscriber
|
||||
func (s *RuntimeDependencySubscriberImpl) GetBaseDependencySubscriber() RuntimeDependencySubscriber {
|
||||
return s.baseDependencySubscriber
|
||||
}
|
||||
|
||||
// if the resource is a runtime dependency provider, create with runs and resolve dependencies
|
||||
func (s *RuntimeDependencySubscriberImpl) initRuntimeDependencies(executionTree *DashboardExecutionTree) error {
|
||||
if _, ok := s.resource.(modconfig.RuntimeDependencyProvider); !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
// if our underlying resource has a base which has runtime dependencies,
|
||||
// create a RuntimeDependencySubscriberImpl for it
|
||||
if err := s.initBaseRuntimeDependencySubscriber(executionTree); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// call into publisher to start any with runs
|
||||
if err := s.runtimeDependencyPublisherImpl.initWiths(); err != nil {
|
||||
return err
|
||||
}
|
||||
// resolve any runtime dependencies
|
||||
return s.resolveRuntimeDependencies()
|
||||
}
|
||||
|
||||
func (s *RuntimeDependencySubscriberImpl) initBaseRuntimeDependencySubscriber(executionTree *DashboardExecutionTree) error {
|
||||
if base := s.resource.(modconfig.HclResource).GetBase(); base != nil {
|
||||
if _, ok := base.(modconfig.RuntimeDependencyProvider); ok {
|
||||
// create base dependency subscriber
|
||||
// pass ourselves as 'run'
|
||||
// - this is only used when sending update events, which will not happen for the baseDependencySubscriber
|
||||
s.baseDependencySubscriber = NewRuntimeDependencySubscriber(base.(modconfig.DashboardLeafNode), nil, s, executionTree)
|
||||
err := s.baseDependencySubscriber.initRuntimeDependencies(executionTree)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// create buffered channel for base with to report their completion
|
||||
s.baseDependencySubscriber.createChildCompleteChan()
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// if this node has runtime dependencies, find the publisher of the dependency and create a dashboardtypes.ResolvedRuntimeDependency
|
||||
// which we use to resolve the values
|
||||
func (s *RuntimeDependencySubscriberImpl) resolveRuntimeDependencies() error {
|
||||
rdp, ok := s.resource.(modconfig.RuntimeDependencyProvider)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
runtimeDependencies := rdp.GetRuntimeDependencies()
|
||||
|
||||
for n, d := range runtimeDependencies {
|
||||
// find a runtime dependency publisher who can provide this runtime dependency
|
||||
publisher := s.findRuntimeDependencyPublisher(d)
|
||||
if publisher == nil {
|
||||
// should never happen as validation should have caught this
|
||||
return fmt.Errorf("cannot resolve runtime dependency %s", d.String())
|
||||
}
|
||||
|
||||
// read name and dep into local loop vars to ensure correct value used when transform func is invoked
|
||||
name := n
|
||||
dep := d
|
||||
|
||||
// determine the function to use to retrieve the runtime dependency value
|
||||
var opts []RuntimeDependencyPublishOption
|
||||
|
||||
switch dep.PropertyPath.ItemType {
|
||||
case modconfig.BlockTypeWith:
|
||||
// set a transform function to extract the requested with data
|
||||
opts = append(opts, WithTransform(func(resolvedVal *dashboardtypes.ResolvedRuntimeDependencyValue) *dashboardtypes.ResolvedRuntimeDependencyValue {
|
||||
transformedResolvedVal := &dashboardtypes.ResolvedRuntimeDependencyValue{Error: resolvedVal.Error}
|
||||
if resolvedVal.Error == nil {
|
||||
// the runtime dependency value for a 'with' is *dashboardtypes.LeafData
|
||||
withValue, err := s.getWithValue(name, resolvedVal.Value.(*dashboardtypes.LeafData), dep.PropertyPath)
|
||||
if err != nil {
|
||||
transformedResolvedVal.Error = fmt.Errorf("failed to resolve with value '%s' for %s: %s", dep.PropertyPath.Original, name, err.Error())
|
||||
} else {
|
||||
transformedResolvedVal.Value = withValue
|
||||
}
|
||||
}
|
||||
return transformedResolvedVal
|
||||
}))
|
||||
}
|
||||
// subscribe, passing a function which invokes getWithValue to resolve the required with value
|
||||
valueChannel := publisher.SubscribeToRuntimeDependency(d.SourceResourceName(), opts...)
|
||||
|
||||
publisherName := publisher.GetName()
|
||||
s.runtimeDependencies[name] = dashboardtypes.NewResolvedRuntimeDependency(dep, valueChannel, publisherName)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *RuntimeDependencySubscriberImpl) findRuntimeDependencyPublisher(runtimeDependency *modconfig.RuntimeDependency) RuntimeDependencyPublisher {
|
||||
// the runtime dependency publisher is either the root dashboard run,
|
||||
// or if this resource (or in case of a node/edge, the resource parent) has a base,
|
||||
// the baseDependencySubscriber for that base
|
||||
var subscriber RuntimeDependencySubscriber = s
|
||||
if s.NodeType == modconfig.BlockTypeNode || s.NodeType == modconfig.BlockTypeEdge {
|
||||
subscriber = s.parent.(RuntimeDependencySubscriber)
|
||||
}
|
||||
baseSubscriber := subscriber.GetBaseDependencySubscriber()
|
||||
|
||||
// "if I have a base with runtime dependencies, those dependencies must be provided BY THE BASE"
|
||||
// check the provider property on the runtime dependency
|
||||
// - if it matches the underlying resource for the baseDependencySubscriber,
|
||||
// then baseDependencySubscriber _should_ be the dependency publisher
|
||||
if !helpers.IsNil(baseSubscriber) && runtimeDependency.Provider == baseSubscriber.GetResource() {
|
||||
if baseSubscriber.ProvidesRuntimeDependency(runtimeDependency) {
|
||||
return baseSubscriber
|
||||
}
|
||||
|
||||
// unexpected
|
||||
log.Printf("[WARN] dependency %s has a dependency provider matching the base resource %s but the BaseDependencySubscriber does not provider the runtime dependency",
|
||||
runtimeDependency.String(), baseSubscriber.GetName())
|
||||
return nil
|
||||
}
|
||||
|
||||
// "if I am a base resource with runtime dependencies, I provide my own dependencies"
|
||||
// see if we can satisfy the dependency (this would occur when initialising the baseDependencySubscriber)
|
||||
if s.ProvidesRuntimeDependency(runtimeDependency) {
|
||||
return s
|
||||
}
|
||||
|
||||
// "if I am a nested resource, my dashboard provides my dependencies"
|
||||
// otherwise the dashboard run must be the publisher
|
||||
dashboardRun := s.executionTree.runs[s.DashboardName].(RuntimeDependencyPublisher)
|
||||
if dashboardRun.ProvidesRuntimeDependency(runtimeDependency) {
|
||||
return dashboardRun
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *RuntimeDependencySubscriberImpl) evaluateRuntimeDependencies(ctx context.Context) error {
|
||||
log.Printf("[TRACE] %s: evaluateRuntimeDependencies", s.Name)
|
||||
// now wait for any runtime dependencies then resolve args and params
|
||||
// (it is possible to have params but no sql)
|
||||
if s.hasRuntimeDependencies() {
|
||||
// if there are any unresolved runtime dependencies, wait for them
|
||||
if err := s.waitForRuntimeDependencies(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
log.Printf("[TRACE] %s: runtime dependencies availablem resolving sql and args", s.Name)
|
||||
|
||||
// ok now we have runtime dependencies, we can resolve the query
|
||||
if err := s.resolveSQLAndArgs(); err != nil {
|
||||
return err
|
||||
}
|
||||
// call the argsResolved callback in case anyone is waiting for the args
|
||||
s.argsResolved(s.Args)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *RuntimeDependencySubscriberImpl) waitForRuntimeDependencies(ctx context.Context) error {
|
||||
log.Printf("[TRACE] %s: waitForRuntimeDependencies", s.Name)
|
||||
|
||||
if !s.hasRuntimeDependencies() {
|
||||
log.Printf("[TRACE] %s: no runtime dependencies", s.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
// wait for base dependencies if we have any
|
||||
if s.baseDependencySubscriber != nil {
|
||||
log.Printf("[TRACE] %s: calling baseDependencySubscriber.waitForRuntimeDependencies", s.Name)
|
||||
if err := s.baseDependencySubscriber.waitForRuntimeDependencies(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
log.Printf("[TRACE] %s: checking whether all depdencies are resolved", s.Name)
|
||||
|
||||
allRuntimeDepsResolved := true
|
||||
for _, dep := range s.runtimeDependencies {
|
||||
if !dep.IsResolved() {
|
||||
allRuntimeDepsResolved = false
|
||||
log.Printf("[TRACE] %s: dependency %s is NOT resolved", s.Name, dep.Dependency.String())
|
||||
}
|
||||
}
|
||||
if allRuntimeDepsResolved {
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Printf("[TRACE] %s: BLOCKED", s.Name)
|
||||
// set status to blocked
|
||||
s.setStatus(ctx, dashboardtypes.RunBlocked)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
var errChan = make(chan error)
|
||||
var doneChan = make(chan struct{})
|
||||
for _, r := range s.runtimeDependencies {
|
||||
if !r.IsResolved() {
|
||||
// make copy of loop var for goroutine
|
||||
resolvedDependency := r
|
||||
log.Printf("[TRACE] %s: wait for %s", s.Name, resolvedDependency.Dependency.String())
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
// block until the dependency is available
|
||||
err := resolvedDependency.Resolve()
|
||||
log.Printf("[TRACE] %s: Resolve returned for %s", s.Name, resolvedDependency.Dependency.String())
|
||||
if err != nil {
|
||||
log.Printf("[TRACE] %s: Resolve for %s returned error:L %s", s.Name, resolvedDependency.Dependency.String(), err.Error())
|
||||
errChan <- err
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
go func() {
|
||||
log.Printf("[TRACE] %s: goroutine waiting for all runtime deps to be available", s.Name)
|
||||
wg.Wait()
|
||||
close(doneChan)
|
||||
}()
|
||||
|
||||
var errors []error
|
||||
|
||||
wait_loop:
|
||||
for {
|
||||
select {
|
||||
case err := <-errChan:
|
||||
errors = append(errors, err)
|
||||
case <-doneChan:
|
||||
break wait_loop
|
||||
case <-ctx.Done():
|
||||
errors = append(errors, ctx.Err())
|
||||
break wait_loop
|
||||
}
|
||||
}
|
||||
|
||||
log.Printf("[TRACE] %s: all runtime dependencies ready", s.resource.Name())
|
||||
return error_helpers.CombineErrors(errors...)
|
||||
}
|
||||
|
||||
func (s *RuntimeDependencySubscriberImpl) findRuntimeDependenciesForParentProperty(parentProperty string) []*dashboardtypes.ResolvedRuntimeDependency {
|
||||
var res []*dashboardtypes.ResolvedRuntimeDependency
|
||||
for _, dep := range s.runtimeDependencies {
|
||||
if dep.Dependency.ParentPropertyName == parentProperty {
|
||||
res = append(res, dep)
|
||||
}
|
||||
}
|
||||
// also look at base subscriber
|
||||
if s.baseDependencySubscriber != nil {
|
||||
for _, dep := range s.baseDependencySubscriber.runtimeDependencies {
|
||||
if dep.Dependency.ParentPropertyName == parentProperty {
|
||||
res = append(res, dep)
|
||||
}
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func (s *RuntimeDependencySubscriberImpl) findRuntimeDependencyForParentProperty(parentProperty string) *dashboardtypes.ResolvedRuntimeDependency {
|
||||
res := s.findRuntimeDependenciesForParentProperty(parentProperty)
|
||||
if len(res) > 1 {
|
||||
panic(fmt.Sprintf("findRuntimeDependencyForParentProperty for %s, parent property %s, returned more that 1 result", s.Name, parentProperty))
|
||||
}
|
||||
if res == nil {
|
||||
return nil
|
||||
}
|
||||
// return first result
|
||||
return res[0]
|
||||
}
|
||||
|
||||
// resolve the sql for this leaf run into the source sql and resolved args
|
||||
func (s *RuntimeDependencySubscriberImpl) resolveSQLAndArgs() error {
|
||||
log.Printf("[TRACE] %s: resolveSQLAndArgs", s.resource.Name())
|
||||
queryProvider, ok := s.resource.(modconfig.QueryProvider)
|
||||
if !ok {
|
||||
// not a query provider - nothing to do
|
||||
return nil
|
||||
}
|
||||
|
||||
// convert arg runtime dependencies into arg map
|
||||
runtimeArgs, err := s.buildRuntimeDependencyArgs()
|
||||
if err != nil {
|
||||
log.Printf("[TRACE] %s: buildRuntimeDependencyArgs failed: %s", s.resource.Name(), err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
// now if any param defaults had runtime dependencies, populate them
|
||||
s.populateParamDefaults(queryProvider)
|
||||
|
||||
log.Printf("[TRACE] %s: built runtime args: %v", s.resource.Name(), runtimeArgs)
|
||||
|
||||
// does this leaf run have any SQL to execute?
|
||||
if queryProvider.RequiresExecution(queryProvider) {
|
||||
log.Printf("[TRACE] ResolveArgsFromQueryProvider for %s", queryProvider.Name())
|
||||
resolvedQuery, err := s.executionTree.workspace.ResolveQueryFromQueryProvider(queryProvider, runtimeArgs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.RawSQL = resolvedQuery.RawSQL
|
||||
s.executeSQL = resolvedQuery.ExecuteSQL
|
||||
s.Args = resolvedQuery.Args
|
||||
} else {
|
||||
// otherwise just resolve the args
|
||||
|
||||
// merge the base args with the runtime args
|
||||
runtimeArgs, err = modconfig.MergeArgs(queryProvider, runtimeArgs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
args, err := modconfig.ResolveArgs(queryProvider, runtimeArgs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.Args = args
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *RuntimeDependencySubscriberImpl) populateParamDefaults(provider modconfig.QueryProvider) {
|
||||
paramDefs := provider.GetParams()
|
||||
for _, paramDef := range paramDefs {
|
||||
if dep := s.findRuntimeDependencyForParentProperty(paramDef.UnqualifiedName); dep != nil {
|
||||
// assuming the default property is the target, set the default
|
||||
if typehelpers.SafeString(dep.Dependency.TargetPropertyName) == "default" {
|
||||
//nolint:errcheck // the only reason where SetDefault could fail is if `dep.Value` cannot be marshalled as a JSON string
|
||||
paramDef.SetDefault(dep.Value)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// convert runtime dependencies into arg map
|
||||
func (s *RuntimeDependencySubscriberImpl) buildRuntimeDependencyArgs() (*modconfig.QueryArgs, error) {
|
||||
res := modconfig.NewQueryArgs()
|
||||
|
||||
log.Printf("[TRACE] %s: buildRuntimeDependencyArgs - %d runtime dependencies", s.resource.Name(), len(s.runtimeDependencies))
|
||||
|
||||
// if the runtime dependencies use positional args, get the max index and ensure the args array is large enough
|
||||
maxArgIndex := -1
|
||||
// build list of all args runtime dependencies
|
||||
argRuntimeDependencies := s.findRuntimeDependenciesForParentProperty(modconfig.AttributeArgs)
|
||||
|
||||
for _, dep := range argRuntimeDependencies {
|
||||
if dep.Dependency.TargetPropertyIndex != nil && *dep.Dependency.TargetPropertyIndex > maxArgIndex {
|
||||
maxArgIndex = *dep.Dependency.TargetPropertyIndex
|
||||
}
|
||||
}
|
||||
if maxArgIndex != -1 {
|
||||
res.ArgList = make([]*string, maxArgIndex+1)
|
||||
}
|
||||
|
||||
// now set the arg values
|
||||
for _, dep := range argRuntimeDependencies {
|
||||
if dep.Dependency.TargetPropertyName != nil {
|
||||
err := res.SetNamedArgVal(dep.Value, *dep.Dependency.TargetPropertyName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
} else {
|
||||
if dep.Dependency.TargetPropertyIndex == nil {
|
||||
return nil, fmt.Errorf("invalid runtime dependency - both ArgName and ArgIndex are nil ")
|
||||
}
|
||||
err := res.SetPositionalArgVal(dep.Value, *dep.Dependency.TargetPropertyIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
return res, nil
|
||||
}
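As a sketch of the two binding paths above (values invented; SetNamedArgVal, SetPositionalArgVal and ArgList are used exactly as in the function, but this snippet is illustrative only):

res := modconfig.NewQueryArgs()

// a runtime dependency with TargetPropertyName "bucket" binds a named arg
_ = res.SetNamedArgVal("my-bucket", "bucket")

// a runtime dependency with TargetPropertyIndex 0 binds a positional arg
res.ArgList = make([]*string, 1)
_ = res.SetPositionalArgVal("my-bucket", 0)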
|
||||
|
||||
// populate the list of runtime dependencies that this run depends on
|
||||
func (s *RuntimeDependencySubscriberImpl) setRuntimeDependencies() {
|
||||
names := make(map[string]struct{}, len(s.runtimeDependencies))
|
||||
for _, d := range s.runtimeDependencies {
|
||||
// add to DependencyWiths using ScopedName, i.e. <parent FullName>.<with UnqualifiedName>.
|
||||
// we do this as there may be a with from a base resource with a clashing with name
|
||||
// NOTE: this must be consistent with the naming in RuntimeDependencyPublisherImpl.createWithRuns
|
||||
names[d.ScopedName()] = struct{}{}
|
||||
}
|
||||
|
||||
s.RuntimeDependencyNames = maps.Keys(names)

// also add base runtime dependencies (if any)
if s.baseDependencySubscriber != nil {
s.baseDependencySubscriber.setRuntimeDependencies()
s.RuntimeDependencyNames = append(s.RuntimeDependencyNames, s.baseDependencySubscriber.RuntimeDependencyNames...)
}
|
||||
}
|
||||
|
||||
func (s *RuntimeDependencySubscriberImpl) hasRuntimeDependencies() bool {
|
||||
return len(s.runtimeDependencies)+len(s.baseRuntimeDependencies()) > 0
|
||||
}
|
||||
|
||||
func (s *RuntimeDependencySubscriberImpl) baseRuntimeDependencies() map[string]*dashboardtypes.ResolvedRuntimeDependency {
|
||||
if s.baseDependencySubscriber == nil {
|
||||
return map[string]*dashboardtypes.ResolvedRuntimeDependency{}
|
||||
}
|
||||
return s.baseDependencySubscriber.runtimeDependencies
|
||||
}
|
||||
|
||||
// override DashboardParentImpl.executeChildrenAsync to also execute 'withs' of our baseRun
|
||||
func (s *RuntimeDependencySubscriberImpl) executeChildrenAsync(ctx context.Context) {
|
||||
// if we have a baseDependencySubscriber, execute it
|
||||
if s.baseDependencySubscriber != nil {
|
||||
go s.baseDependencySubscriber.executeWithsAsync(ctx)
|
||||
}
|
||||
|
||||
// if this leaf run has children (including with runs) execute them asynchronously
|
||||
|
||||
// set RuntimeDependenciesOnly if needed
|
||||
s.DashboardParentImpl.executeChildrenAsync(ctx)
|
||||
}
|
||||
|
||||
// called when the args are resolved - if anyone is subscribing to the args value, publish
|
||||
func (s *RuntimeDependencySubscriberImpl) argsResolved(args []any) {
|
||||
if s.baseDependencySubscriber != nil {
|
||||
s.baseDependencySubscriber.argsResolved(args)
|
||||
}
|
||||
s.runtimeDependencyPublisherImpl.argsResolved(args)
|
||||
}
|
||||
@@ -1,77 +0,0 @@
|
||||
package snapshot
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/turbot/steampipe/pkg/dashboard/dashboardevents"
|
||||
"github.com/turbot/steampipe/pkg/dashboard/dashboardtypes"
|
||||
"github.com/turbot/steampipe/pkg/initialisation"
|
||||
"github.com/turbot/steampipe/pkg/steampipeconfig/modconfig"
|
||||
)
|
||||
|
||||
func GenerateSnapshot(ctx context.Context, target string, initData *initialisation.InitData, inputs map[string]any) (snapshot *dashboardtypes.SteampipeSnapshot, err error) {
|
||||
w := initData.Workspace
|
||||
|
||||
parsedName, err := modconfig.ParseResourceName(target)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// no session for manual execution
|
||||
sessionId := ""
|
||||
errorChannel := make(chan error)
|
||||
resultChannel := make(chan *dashboardtypes.SteampipeSnapshot)
|
||||
dashboardEventHandler := func(ctx context.Context, event dashboardevents.DashboardEvent) {
|
||||
handleDashboardEvent(ctx, event, resultChannel, errorChannel)
|
||||
}
|
||||
w.RegisterDashboardEventHandler(ctx, dashboardEventHandler)
|
||||
// clear event handlers again in case another snapshot will be generated in this run
|
||||
defer w.UnregisterDashboardEventHandlers()
|
||||
|
||||
// all runtime dependencies must be resolved before execution (i.e. inputs must be passed in)
|
||||
Executor.interactive = false
|
||||
if err := Executor.ExecuteDashboard(ctx, sessionId, target, inputs, w, initData.Client); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
select {
|
||||
case err = <-errorChannel:
|
||||
return nil, err
|
||||
case snapshot = <-resultChannel:
|
||||
// set the filename root of the snapshot
|
||||
fileRootName, err := parsedName.ToFullNameWithMod(w.Mod.ShortName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
snapshot.FileNameRoot = fileRootName
|
||||
// return the context error (if any) to ensure we respect cancellation
|
||||
return snapshot, ctx.Err()
|
||||
}
|
||||
}
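A hypothetical caller sketch (target and input names invented; ctx and initData assumed to be in scope): because there is no interactive session, every dashboard input must be supplied up front.

inputs := map[string]any{"input.region": "us-east-1"}
snap, err := GenerateSnapshot(ctx, "my_mod.dashboard.cis_overview", initData, inputs)
if err != nil {
    return err
}
log.Printf("[INFO] generated snapshot %s", snap.Title)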
|
||||
|
||||
func handleDashboardEvent(_ context.Context, event dashboardevents.DashboardEvent, resultChannel chan *dashboardtypes.SteampipeSnapshot, errorChannel chan error) {
|
||||
switch e := event.(type) {
|
||||
case *dashboardevents.ExecutionError:
|
||||
errorChannel <- e.Error
|
||||
case *dashboardevents.ExecutionComplete:
|
||||
log.Println("[TRACE] execution complete event", *e)
|
||||
snap := ExecutionCompleteToSnapshot(e)
|
||||
resultChannel <- snap
|
||||
}
|
||||
}
|
||||
|
||||
// ExecutionCompleteToSnapshot transforms the ExecutionComplete event into a SteampipeSnapshot
|
||||
func ExecutionCompleteToSnapshot(event *dashboardevents.ExecutionComplete) *dashboardtypes.SteampipeSnapshot {
|
||||
return &dashboardtypes.SteampipeSnapshot{
|
||||
SchemaVersion: fmt.Sprintf("%d", dashboardtypes.SteampipeSnapshotSchemaVersion),
|
||||
Panels: event.Panels,
|
||||
Layout: event.Root.AsTreeNode(),
|
||||
Inputs: event.Inputs,
|
||||
Variables: event.Variables,
|
||||
SearchPath: event.SearchPath,
|
||||
StartTime: event.StartTime,
|
||||
EndTime: event.EndTime,
|
||||
Title: event.Root.GetTitle(),
|
||||
}
|
||||
}
|
||||
@@ -16,16 +16,14 @@ import (
|
||||
"github.com/turbot/steampipe/pkg/pluginmanager_service/grpc/proto"
|
||||
pluginshared "github.com/turbot/steampipe/pkg/pluginmanager_service/grpc/shared"
|
||||
"github.com/turbot/steampipe/pkg/steampipeconfig/modconfig"
|
||||
"github.com/turbot/steampipe/pkg/steampipeconfig/options"
|
||||
"golang.org/x/exp/maps"
|
||||
)
|
||||
|
||||
type ConnectionPluginData struct {
|
||||
Name string
|
||||
Config string
|
||||
Type string
|
||||
Options *options.Connection
|
||||
Schema *sdkproto.Schema
|
||||
Name string
|
||||
Config string
|
||||
Type string
|
||||
Schema *sdkproto.Schema
|
||||
}
|
||||
|
||||
// ConnectionPlugin is a structure representing an instance of a plugin
|
||||
@@ -41,12 +39,11 @@ type ConnectionPlugin struct {
|
||||
PluginShortName string
|
||||
}
|
||||
|
||||
func (p ConnectionPlugin) addConnection(name string, config string, connectionOptions *options.Connection, connectionType string) {
|
||||
func (p ConnectionPlugin) addConnection(name string, config string, connectionType string) {
|
||||
p.ConnectionMap[name] = &ConnectionPluginData{
|
||||
Name: name,
|
||||
Config: config,
|
||||
Type: connectionType,
|
||||
Options: connectionOptions,
|
||||
Name: name,
|
||||
Config: config,
|
||||
Type: connectionType,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -328,7 +325,7 @@ func createConnectionPlugin(connection *modconfig.Connection, reattach *proto.Re
|
||||
log.Printf("[WARN] no connection config loaded for '%s', skipping", c)
|
||||
continue
|
||||
}
|
||||
connectionPlugin.addConnection(c, config.Config, config.Options, config.Type)
|
||||
connectionPlugin.addConnection(c, config.Config, config.Type)
|
||||
}
|
||||
|
||||
log.Printf("[TRACE] created connection plugin for connection: '%s', pluginInstance: '%s'", connectionName, pluginInstance)
|
||||
|
||||
@@ -1,124 +0,0 @@
|
||||
package steampipeconfig
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/gertd/go-pluralize"
|
||||
"github.com/turbot/pipe-fittings/utils"
|
||||
"github.com/turbot/steampipe/pkg/steampipeconfig/modconfig"
|
||||
"github.com/turbot/terraform-components/tfdiags"
|
||||
)
|
||||
|
||||
type MissingVariableError struct {
|
||||
MissingVariables []*modconfig.Variable
|
||||
MissingTransitiveVariables map[DependencyPathKey][]*modconfig.Variable
|
||||
workspaceMod *modconfig.Mod
|
||||
}
|
||||
|
||||
func NewMissingVarsError(workspaceMod *modconfig.Mod) MissingVariableError {
|
||||
return MissingVariableError{
|
||||
MissingTransitiveVariables: make(map[DependencyPathKey][]*modconfig.Variable),
|
||||
workspaceMod: workspaceMod,
|
||||
}
|
||||
}
|
||||
|
||||
func (m MissingVariableError) Error() string {
|
||||
//allMissing := append(m.MissingVariables, m.MissingTransitiveVariables...)
|
||||
missingCount := len(m.MissingVariables)
|
||||
for _, missing := range m.MissingTransitiveVariables {
|
||||
missingCount += len(missing)
|
||||
}
|
||||
|
||||
return fmt.Sprintf("missing %d variable %s:\n%s%s",
|
||||
missingCount,
|
||||
utils.Pluralize("value", missingCount),
|
||||
m.getVariableMissingString(),
|
||||
m.getTransitiveVariableMissingString(),
|
||||
)
|
||||
}
|
||||
|
||||
func (m MissingVariableError) getVariableMissingString() string {
|
||||
var sb strings.Builder
|
||||
|
||||
varNames := make([]string, len(m.MissingVariables))
|
||||
for i, v := range m.MissingVariables {
|
||||
varNames[i] = m.getVariableName(v)
|
||||
}
|
||||
|
||||
// sort names for top level first
|
||||
sort.Slice(varNames, func(i, j int) bool {
return len(strings.Split(varNames[i], ".")) < len(strings.Split(varNames[j], "."))
})
|
||||
|
||||
for _, v := range varNames {
|
||||
sb.WriteString(fmt.Sprintf("\t%s not set\n", v))
|
||||
}
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
func (m MissingVariableError) getTransitiveVariableMissingString() string {
|
||||
var sb strings.Builder
|
||||
for modPath, missingVars := range m.MissingTransitiveVariables {
|
||||
parentPath := modPath.GetParent()
|
||||
varCount := len(missingVars)
|
||||
|
||||
varNames := make([]string, len(missingVars))
|
||||
for i, v := range missingVars {
|
||||
varNames[i] = m.getVariableName(v)
|
||||
}
|
||||
|
||||
pluralizer := pluralize.NewClient()
|
||||
pluralizer.AddIrregularRule("has", "have")
|
||||
pluralizer.AddIrregularRule("an arg", "args")
|
||||
varsString := strings.Join(varNames, ",")
|
||||
|
||||
sb.WriteString(
|
||||
fmt.Sprintf("\tdependency mod %s cannot be loaded because %s %s %s no value. Mod %s must pass %s for %s in the `require` block of its mod.sp\n",
|
||||
modPath,
|
||||
pluralizer.Pluralize("variable", varCount, false),
|
||||
varsString,
|
||||
pluralizer.Pluralize("has", varCount, false),
|
||||
parentPath,
|
||||
pluralizer.Pluralize("a value", varCount, false),
|
||||
varsString,
|
||||
))
|
||||
|
||||
}
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
func (m MissingVariableError) getVariableName(v *modconfig.Variable) string {
|
||||
if v.Mod.Name() == m.workspaceMod.Name() {
|
||||
return v.ShortName
|
||||
}
|
||||
return fmt.Sprintf("%s.%s", v.Mod.ShortName, v.ShortName)
|
||||
}
|
||||
|
||||
type VariableValidationFailedError struct {
|
||||
diags tfdiags.Diagnostics
|
||||
}
|
||||
|
||||
func newVariableValidationFailedError(diags tfdiags.Diagnostics) VariableValidationFailedError {
|
||||
return VariableValidationFailedError{diags: diags}
|
||||
}
|
||||
func (m VariableValidationFailedError) Error() string {
|
||||
var sb strings.Builder
|
||||
|
||||
for i, diag := range m.diags {
|
||||
if diag.Severity() == tfdiags.Error {
|
||||
sb.WriteString(fmt.Sprintf("%s: %s",
|
||||
diag.Description().Summary,
|
||||
diag.Description().Detail))
|
||||
if i < len(m.diags)-1 {
|
||||
sb.WriteString("\n")
|
||||
}
|
||||
}
|
||||
}
|
||||
return sb.String()
|
||||
}
|
||||
@@ -1,47 +0,0 @@
|
||||
package inputvars
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/turbot/steampipe/pkg/steampipeconfig/modconfig"
|
||||
"github.com/turbot/steampipe/pkg/steampipeconfig/versionmap"
|
||||
"github.com/turbot/terraform-components/tfdiags"
|
||||
)
|
||||
|
||||
func CollectVariableValuesFromModRequire(m *modconfig.Mod, lock *versionmap.WorkspaceLock) (InputValues, error) {
|
||||
res := make(InputValues)
|
||||
if m.Require != nil {
|
||||
for _, depModConstraint := range m.Require.Mods {
|
||||
if args := depModConstraint.Args; args != nil {
|
||||
// find the loaded dep mod which satisfies this constraint
|
||||
resolvedConstraint := lock.GetMod(depModConstraint.Name, m)
|
||||
if resolvedConstraint == nil {
|
||||
return nil, fmt.Errorf("dependency mod %s is not loaded", depModConstraint.Name)
|
||||
}
|
||||
for varName, varVal := range args {
|
||||
varFullName := fmt.Sprintf("%s.var.%s", resolvedConstraint.Alias, varName)
|
||||
|
||||
sourceRange := tfdiags.SourceRange{
|
||||
Filename: m.Require.DeclRange.Filename,
|
||||
Start: tfdiags.SourcePos{
|
||||
Line: m.Require.DeclRange.Start.Line,
|
||||
Column: m.Require.DeclRange.Start.Column,
|
||||
Byte: m.Require.DeclRange.Start.Byte,
|
||||
},
|
||||
End: tfdiags.SourcePos{
|
||||
Line: m.Require.DeclRange.End.Line,
|
||||
Column: m.Require.DeclRange.End.Column,
|
||||
Byte: m.Require.DeclRange.End.Byte,
|
||||
},
|
||||
}
|
||||
|
||||
res[varFullName] = &InputValue{
|
||||
Value: varVal,
|
||||
SourceType: ValueFromModFile,
|
||||
SourceRange: sourceRange,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
@@ -4,6 +4,8 @@ import (
"bytes"
"context"
"fmt"
"github.com/turbot/pipe-fittings/options"
"github.com/turbot/pipe-fittings/workspace_profile"
"log"
"os"
"path/filepath"
@@ -19,17 +21,19 @@ import (
pfilepaths "github.com/turbot/pipe-fittings/filepaths"
"github.com/turbot/pipe-fittings/hclhelpers"
"github.com/turbot/pipe-fittings/ociinstaller/versionfile"
pparse "github.com/turbot/pipe-fittings/parse"
"github.com/turbot/pipe-fittings/utils"
"github.com/turbot/steampipe-plugin-sdk/v5/sperr"
"github.com/turbot/steampipe/pkg/constants"
"github.com/turbot/steampipe/pkg/db/db_common"
"github.com/turbot/steampipe/pkg/error_helpers"
"github.com/turbot/steampipe/pkg/filepaths"
"github.com/turbot/steampipe/pkg/parse"
"github.com/turbot/steampipe/pkg/steampipeconfig/modconfig"
"github.com/turbot/steampipe/pkg/steampipeconfig/options"
"github.com/turbot/steampipe/pkg/steampipeconfig/parse"
)

var GlobalWorkspaceProfile *workspace_profile.SteampipeWorkspaceProfile

var GlobalConfig *SteampipeConfig
var defaultConfigFileName = "default.spc"
var defaultConfigSampleFileName = "default.spc.sample"
@@ -169,7 +173,7 @@ func loadSteampipeConfig(ctx context.Context, modLocation string, commandName st
// only include workspace.spc from workspace directory
include = filehelpers.InclusionsFromFiles([]string{filepaths.WorkspaceConfigFileName})
// update load options to ONLY allow terminal options
loadOptions = &loadConfigOptions{include: include, allowedOptions: []string{options.TerminalBlock}}
loadOptions = &loadConfigOptions{include: include}
ew := loadConfig(ctx, modLocation, steampipeConfig, loadOptions)
if ew.GetError() != nil {
return nil, ew.WrapErrorWithMessage("failed to load workspace config")
@@ -240,19 +244,19 @@ func loadConfig(ctx context.Context, configFolder string, steampipeConfig *Steam
return perror_helpers.ErrorAndWarnings{}
}

fileData, diags := parse.LoadFileData(configPaths...)
fileData, diags := pparse.LoadFileData(configPaths...)
if diags.HasErrors() {
log.Printf("[WARN] loadConfig: failed to load all config files: %v\n", err)
return perror_helpers.DiagsToErrorsAndWarnings("Failed to load all config files", diags)
}

body, diags := parse.ParseHclFiles(fileData)
body, diags := pparse.ParseHclFiles(fileData)
if diags.HasErrors() {
return perror_helpers.DiagsToErrorsAndWarnings("Failed to load all config files", diags)
}

// do a partial decode
content, moreDiags := body.Content(parse.ConfigBlockSchema)
content, moreDiags := body.Content(pparse.ConfigBlockSchema)
if moreDiags.HasErrors() {
diags = append(diags, moreDiags...)
return perror_helpers.DiagsToErrorsAndWarnings("Failed to load config", diags)
@@ -267,7 +271,7 @@ func loadConfig(ctx context.Context, configFolder string, steampipeConfig *Steam
switch block.Type {

case modconfig.BlockTypePlugin:
plugin, moreDiags := parse.DecodePlugin(block)
plugin, moreDiags := pparse.DecodePlugin(block)
diags = append(diags, moreDiags...)
if moreDiags.HasErrors() {
continue
@@ -298,7 +302,7 @@ func loadConfig(ctx context.Context, configFolder string, steampipeConfig *Steam
if err := optionsBlockPermitted(block, optionBlockMap, opts); err != nil {
return perror_helpers.NewErrorsAndWarning(err)
}
opts, moreDiags := parse.DecodeOptions(block)
opts, moreDiags := pparse.DecodeOptions(block, SteampipeOptionsBlockMapping)
if moreDiags.HasErrors() {
diags = append(diags, moreDiags...)
continue
@@ -361,3 +365,30 @@ func optionsBlockPermitted(block *hcl.Block, blockMap map[string]bool, opts *loa
}
return nil
}

// SteampipeOptionsBlockMapping is an OptionsBlockFactory used to map global steampipe options
// TODO KAI look at deprecations
func SteampipeOptionsBlockMapping(block *hcl.Block) (options.Options, hcl.Diagnostics) {
var diags hcl.Diagnostics

switch block.Labels[0] {

case options.DatabaseBlock:
return new(options.Database), nil
case options.GeneralBlock:
return new(options.General), nil
case options.QueryBlock:
return new(options.Query), nil
case options.CheckBlock:
return new(options.Check), nil
case options.PluginBlock:
return new(options.Plugin), nil
default:
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: fmt.Sprintf("Unexpected options type '%s'", block.Type),
Subject: hclhelpers.BlockRangePointer(block),
})
return nil, diags
}
}
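For illustration only (not part of this commit): the factory above is handed to the decoder, as the call opts, moreDiags := pparse.DecodeOptions(block, SteampipeOptionsBlockMapping) elsewhere in this diff shows. A minimal sketch of that shape follows; the body-decoding step via gohcl is an assumption about what DecodeOptions does internally, and decodeSteampipeOptions is a hypothetical name.

// Hypothetical sketch only: build the options struct for the block label via the
// factory, then decode the block body into it (assumes github.com/hashicorp/hcl/v2/gohcl).
// The real decoding lives inside pparse.DecodeOptions.
func decodeSteampipeOptions(block *hcl.Block) (options.Options, hcl.Diagnostics) {
	// the factory returns an empty options struct matching the block label,
	// e.g. options.DatabaseBlock -> *options.Database
	opts, diags := SteampipeOptionsBlockMapping(block)
	if diags.HasErrors() {
		return nil, diags
	}
	// decode the HCL body into the struct
	moreDiags := gohcl.DecodeBody(block.Body, nil, opts)
	return opts, append(diags, moreDiags...)
}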
@@ -9,10 +9,10 @@ import (
"testing"

"github.com/turbot/pipe-fittings/app_specific"
"github.com/turbot/pipe-fittings/options"
"github.com/turbot/pipe-fittings/plugin"
"github.com/turbot/pipe-fittings/utils"
"github.com/turbot/steampipe/pkg/steampipeconfig/modconfig"
"github.com/turbot/steampipe/pkg/steampipeconfig/options"
"golang.org/x/exp/maps"
)

@@ -1,327 +0,0 @@
package steampipeconfig

import (
"context"
"fmt"
error_helpers2 "github.com/turbot/pipe-fittings/error_helpers"
"log"
"os"
"path/filepath"
"strings"

filehelpers "github.com/turbot/go-kit/files"
"github.com/turbot/go-kit/helpers"
"github.com/turbot/steampipe-plugin-sdk/v5/plugin"
"github.com/turbot/steampipe/pkg/constants"
"github.com/turbot/steampipe/pkg/error_helpers"
"github.com/turbot/steampipe/pkg/steampipeconfig/modconfig"
"github.com/turbot/steampipe/pkg/steampipeconfig/parse"
"github.com/turbot/steampipe/pkg/steampipeconfig/versionmap"
)

// LoadMod parses all hcl files in modPath and returns a single mod
// if CreatePseudoResources flag is set, construct hcl resources for files with specific extensions
// NOTE: it is an error if there is more than 1 mod defined, however zero mods is acceptable
// - a default mod will be created assuming there are any resource files
func LoadMod(ctx context.Context, modPath string, parseCtx *parse.ModParseContext) (mod *modconfig.Mod, errorsAndWarnings error_helpers2.ErrorAndWarnings) {
defer func() {
if r := recover(); r != nil {
errorsAndWarnings = error_helpers2.NewErrorsAndWarning(helpers.ToError(r))
}
}()

mod, loadModResult := loadModDefinition(ctx, modPath, parseCtx)
if loadModResult.Error != nil {
return nil, loadModResult
}

// if this is a dependency mod, initialise the dependency config
if parseCtx.DependencyConfig != nil {
parseCtx.DependencyConfig.SetModProperties(mod)
}

// set the current mod on the run context
if err := parseCtx.SetCurrentMod(mod); err != nil {
return nil, error_helpers2.NewErrorsAndWarning(err)
}

// load the mod dependencies
if err := loadModDependencies(ctx, mod, parseCtx); err != nil {
return nil, error_helpers2.NewErrorsAndWarning(err)
}

// populate the resource maps of the current mod using the dependency mods
mod.ResourceMaps = parseCtx.GetResourceMaps()
// now load the mod resource hcl
mod, errorsAndWarnings = loadModResources(ctx, mod, parseCtx)

// add in any warnings from mod load
errorsAndWarnings.AddWarning(loadModResult.Warnings...)
return mod, errorsAndWarnings
}

func loadModDefinition(ctx context.Context, modPath string, parseCtx *parse.ModParseContext) (mod *modconfig.Mod, errorsAndWarnings error_helpers2.ErrorAndWarnings) {
errorsAndWarnings = error_helpers2.ErrorAndWarnings{}
// verify the mod folder exists
_, err := os.Stat(modPath)
if os.IsNotExist(err) {
return nil, error_helpers2.NewErrorsAndWarning(fmt.Errorf("mod folder %s does not exist", modPath))
}

modFilePath, exists := parse.ModfileExists(modPath)
if exists {
// load the mod definition to get the dependencies
var res *parse.DecodeResult
mod, res = parse.ParseModDefinition(modFilePath, parseCtx.EvalCtx)
errorsAndWarnings = error_helpers2.DiagsToErrorsAndWarnings("mod load failed", res.Diags)
if res.Diags.HasErrors() {
return nil, errorsAndWarnings
}
} else {
// so there is no mod file - should we create a default?
if !parseCtx.ShouldCreateDefaultMod() {
errorsAndWarnings.Error = fmt.Errorf("mod folder %s does not contain a mod resource definition", modPath)
// ShouldCreateDefaultMod flag NOT set - fail
return nil, errorsAndWarnings
}
// just create a default mod
mod = modconfig.CreateDefaultMod(modPath)

}
return mod, errorsAndWarnings
}

func loadModDependencies(ctx context.Context, parent *modconfig.Mod, parseCtx *parse.ModParseContext) error {
var errors []error
if parent.Require != nil {
// now ensure there is a lock file - if we have any mod dependencies there MUST be a lock file -
// otherwise 'steampipe install' must be run
if err := parseCtx.EnsureWorkspaceLock(parent); err != nil {
return err
}

for _, requiredModVersion := range parent.Require.Mods {
// get the locked version of this dependency
lockedVersion, err := parseCtx.WorkspaceLock.GetLockedModVersion(requiredModVersion, parent)
if err != nil {
return err
}
if lockedVersion == nil {
return fmt.Errorf("not all dependencies are installed - run 'steampipe mod install'")
}
if err := loadModDependency(ctx, lockedVersion, parseCtx); err != nil {
errors = append(errors, err)
}
}
}

return error_helpers.CombineErrors(errors...)
}

func loadModDependency(ctx context.Context, modDependency *versionmap.ResolvedVersionConstraint, parseCtx *parse.ModParseContext) error {
// dependency mods are installed to <mod path>/<mod name>@version
// for example workspace_folder/.steampipe/mods/github.com/turbot/steampipe-mod-aws-compliance@v1.0

// we need to list all mod folders in the parent folder: workspace_folder/.steampipe/mods/github.com/turbot/
// for each folder we parse the mod name and version and determine whether it meets the version constraint

// search the parent folder for a mod installation which satisfies the given mod dependency
dependencyDir, err := parseCtx.WorkspaceLock.FindInstalledDependency(modDependency)
if err != nil {
return err
}

// we need to modify the ListOptions to ensure we include hidden files - these are excluded by default
prevExclusions := parseCtx.ListOptions.Exclude
parseCtx.ListOptions.Exclude = nil
defer func() { parseCtx.ListOptions.Exclude = prevExclusions }()

childParseCtx := parse.NewChildModParseContext(parseCtx, modDependency, dependencyDir)
// NOTE: pass in the version and dependency path of the mod - these must be set before it loads its dependencies
dependencyMod, errAndWarnings := LoadMod(ctx, dependencyDir, childParseCtx)
if errAndWarnings.GetError() != nil {
return errAndWarnings.GetError()
}

// update loaded dependency mods
parseCtx.AddLoadedDependencyMod(dependencyMod)
// TODO IS THIS NEEDED????
if parseCtx.ParentParseCtx != nil {
// add mod resources to parent parse context
parseCtx.ParentParseCtx.AddModResources(dependencyMod)
}
return nil

}

func loadModResources(ctx context.Context, mod *modconfig.Mod, parseCtx *parse.ModParseContext) (*modconfig.Mod, error_helpers2.ErrorAndWarnings) {
// if flag is set, create pseudo resources by mapping files
var pseudoResources []modconfig.MappableResource
var err error
if parseCtx.CreatePseudoResources() {
// now execute any pseudo-resource creations based on file mappings
pseudoResources, err = createPseudoResources(ctx, mod, parseCtx)
if err != nil {
return nil, error_helpers2.NewErrorsAndWarning(err)
}
}

// get the source files
sourcePaths, err := getSourcePaths(ctx, mod.ModPath, parseCtx.ListOptions)
if err != nil {
log.Printf("[WARN] LoadMod: failed to get mod file paths: %v\n", err)
return nil, error_helpers2.NewErrorsAndWarning(err)
}

// load the raw file data
fileData, diags := parse.LoadFileData(sourcePaths...)
if diags.HasErrors() {
return nil, error_helpers2.NewErrorsAndWarning(plugin.DiagsToError("Failed to load all mod files", diags))
}

// parse all hcl files (NOTE - this reads the CurrentMod out of ParseContext and adds to it)
mod, errAndWarnings := parse.ParseMod(ctx, fileData, pseudoResources, parseCtx)

return mod, errAndWarnings
}

// LoadModResourceNames parses all hcl files in modPath and returns the names of all resources
func LoadModResourceNames(ctx context.Context, mod *modconfig.Mod, parseCtx *parse.ModParseContext) (resources *modconfig.WorkspaceResources, err error) {
defer func() {
if r := recover(); r != nil {
err = helpers.ToError(r)
}
}()

resources = modconfig.NewWorkspaceResources()
if parseCtx == nil {
parseCtx = &parse.ModParseContext{}
}
// verify the mod folder exists
if _, err := os.Stat(mod.ModPath); os.IsNotExist(err) {
return nil, fmt.Errorf("mod folder %s does not exist", mod.ModPath)
}

// now execute any pseudo-resource creations based on file mappings
pseudoResources, err := createPseudoResources(ctx, mod, parseCtx)
if err != nil {
return nil, err
}

// add pseudo resources to result
for _, r := range pseudoResources {
if strings.HasPrefix(r.Name(), "query.") || strings.HasPrefix(r.Name(), "local.query.") {
resources.Query[r.Name()] = true
}
}

sourcePaths, err := getSourcePaths(ctx, mod.ModPath, parseCtx.ListOptions)
if err != nil {
log.Printf("[WARN] LoadModResourceNames: failed to get mod file paths: %v\n", err)
return nil, err
}

fileData, diags := parse.LoadFileData(sourcePaths...)
if diags.HasErrors() {
return nil, plugin.DiagsToError("Failed to load all mod files", diags)
}

parsedResourceNames, err := parse.ParseModResourceNames(fileData)
if err != nil {
return nil, err
}
return resources.Merge(parsedResourceNames), nil
}

// GetModFileExtensions returns list of all file extensions we care about
// this will be the mod data extensions, plus any extensions registered in fileToResourceMap
func GetModFileExtensions() []string {
res := append(modconfig.RegisteredFileExtensions(), constants.ModDataExtensions...)
return append(res, constants.VariablesExtensions...)
}

// build list of all filepaths we need to parse/load the mod
// this will include hcl files (with .sp extension)
// as well as any other files with extensions that have been registered for pseudo resource creation
// (see steampipeconfig/modconfig/resource_type_map.go)
func getSourcePaths(ctx context.Context, modPath string, listOpts *filehelpers.ListOptions) ([]string, error) {
sourcePaths, err := filehelpers.ListFilesWithContext(ctx, modPath, listOpts)
if err != nil {
return nil, err
}
return sourcePaths, nil
}

// create pseudo-resources for any files whose extensions are registered
func createPseudoResources(ctx context.Context, mod *modconfig.Mod, parseCtx *parse.ModParseContext) ([]modconfig.MappableResource, error) {
// create list options to find pseudo resources
listOpts := &filehelpers.ListOptions{
Flags: parseCtx.ListOptions.Flags,
Include: filehelpers.InclusionsFromExtensions(modconfig.RegisteredFileExtensions()),
Exclude: parseCtx.ListOptions.Exclude,
}
// list all registered files
sourcePaths, err := getSourcePaths(ctx, mod.ModPath, listOpts)
if err != nil {
return nil, err
}

var errors []error
var res []modconfig.MappableResource

// for every source path:
// - if it is NOT a registered type, skip
// [- if an existing resource has already referred directly to this file, skip] *not yet*
for _, path := range sourcePaths {
factory, ok := modconfig.ResourceTypeMap[filepath.Ext(path)]
if !ok {
continue
}
resource, fileData, err := factory(mod.ModPath, path, parseCtx.CurrentMod)
if err != nil {
errors = append(errors, err)
continue
}
if resource != nil {
metadata, err := getPseudoResourceMetadata(mod, resource.Name(), path, fileData)
if err != nil {
return nil, err
}
resource.SetMetadata(metadata)
res = append(res, resource)
}
}

// show errors as trace logging
if len(errors) > 0 {
for _, err := range errors {
log.Printf("[TRACE] failed to convert local file into resource: %v", err)
}
}

return res, nil
}

func getPseudoResourceMetadata(mod *modconfig.Mod, resourceName string, path string, fileData []byte) (*modconfig.ResourceMetadata, error) {
sourceDefinition := string(fileData)
split := strings.Split(sourceDefinition, "\n")
lineCount := len(split)

// convert the name into a short name
parsedName, err := modconfig.ParseResourceName(resourceName)
if err != nil {
return nil, err
}

m := &modconfig.ResourceMetadata{
ResourceName: parsedName.Name,
FileName: path,
StartLineNumber: 1,
EndLineNumber: lineCount,
IsAutoGenerated: true,
SourceDefinition: sourceDefinition,
}
m.SetMod(mod)

return m, nil
}
@@ -1,218 +0,0 @@
package steampipeconfig

import (
"context"
"github.com/turbot/pipe-fittings/error_helpers"
"golang.org/x/exp/maps"
"log"
"sort"
"strings"

"github.com/spf13/viper"
"github.com/turbot/pipe-fittings/utils"
"github.com/turbot/steampipe-plugin-sdk/v5/plugin"
"github.com/turbot/steampipe/pkg/constants"
"github.com/turbot/steampipe/pkg/steampipeconfig/inputvars"
"github.com/turbot/steampipe/pkg/steampipeconfig/modconfig"
"github.com/turbot/steampipe/pkg/steampipeconfig/parse"
"github.com/turbot/steampipe/pkg/steampipeconfig/versionmap"
"github.com/turbot/terraform-components/tfdiags"
)

func LoadVariableDefinitions(ctx context.Context, variablePath string, parseCtx *parse.ModParseContext) (*modconfig.ModVariableMap, error) {
// only load mod and variables blocks
parseCtx.BlockTypes = []string{modconfig.BlockTypeVariable}
mod, errAndWarnings := LoadMod(ctx, variablePath, parseCtx)
if errAndWarnings.GetError() != nil {
return nil, errAndWarnings.GetError()
}

variableMap := modconfig.NewModVariableMap(mod)

return variableMap, nil
}

func GetVariableValues(parseCtx *parse.ModParseContext, variableMap *modconfig.ModVariableMap, validate bool) (*modconfig.ModVariableMap, error_helpers.ErrorAndWarnings) {
log.Printf("[INFO] GetVariableValues")
// now resolve all input variables
inputValues, errorsAndWarnings := getInputVariables(parseCtx, variableMap, validate)
if errorsAndWarnings.Error == nil {
// now update the variables map with the input values
inputValues.SetVariableValues(variableMap)
}

return variableMap, errorsAndWarnings
}

func getInputVariables(parseCtx *parse.ModParseContext, variableMap *modconfig.ModVariableMap, validate bool) (inputvars.InputValues, error_helpers.ErrorAndWarnings) {
variableFileArgs := viper.GetStringSlice(constants.ArgVarFile)
variableArgs := viper.GetStringSlice(constants.ArgVariable)

// get mod and mod path from run context
mod := parseCtx.CurrentMod
path := mod.ModPath

log.Printf("[INFO] getInputVariables, variableFileArgs: %s, variableArgs: %s", variableFileArgs, variableArgs)

var inputValuesUnparsed, err = inputvars.CollectVariableValues(path, variableFileArgs, variableArgs, parseCtx.CurrentMod)
if err != nil {
log.Printf("[WARN] CollectVariableValues failed: %s", err.Error())

return nil, error_helpers.NewErrorsAndWarning(err)
}

log.Printf("[INFO] collected unparsed input values for vars: %s", strings.Join(maps.Keys(inputValuesUnparsed), ","))

if validate {
if err := identifyAllMissingVariables(parseCtx, variableMap, inputValuesUnparsed); err != nil {
log.Printf("[INFO] identifyAllMissingVariables returned a validation error: %s", err.Error())

return nil, error_helpers.NewErrorsAndWarning(err)
}
}

// only parse values for public variables
parsedValues, diags := inputvars.ParseVariableValues(inputValuesUnparsed, variableMap, validate)
if diags.HasErrors() {
log.Printf("[INFO] ParseVariableValues returned error: %s", diags.Err())
} else {
log.Printf("[INFO] parsed values for public variables: %s", strings.Join(maps.Keys(parsedValues), ","))
}

if validate {
moreDiags := inputvars.CheckInputVariables(variableMap.PublicVariables, parsedValues)
diags = append(diags, moreDiags...)
}

return parsedValues, newVariableValidationResult(diags)
}

func newVariableValidationResult(diags tfdiags.Diagnostics) error_helpers.ErrorAndWarnings {
warnings := plugin.DiagsToWarnings(diags.ToHCL())
var err error
if diags.HasErrors() {
err = newVariableValidationFailedError(diags)
}
return error_helpers.NewErrorsAndWarning(err, warnings...)
}

func identifyAllMissingVariables(parseCtx *parse.ModParseContext, variableMap *modconfig.ModVariableMap, variableValues map[string]inputvars.UnparsedVariableValue) error {
// convert variableValues into a lookup
var variableValueLookup = utils.SliceToLookup(maps.Keys(variableValues))
missingVarsMap, err := identifyMissingVariablesForDependencies(parseCtx.WorkspaceLock, variableMap, variableValueLookup, nil)

if err != nil {
return err
}
if len(missingVarsMap) == 0 {
// all good
return nil
}

// build a MissingVariableError
missingVarErr := NewMissingVarsError(parseCtx.CurrentMod)

// build a lookup with the dependency path of the root mod and all top level dependencies
rootName := variableMap.Mod.ShortName
topLevelModLookup := map[DependencyPathKey]struct{}{DependencyPathKey(rootName): {}}
for dep := range parseCtx.WorkspaceLock.InstallCache {
depPathKey := newDependencyPathKey(rootName, dep)
topLevelModLookup[depPathKey] = struct{}{}
}
for depPath, missingVars := range missingVarsMap {
if _, isTopLevel := topLevelModLookup[depPath]; isTopLevel {
missingVarErr.MissingVariables = append(missingVarErr.MissingVariables, missingVars...)
} else {
missingVarErr.MissingTransitiveVariables[depPath] = missingVars
}
}

return missingVarErr
}

func identifyMissingVariablesForDependencies(workspaceLock *versionmap.WorkspaceLock, variableMap *modconfig.ModVariableMap, parentVariableValuesLookup map[string]struct{}, dependencyPath []string) (map[DependencyPathKey][]*modconfig.Variable, error) {
// return a map of missing variables, keyed by dependency path
res := make(map[DependencyPathKey][]*modconfig.Variable)

// update the path to this dependency
dependencyPath = append(dependencyPath, variableMap.Mod.GetInstallCacheKey())

// clone variableValuesLookup so we can mutate it with dependency-specific args overrides
var variableValueLookup = make(map[string]struct{}, len(parentVariableValuesLookup))
for k := range parentVariableValuesLookup {
// convert the variable name to the short name if it is fully qualified and belongs to the current mod
k = getVariableValueMapKey(k, variableMap)

variableValueLookup[k] = struct{}{}
}

// first get any args specified in the mod requires
// note the actual value of these may be unknown as we have not yet resolved them
depModArgs, err := inputvars.CollectVariableValuesFromModRequire(variableMap.Mod, workspaceLock)
for varName := range depModArgs {
// convert the variable name to the short name if it is fully qualified and belongs to the current mod
varName = getVariableValueMapKey(varName, variableMap)

variableValueLookup[varName] = struct{}{}
}
if err != nil {
return nil, err
}

// handle root variables
missingVariables := identifyMissingVariables(variableMap.RootVariables, variableValueLookup, variableMap.Mod.ShortName)
if len(missingVariables) > 0 {
res[newDependencyPathKey(dependencyPath...)] = missingVariables
}

// now iterate through all the dependency variable maps
for _, dependencyVariableMap := range variableMap.DependencyVariables {
childMissingMap, err := identifyMissingVariablesForDependencies(workspaceLock, dependencyVariableMap, variableValueLookup, dependencyPath)
if err != nil {
return nil, err
}
// add results into map
for k, v := range childMissingMap {
res[k] = v
}
}
return res, nil
}

// getVariableValueMapKey checks whether the variable is fully qualified and belongs to the current mod,
// if so use the short name
func getVariableValueMapKey(k string, variableMap *modconfig.ModVariableMap) string {
// attempt to parse the variable name.
// Note: if the variable is not fully qualified (e.g. "var_name"), ParseResourceName will return an error
// in which case we add it to our map unchanged
parsedName, err := modconfig.ParseResourceName(k)
// if this IS a dependency variable, the parse will succeed
// if the mod name is the same as the current mod (variableMap.Mod)
// then add a map entry with the variable short name
// this will allow us to match the variable value to a variable defined in this mod
if err == nil && parsedName.Mod == variableMap.Mod.ShortName {
k = parsedName.Name
}
return k
}

func identifyMissingVariables(variableMap map[string]*modconfig.Variable, variableValuesLookup map[string]struct{}, modName string) []*modconfig.Variable {

var needed []*modconfig.Variable

for shortName, v := range variableMap {
if !v.Required() {
continue // We only prompt for required variables
}
_, unparsedValExists := variableValuesLookup[shortName]

if !unparsedValExists {
needed = append(needed, v)
}
}
sort.SliceStable(needed, func(i, j int) bool {
return needed[i].Name() < needed[j].Name()
})
return needed

}
@@ -1,244 +0,0 @@
package modconfig

import (
"fmt"
"sort"
"strings"

"github.com/hashicorp/hcl/v2"
"github.com/turbot/go-kit/types"
typehelpers "github.com/turbot/go-kit/types"
"github.com/turbot/pipe-fittings/hclhelpers"
"github.com/turbot/pipe-fittings/utils"
"github.com/zclconf/go-cty/cty"
)

// Benchmark is a struct representing the Benchmark resource
type Benchmark struct {
ResourceWithMetadataImpl
ModTreeItemImpl

// required to allow partial decoding
Remain hcl.Body `hcl:",remain" json:"-"`

// child names as NamedItem structs - used to allow setting children via the 'children' property
ChildNames NamedItemList `cty:"child_names" json:"-"`
// used for introspection tables
ChildNameStrings []string `cty:"child_name_strings" column:"children,jsonb" json:"-"`

// dashboard specific properties
Base *Benchmark `hcl:"base" json:"-"`
Width *int `cty:"width" hcl:"width" column:"width,text" json:"-"`
Type *string `cty:"type" hcl:"type" column:"type,text" json:"-"`
Display *string `cty:"display" hcl:"display" json:"-"`
}

func NewRootBenchmarkWithChildren(mod *Mod, children []ModTreeItem) HclResource {
fullName := fmt.Sprintf("%s.%s.%s", mod.ShortName, "benchmark", "root")
benchmark := &Benchmark{
ModTreeItemImpl: ModTreeItemImpl{
HclResourceImpl: HclResourceImpl{
ShortName: "root",
FullName: fullName,
UnqualifiedName: fmt.Sprintf("%s.%s", "benchmark", "root"),
blockType: "benchmark",
},
Mod: mod,
},
}
benchmark.children = append(benchmark.children, children...)
return benchmark
}

func NewBenchmark(block *hcl.Block, mod *Mod, shortName string) HclResource {
fullName := fmt.Sprintf("%s.%s.%s", mod.ShortName, block.Type, shortName)
benchmark := &Benchmark{
ModTreeItemImpl: ModTreeItemImpl{
HclResourceImpl: HclResourceImpl{
ShortName: shortName,
FullName: fullName,
UnqualifiedName: fmt.Sprintf("%s.%s", block.Type, shortName),
DeclRange: hclhelpers.BlockRange(block),
blockType: block.Type,
},
Mod: mod,
},
}
benchmark.SetAnonymous(block)
return benchmark
}

func (b *Benchmark) Equals(other *Benchmark) bool {
if other == nil {
return false
}

return !b.Diff(other).HasChanges()
}

// OnDecoded implements HclResource
func (b *Benchmark) OnDecoded(block *hcl.Block, _ ResourceMapsProvider) hcl.Diagnostics {
b.setBaseProperties()
return nil
}

func (b *Benchmark) String() string {
// build list of children's names
var children []string
for _, child := range b.children {
children = append(children, child.Name())
}
// build list of parents' names
var parents []string
for _, p := range b.parents {
parents = append(parents, p.Name())
}
sort.Strings(children)
return fmt.Sprintf(`
-----
Name: %s
Title: %s
Description: %s
Parent: %s
Children:
%s
`,
b.FullName,
types.SafeString(b.Title),
types.SafeString(b.Description),
strings.Join(parents, "\n "),
strings.Join(children, "\n "))
}

// GetChildControls returns a flat list of controls underneath the benchmark in the tree
func (b *Benchmark) GetChildControls() []*Control {
var res []*Control
for _, child := range b.children {
if control, ok := child.(*Control); ok {
res = append(res, control)
} else if benchmark, ok := child.(*Benchmark); ok {
res = append(res, benchmark.GetChildControls()...)
}
}
return res
}

// GetWidth implements DashboardLeafNode
func (b *Benchmark) GetWidth() int {
if b.Width == nil {
return 0
}
return *b.Width
}

// GetDisplay implements DashboardLeafNode
func (b *Benchmark) GetDisplay() string {
return typehelpers.SafeString(b.Display)
}

// GetType implements DashboardLeafNode
func (b *Benchmark) GetType() string {
return typehelpers.SafeString(b.Type)
}

// GetUnqualifiedName implements DashboardLeafNode, ModTreeItem
func (b *Benchmark) GetUnqualifiedName() string {
return b.UnqualifiedName
}

func (b *Benchmark) Diff(other *Benchmark) *DashboardTreeItemDiffs {
res := &DashboardTreeItemDiffs{
Item: b,
Name: b.Name(),
}

if !utils.SafeStringsEqual(b.Description, other.Description) {
res.AddPropertyDiff("Description")
}
if !utils.SafeStringsEqual(b.Documentation, other.Documentation) {
res.AddPropertyDiff("Documentation")
}
if !utils.SafeStringsEqual(b.Title, other.Title) {
res.AddPropertyDiff("Title")
}
if len(b.Tags) != len(other.Tags) {
res.AddPropertyDiff("Tags")
} else {
for k, v := range b.Tags {
if otherVal := other.Tags[k]; v != otherVal {
res.AddPropertyDiff("Tags")
}
}
}

if !utils.SafeStringsEqual(b.Type, other.Type) {
res.AddPropertyDiff("Type")
}

if len(b.ChildNameStrings) != len(other.ChildNameStrings) {
res.AddPropertyDiff("Childen")
} else {
myChildNames := b.ChildNameStrings
sort.Strings(myChildNames)
otherChildNames := other.ChildNameStrings
sort.Strings(otherChildNames)
if strings.Join(myChildNames, ",") != strings.Join(otherChildNames, ",") {
res.AddPropertyDiff("Childen")
}
}

res.dashboardLeafNodeDiff(b, other)
return res
}

func (b *Benchmark) WalkResources(resourceFunc func(resource ModTreeItem) (bool, error)) error {
for _, child := range b.children {
continueWalking, err := resourceFunc(child)
if err != nil {
return err
}
if !continueWalking {
break
}

if childContainer, ok := child.(*Benchmark); ok {
if err := childContainer.WalkResources(resourceFunc); err != nil {
return err
}
}
}
return nil
}

func (b *Benchmark) SetChildren(children []ModTreeItem) {
b.children = children
}

// CtyValue implements CtyValueProvider
func (b *Benchmark) CtyValue() (cty.Value, error) {
return GetCtyValue(b)
}

func (b *Benchmark) setBaseProperties() {
if b.Base == nil {
return
}
// copy base into the HclResourceImpl 'base' property so it is accessible to all nested structs
b.base = b.Base
// call into parent nested struct setBaseProperties
b.ModTreeItemImpl.setBaseProperties()

if b.Width == nil {
b.Width = b.Base.Width
}

if b.Display == nil {
b.Display = b.Base.Display
}

if len(b.children) == 0 {
b.children = b.Base.children
b.ChildNameStrings = b.Base.ChildNameStrings
b.ChildNames = b.Base.ChildNames
}
}
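As a usage illustration (not part of this commit), the WalkResources callback above can be used to flatten a benchmark tree; collectControlNames is a hypothetical helper name.

// Hypothetical sketch only: collect the names of every control nested under a benchmark,
// relying on WalkResources to recurse into child benchmarks.
func collectControlNames(b *Benchmark) ([]string, error) {
	var names []string
	err := b.WalkResources(func(item ModTreeItem) (bool, error) {
		if _, ok := item.(*Control); ok {
			names = append(names, item.Name())
		}
		// returning true continues the walk
		return true, nil
	})
	return names, err
}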
@@ -1,57 +0,0 @@
package modconfig

import (
"fmt"
"reflect"
"strings"

"github.com/turbot/go-kit/helpers"
"github.com/turbot/steampipe/pkg/steampipeconfig/options"
)

type ConfigMap map[string]interface{}

// SetStringItem checks if the string pointer is non-nil and, if so, adds it to the map with the given key
func (m ConfigMap) SetStringItem(argValue *string, argName string) {
if argValue != nil {
m[argName] = *argValue
}
}

// SetStringSliceItem checks if the string slice is non-nil and, if so, adds it to the map with the given key
func (m ConfigMap) SetStringSliceItem(argValue []string, argName string) {
if argValue != nil {
m[argName] = argValue
}
}

// SetIntItem checks if the int pointer is non-nil and, if so, adds it to the map with the given key
func (m ConfigMap) SetIntItem(argValue *int, argName string) {
if argValue != nil {
m[argName] = *argValue
}
}

// SetBoolItem checks if the bool pointer is non-nil and, if so, adds it to the map with the given key
func (m ConfigMap) SetBoolItem(argValue *bool, argName string) {
if argValue != nil {
m[argName] = *argValue
}
}

// PopulateConfigMapForOptions populates the config map for a given options object
// NOTE: this mutates configMap
func (m ConfigMap) PopulateConfigMapForOptions(o options.Options) {
for k, v := range o.ConfigMap() {
m[k] = v
// also store a scoped version of the config property
m[getScopedKey(o, k)] = v
}
}

// getScopedKey generates a scoped key for the config property. For example, if o is a database options object and k is 'search-path'
// the scoped key will be 'database.search-path'
func getScopedKey(o options.Options, k string) string {
t := reflect.TypeOf(helpers.DereferencePointer(o)).Name()
return fmt.Sprintf("%s.%s", strings.ToLower(t), k)
}
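For illustration only (not part of this commit): the scoped-key behaviour described above means a value from an options object is stored twice, once plain and once scoped. The key name used here is taken from the 'search-path' example in the comment above; whether the Database options ConfigMap() actually exposes that key is an assumption, and exampleScopedConfig is a hypothetical name.

// Hypothetical sketch only: populate a ConfigMap from database options.
// Assuming its ConfigMap() exposes "search-path", the value ends up under
// both "search-path" and the scoped key "database.search-path".
func exampleScopedConfig(dbOpts *options.Database) ConfigMap {
	m := ConfigMap{}
	m.PopulateConfigMapForOptions(dbOpts)
	return m
}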
@@ -5,7 +5,6 @@ import (
"github.com/turbot/pipe-fittings/plugin"
"log"
"path"
"reflect"
"strings"

"github.com/hashicorp/hcl/v2"
@@ -13,7 +12,6 @@ import (
"github.com/turbot/pipe-fittings/hclhelpers"
"github.com/turbot/pipe-fittings/utils"
"github.com/turbot/steampipe/pkg/constants"
"github.com/turbot/steampipe/pkg/steampipeconfig/options"
"golang.org/x/exp/maps"
)

@@ -62,9 +60,7 @@ type Connection struct {

Error error

// options
Options *options.Connection `json:"options,omitempty"`
DeclRange plugin.Range `json:"decl_range"`
DeclRange plugin.Range `json:"decl_range"`
}

func (c *Connection) GetDeclRange() plugin.Range {
@@ -97,39 +93,17 @@ func (c *Connection) ImportDisabled() bool {
}

func (c *Connection) Equals(other *Connection) bool {
connectionOptionsEqual := (c.Options == nil) == (other.Options == nil)
if c.Options != nil {
connectionOptionsEqual = c.Options.Equals(other.Options)
}
return c.Name == other.Name &&
c.Plugin == other.Plugin &&
c.Type == other.Type &&
strings.Join(c.ConnectionNames, ",") == strings.Join(other.ConnectionNames, ",") &&
connectionOptionsEqual &&
c.Config == other.Config &&
c.ImportSchema == other.ImportSchema

}

// SetOptions sets the options on the connection
// verify the options object is a valid options type (only options.Connection currently supported)
func (c *Connection) SetOptions(opts options.Options, block *hcl.Block) hcl.Diagnostics {
var diags hcl.Diagnostics
switch o := opts.(type) {
case *options.Connection:
c.Options = o
default:
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: fmt.Sprintf("invalid nested option type %s - only 'connection' options blocks are supported for Connections", reflect.TypeOf(o).Name()),
Subject: hclhelpers.BlockRangePointer(block),
})
}
return diags
}

func (c *Connection) String() string {
return fmt.Sprintf("\n----\nName: %s\nPlugin: %s\nConfig:\n%s\nOptions:\n%s\n", c.Name, c.Plugin, c.Config, c.Options.String())
return fmt.Sprintf("\n----\nName: %s\nPlugin: %s\nConfig:\n%s\n", c.Name, c.Plugin, c.Config)
}

// Validate verifies the Type property is valid,