refactor: add err checks (#2896)

Signed-off-by: Larry Bordowitz <laurence.bordowitz@gmail.com>
Author: Larry Bordowitz
Date: 2025-06-17 11:18:53 -05:00 (committed by GitHub)
Parent: 952c7b255f
Commit: ed6699f8c9
32 changed files with 271 additions and 121 deletions

View File

@@ -337,7 +337,6 @@ func TestMkConfigDir_new(t *testing.T) {
  func TestMkConfigDir_exists(t *testing.T) {
  tmpConfigDir := filepath.Join(t.TempDir(), ".terraform.d")
- os.Mkdir(tmpConfigDir, os.ModePerm)
  err := mkConfigDir(tmpConfigDir)
  if err != nil {

View File

@@ -66,14 +66,20 @@ func TestStateHookStopping(t *testing.T) {
  // We'll now force lastPersist to be long enough ago that persisting
  // should be due on the next call.
  hook.intermediatePersist.LastPersist = time.Now().Add(-5 * time.Hour)
- hook.PostStateUpdate(s)
+ _, err = hook.PostStateUpdate(s)
+ if err != nil {
+ t.Fatalf("unexpected error from PostStateUpdate: %s", err)
+ }
  if is.Written == nil || !is.Written.Equal(s) {
  t.Fatalf("mismatching state written")
  }
  if is.Persisted == nil || !is.Persisted.Equal(s) {
  t.Fatalf("mismatching state persisted")
  }
- hook.PostStateUpdate(s)
+ _, err = hook.PostStateUpdate(s)
+ if err != nil {
+ t.Fatalf("unexpected error from PostStateUpdate: %s", err)
+ }
  if is.Written == nil || !is.Written.Equal(s) {
  t.Fatalf("mismatching state written")
  }
@@ -108,12 +114,18 @@ func TestStateHookStopping(t *testing.T) {
  }
  is.Persisted = nil
- hook.PostStateUpdate(s)
+ _, err = hook.PostStateUpdate(s)
+ if err != nil {
+ t.Fatalf("unexpected error from PostStateUpdate: %s", err)
+ }
  if is.Persisted == nil || !is.Persisted.Equal(s) {
  t.Fatalf("mismatching state persisted")
  }
  is.Persisted = nil
- hook.PostStateUpdate(s)
+ _, err = hook.PostStateUpdate(s)
+ if err != nil {
+ t.Fatalf("unexpected error from PostStateUpdate: %s", err)
+ }
  if is.Persisted == nil || !is.Persisted.Equal(s) {
  t.Fatalf("mismatching state persisted")
  }
@@ -165,14 +177,20 @@ func TestStateHookCustomPersistRule(t *testing.T) {
  // We'll now force lastPersist to be long enough ago that persisting
  // should be due on the next call.
  hook.intermediatePersist.LastPersist = time.Now().Add(-5 * time.Hour)
- hook.PostStateUpdate(s)
+ _, err = hook.PostStateUpdate(s)
+ if err != nil {
+ t.Fatalf("unexpected error from PostStateUpdate: %s", err)
+ }
  if is.Written == nil || !is.Written.Equal(s) {
  t.Fatalf("mismatching state written")
  }
  if is.Persisted != nil {
  t.Fatalf("has a persisted state, but shouldn't")
  }
- hook.PostStateUpdate(s)
+ _, err = hook.PostStateUpdate(s)
+ if err != nil {
+ t.Fatalf("unexpected error from PostStateUpdate: %s", err)
+ }
  if is.Written == nil || !is.Written.Equal(s) {
  t.Fatalf("mismatching state written")
  }
@@ -212,12 +230,18 @@ func TestStateHookCustomPersistRule(t *testing.T) {
  }
  is.Persisted = nil
- hook.PostStateUpdate(s)
+ _, err = hook.PostStateUpdate(s)
+ if err != nil {
+ t.Fatalf("unexpected error from PostStateUpdate: %s", err)
+ }
  if is.Persisted == nil || !is.Persisted.Equal(s) {
  t.Fatalf("mismatching state persisted")
  }
  is.Persisted = nil
- hook.PostStateUpdate(s)
+ _, err = hook.PostStateUpdate(s)
+ if err != nil {
+ t.Fatalf("unexpected error from PostStateUpdate: %s", err)
+ }
  if is.Persisted == nil || !is.Persisted.Equal(s) {
  t.Fatalf("mismatching state persisted")
  }

View File

@@ -29,8 +29,6 @@ type ArmClient struct {
  // These Clients are only initialized if an Access Key isn't provided
  groupsClient *resources.GroupsClient
  storageAccountsClient *armStorage.AccountsClient
- containersClient *containers.Client
- blobsClient *blobs.Client
  // azureAdStorageAuth is only here if we're using AzureAD Authentication but is an Authorizer for Storage
  azureAdStorageAuth *autorest.Authorizer

View File

@@ -142,10 +142,6 @@ func (b *Backend) StateMgr(ctx context.Context, name string) (statemgr.Full, err
  return stateMgr, nil
  }
- func (b *Backend) client() *RemoteClient {
- return &RemoteClient{}
- }
  func (b *Backend) path(name string) string {
  if name == backend.DefaultStateName {
  return b.keyName

View File

@@ -22,7 +22,6 @@ import (
  )
  const (
- leaseHeader = "x-ms-lease-id"
  // Must be lower case
  lockInfoMetaKey = "terraformlockid"
  )

View File

@@ -44,15 +44,6 @@ func testAccAzureBackendRunningInAzure(t *testing.T) {
  }
  }
- // these kind of tests can only run when within GitHub Actions (e.g. OIDC)
- func testAccAzureBackendRunningInGitHubActions(t *testing.T) {
- testAccAzureBackend(t)
- if os.Getenv("TF_RUNNING_IN_GITHUB_ACTIONS") == "" {
- t.Skip("Skipping test since not running in GitHub Actions")
- }
- }
  func buildTestClient(t *testing.T, res resourceNames) *ArmClient {
  subscriptionID := os.Getenv("ARM_SUBSCRIPTION_ID")
  tenantID := os.Getenv("ARM_TENANT_ID")

View File

@@ -181,7 +181,7 @@ func (c *RemoteClient) Put(data []byte) error {
  // the user. We may end up with dangling chunks but there is no way
  // to be sure we won't.
  path := strings.TrimRight(c.Path, "/") + fmt.Sprintf("/tfstate.%s/", hash)
- kv.DeleteTree(path, nil)
+ _, _ = kv.DeleteTree(path, nil)
  }
  }
@@ -307,11 +307,17 @@ func (c *RemoteClient) Delete() error {
  }
  _, err = kv.Delete(c.Path, nil)
+ if err != nil {
+ return err
+ }
  // If there were chunks we need to remove them
  if chunked {
  path := strings.TrimRight(c.Path, "/") + fmt.Sprintf("/tfstate.%s/", hash)
- kv.DeleteTree(path, nil)
+ _, err = kv.DeleteTree(path, nil)
+ if err != nil {
+ return err
+ }
  }
  return err
@@ -539,7 +545,8 @@ func (c *RemoteClient) createSession() (string, error) {
  log.Println("[INFO] created consul lock session", id)
  // keep the session renewed
- go session.RenewPeriodic(lockSessionTTL, id, nil, ctx.Done())
+ // there's not really any good way of propagating errors from this function, so we ignore them
+ go session.RenewPeriodic(lockSessionTTL, id, nil, ctx.Done()) //nolint:errcheck
  return id, nil
  }
@@ -574,8 +581,14 @@ func (c *RemoteClient) unlock(id string) error {
  }
  // We ignore the errors that may happen during cleanup
  kv := c.Client.KV()
- kv.Delete(c.lockPath()+lockSuffix, nil)
- kv.Delete(c.lockPath()+lockInfoSuffix, nil)
+ _, err = kv.Delete(c.lockPath()+lockSuffix, nil)
+ if err != nil {
+ log.Printf("[ERROR] could not delete lock @ %s: %s\n", c.lockPath()+lockSuffix, err)
+ }
+ _, err = kv.Delete(c.lockPath()+lockInfoSuffix, nil)
+ if err != nil {
+ log.Printf("[ERROR] could not delete lock info @ %s: %s\n", c.lockPath()+lockInfoSuffix, err)
+ }
  return nil
  }
@@ -618,7 +631,10 @@ func (c *RemoteClient) unlock(id string) error {
  // This is only cleanup, and will fail if the lock was immediately taken by
  // another client, so we don't report an error to the user here.
- c.consulLock.Destroy()
+ err := c.consulLock.Destroy()
+ if err != nil {
+ log.Printf("[ERROR] could not destroy consul lock: %s\n", err)
+ }
  return errs
  }
@@ -644,7 +660,10 @@ func uncompressState(data []byte) ([]byte, error) {
  if err != nil {
  return nil, err
  }
- b.ReadFrom(gz)
+ _, err = b.ReadFrom(gz)
+ if err != nil {
+ return nil, err
+ }
  if err := gz.Close(); err != nil {
  return nil, err
  }
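Taken together, the uncompressState hunk above yields roughly the following function. This is an assembled sketch for readability, assuming the untouched parts of the function match the surrounding context lines and that "bytes" and "compress/gzip" are imported as in the original file:

// sketch of the gzip error handling after this change
func uncompressState(data []byte) ([]byte, error) {
	b := new(bytes.Buffer)
	gz, err := gzip.NewReader(bytes.NewReader(data))
	if err != nil {
		return nil, err
	}
	// the previously ignored ReadFrom error is now propagated
	if _, err = b.ReadFrom(gz); err != nil {
		return nil, err
	}
	if err := gz.Close(); err != nil {
		return nil, err
	}
	return b.Bytes(), nil
}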

View File

@@ -444,7 +444,7 @@ func TestConsul_lostLockConnection(t *testing.T) {
  for i := 0; i < 3; i++ {
  dialed := conns.dialedDone()
  // kill any open connections
- conns.Kill()
+ conns.Kill(t)
  // wait for a new connection to be dialed, and kill it again
  <-dialed
  }
@@ -493,12 +493,15 @@ func (u *unreliableConns) dialedDone() chan struct{} {
  // Kill these with a deadline, just to make sure we don't end up with any EOFs
  // that get ignored.
- func (u *unreliableConns) Kill() {
+ func (u *unreliableConns) Kill(t *testing.T) {
  u.Lock()
  defer u.Unlock()
  for _, conn := range u.conns {
- conn.(*net.TCPConn).SetDeadline(time.Now())
+ err := conn.(*net.TCPConn).SetDeadline(time.Now())
+ if err != nil {
+ t.Fatal("failed to kill connection:", err)
+ }
  }
  u.conns = nil
  }

View File

@@ -10,6 +10,7 @@ import (
  "context"
  "crypto/md5"
  "encoding/json"
+ "errors"
  "fmt"
  "io"
  "log"
@@ -84,30 +85,39 @@ func (c *remoteClient) Lock(info *statemgr.LockInfo) (string, error) {
  if err != nil {
  return "", c.lockError(err)
  }
- defer c.cosUnlock(c.bucket, c.lockFile)
+ // Local helper function so we can call it multiple places
+ lockUnlock := func(parent error) error {
+ if err := c.cosUnlock(c.bucket, c.lockFile); err != nil {
+ return errors.Join(
+ fmt.Errorf("error unlocking cos state: %w", c.lockError(err)),
+ parent,
+ )
+ }
+ return parent
+ }
  exists, _, _, err := c.getObject(c.lockFile)
  if err != nil {
- return "", c.lockError(err)
+ return "", lockUnlock(c.lockError(err))
  }
  if exists {
- return "", c.lockError(fmt.Errorf("lock file %s exists", c.lockFile))
+ return "", lockUnlock(c.lockError(fmt.Errorf("lock file %s exists", c.lockFile)))
  }
  info.Path = c.lockFile
  data, err := json.Marshal(info)
  if err != nil {
- return "", c.lockError(err)
+ return "", lockUnlock(c.lockError(err))
  }
  check := fmt.Sprintf("%x", md5.Sum(data))
  err = c.putObject(c.lockFile, data)
  if err != nil {
- return "", c.lockError(err)
+ return "", lockUnlock(c.lockError(err))
  }
- return check, nil
+ return check, lockUnlock(nil)
  }
  // Unlock unlock remote state file
@@ -330,7 +340,10 @@ func (c *remoteClient) deleteBucket(recursive bool) error {
  return fmt.Errorf("failed to empty bucket %v: %w", c.bucket, err)
  }
  for _, v := range obs {
- c.deleteObject(v.Key)
+ err := c.deleteObject(v.Key)
+ if err != nil {
+ return fmt.Errorf("failed to delete object with key %s: %w", v.Key, err)
+ }
  }
  }
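The Lock hunk above swaps a deferred cosUnlock for a closure that joins any unlock failure onto the error already being returned. A minimal, self-contained sketch of that errors.Join pattern follows; the type and method names are hypothetical, not the cos backend's API, and errors.Join requires Go 1.20 or newer:

package main

import (
	"errors"
	"fmt"
)

type client struct{ locked bool }

func (c *client) unlock() error {
	if !c.locked {
		return errors.New("not locked")
	}
	c.locked = false
	return nil
}

func (c *client) doLockedWork(fail bool) error {
	c.locked = true
	// lockUnlock returns parent unchanged on success, or parent joined with
	// the unlock error if releasing the lock also fails.
	lockUnlock := func(parent error) error {
		if err := c.unlock(); err != nil {
			return errors.Join(fmt.Errorf("error unlocking: %w", err), parent)
		}
		return parent
	}
	if fail {
		return lockUnlock(errors.New("work failed"))
	}
	return lockUnlock(nil)
}

func main() {
	c := &client{}
	fmt.Println(c.doLockedWork(true))  // work failed
	fmt.Println(c.doLockedWork(false)) // <nil>
}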

View File

@@ -137,11 +137,16 @@ type testHTTPHandler struct {
  func (h *testHTTPHandler) Handle(w http.ResponseWriter, r *http.Request) {
  switch r.Method {
  case "GET":
- w.Write(h.Data)
+ if _, err := w.Write(h.Data); err != nil {
+ w.WriteHeader(500)
+ return
+ }
+ w.WriteHeader(200)
  case "PUT":
  buf := new(bytes.Buffer)
  if _, err := io.Copy(buf, r.Body); err != nil {
  w.WriteHeader(500)
+ return
  }
  w.WriteHeader(201)
  h.Data = buf.Bytes()
@@ -149,6 +154,7 @@ func (h *testHTTPHandler) Handle(w http.ResponseWriter, r *http.Request) {
  buf := new(bytes.Buffer)
  if _, err := io.Copy(buf, r.Body); err != nil {
  w.WriteHeader(500)
+ return
  }
  h.Data = buf.Bytes()
  case "LOCK":
@@ -164,7 +170,8 @@ func (h *testHTTPHandler) Handle(w http.ResponseWriter, r *http.Request) {
  w.WriteHeader(200)
  default:
  w.WriteHeader(500)
- w.Write([]byte(fmt.Sprintf("Unknown method: %s", r.Method)))
+ // this is already returning a 500, no need for further error checking
+ _, _ = fmt.Fprintf(w, "Unknown method: %s", r.Method)
  }
  }
@@ -172,7 +179,11 @@ func (h *testHTTPHandler) Handle(w http.ResponseWriter, r *http.Request) {
  func (h *testHTTPHandler) HandleWebDAV(w http.ResponseWriter, r *http.Request) {
  switch r.Method {
  case "GET":
- w.Write(h.Data)
+ if _, err := w.Write(h.Data); err != nil {
+ w.WriteHeader(500)
+ return
+ }
+ w.WriteHeader(200)
  case "PUT":
  buf := new(bytes.Buffer)
  if _, err := io.Copy(buf, r.Body); err != nil {
@@ -190,7 +201,8 @@ func (h *testHTTPHandler) HandleWebDAV(w http.ResponseWriter, r *http.Request) {
  w.WriteHeader(200)
  default:
  w.WriteHeader(500)
- w.Write([]byte(fmt.Sprintf("Unknown method: %s", r.Method)))
+ // this is already returning a 500, no need for further error checking
+ _, _ = fmt.Fprintf(w, "Unknown method: %s", r.Method)
  }
  }

View File

@@ -80,13 +80,16 @@ func (b *Backend) configure(ctx context.Context) error {
  // set the default client lock info per the test config
  data := schema.FromContextBackendConfig(ctx)
- if v, ok := data.GetOk("lock_id"); ok && v.(string) != "" {
+ _, hasDefaultLock := locks.m[backend.DefaultStateName]
+ if v, ok := data.GetOk("lock_id"); ok && v.(string) != "" && !hasDefaultLock {
  info := statemgr.NewLockInfo()
  info.ID = v.(string)
  info.Operation = "test"
  info.Info = "test config"
- locks.lock(backend.DefaultStateName, info)
+ if _, err := locks.lock(backend.DefaultStateName, info); err != nil {
+ return err
+ }
  }
  return nil
@@ -140,17 +143,34 @@ func (b *Backend) StateMgr(_ context.Context, name string) (statemgr.Full, error
  if err != nil {
  return nil, fmt.Errorf("failed to lock inmem state: %w", err)
  }
- defer s.Unlock(lockID)
+ // Local helper function so we can call it multiple places
+ lockUnlock := func(parent error) error {
+ if err := s.Unlock(lockID); err != nil {
+ return errors.Join(
+ fmt.Errorf("error unlocking inmem state: %w", err),
+ parent,
+ )
+ }
+ return parent
+ }
  // If we have no state, we have to create an empty state
  if v := s.State(); v == nil {
  if err := s.WriteState(statespkg.NewState()); err != nil {
+ err = lockUnlock(err)
  return nil, err
  }
  if err := s.PersistState(nil); err != nil {
+ err = lockUnlock(err)
  return nil, err
  }
  }
+ // Unlock, the state should now be initialized
+ if err := lockUnlock(nil); err != nil {
+ return nil, err
+ }
  }
  return s, nil

View File

@@ -371,7 +371,10 @@ func uncompressState(data string) ([]byte, error) {
  if err != nil {
  return nil, err
  }
- b.ReadFrom(gz)
+ _, err = b.ReadFrom(gz)
+ if err != nil {
+ return nil, err
+ }
  if err := gz.Close(); err != nil {
  return nil, err
  }

View File

@@ -472,7 +472,10 @@ func getAssumeRoleAK(accessKey, secretKey, stsToken, region, roleArn, sessionNam
  return "", "", "", err
  }
  if stsEndpoint != "" {
- endpoints.AddEndpointMapping(region, "STS", stsEndpoint)
+ err = endpoints.AddEndpointMapping(region, "STS", stsEndpoint)
+ if err != nil {
+ return "", "", "", err
+ }
  }
  response, err := client.AssumeRole(request)
  if err != nil {

View File

@@ -983,15 +983,6 @@ func generalError(msg string, err error) error {
  }
  }
- func checkConstraintsWarning(err error) tfdiags.Diagnostic {
- return tfdiags.Sourceless(
- tfdiags.Warning,
- fmt.Sprintf("Failed to check version constraints: %v", err),
- "Checking version constraints is considered optional, but this is an"+
- "unexpected error which should be reported.",
- )
- }
  // The newline in this error is to make it look good in the CLI!
  const initialRetryError = `
  [reset][yellow]There was an error connecting to the remote backend. Please do not exit

View File

@@ -373,7 +373,9 @@ in order to capture the filesystem context the remote workspace expects:
  log.Printf("[ERROR] error searching process ID: %v", err)
  return
  }
- p.Signal(syscall.SIGINT)
+ if err := p.Signal(syscall.SIGINT); err != nil {
+ log.Printf("[ERROR] error sending interrupt signal: %v", err)
+ }
  }
  }
  }()

View File

@@ -53,7 +53,10 @@ func TestRemoteClient_Put_withRunID(t *testing.T) {
  // Create a new empty state.
  sf := statefile.New(states.NewState(), "", 0)
  var buf bytes.Buffer
- statefile.Write(sf, &buf, encryption.StateEncryptionDisabled())
+ err := statefile.Write(sf, &buf, encryption.StateEncryptionDisabled())
+ if err != nil {
+ t.Fatalf("error writing to statefile, got %v", err)
+ }
  // Store the new state to verify (this will be done
  // by the mock that is used) that the run ID is set.

View File

@@ -216,22 +216,28 @@ func testServer(t *testing.T) *httptest.Server {
  // Respond to service discovery calls.
  mux.HandleFunc("/well-known/terraform.json", func(w http.ResponseWriter, r *http.Request) {
  w.Header().Set("Content-Type", "application/json")
- io.WriteString(w, `{
+ _, err := io.WriteString(w, `{
  "state.v2": "/api/v2/",
  "tfe.v2.1": "/api/v2/",
  "versions.v1": "/v1/versions/"
  }`)
+ if err != nil {
+ w.WriteHeader(500)
+ }
  })
  // Respond to service version constraints calls.
  mux.HandleFunc("/v1/versions/", func(w http.ResponseWriter, r *http.Request) {
  w.Header().Set("Content-Type", "application/json")
- io.WriteString(w, fmt.Sprintf(`{
+ _, err := io.WriteString(w, fmt.Sprintf(`{
  "service": "%s",
  "product": "terraform",
  "minimum": "0.1.0",
  "maximum": "10.0.0"
  }`, path.Base(r.URL.Path)))
+ if err != nil {
+ w.WriteHeader(500)
+ }
  })
  // Respond to pings to get the API version header.
@@ -243,7 +249,7 @@ func testServer(t *testing.T) *httptest.Server {
  // Respond to the initial query to read the hashicorp org entitlements.
  mux.HandleFunc("/api/v2/organizations/hashicorp/entitlement-set", func(w http.ResponseWriter, r *http.Request) {
  w.Header().Set("Content-Type", "application/vnd.api+json")
- io.WriteString(w, `{
+ _, err := io.WriteString(w, `{
  "data": {
  "id": "org-GExadygjSbKP8hsY",
  "type": "entitlement-sets",
@@ -257,12 +263,15 @@ func testServer(t *testing.T) *httptest.Server {
  }
  }
  }`)
+ if err != nil {
+ w.WriteHeader(500)
+ }
  })
  // Respond to the initial query to read the no-operations org entitlements.
  mux.HandleFunc("/api/v2/organizations/no-operations/entitlement-set", func(w http.ResponseWriter, r *http.Request) {
  w.Header().Set("Content-Type", "application/vnd.api+json")
- io.WriteString(w, `{
+ _, err := io.WriteString(w, `{
  "data": {
  "id": "org-ufxa3y8jSbKP8hsT",
  "type": "entitlement-sets",
@@ -276,13 +285,16 @@ func testServer(t *testing.T) *httptest.Server {
  }
  }
  }`)
+ if err != nil {
+ w.WriteHeader(500)
+ }
  })
  // All tests that are assumed to pass will use the hashicorp organization,
  // so for all other organization requests we will return a 404.
  mux.HandleFunc("/api/v2/organizations/", func(w http.ResponseWriter, r *http.Request) {
  w.WriteHeader(404)
- io.WriteString(w, `{
+ _, err := io.WriteString(w, `{
  "errors": [
  {
  "status": "404",
@@ -290,6 +302,9 @@ func testServer(t *testing.T) *httptest.Server {
  }
  ]
  }`)
+ if err != nil {
+ w.WriteHeader(500)
+ }
  })
  return httptest.NewServer(mux)
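Every handler in this test server now repeats the same write-then-fall-back-to-500 check. One possible way to factor that into a helper, sketched here with an invented helper name and a single illustrative route (this refactor is not part of the commit):

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

// writeOr500 writes s to w and falls back to a 500 status if the write fails.
// If Write has already sent the implicit 200 header, the later WriteHeader
// call is a no-op, which mirrors the behaviour of the inline checks above.
func writeOr500(w http.ResponseWriter, s string) {
	if _, err := io.WriteString(w, s); err != nil {
		w.WriteHeader(http.StatusInternalServerError)
	}
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/well-known/terraform.json", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		writeOr500(w, `{"state.v2": "/api/v2/"}`)
	})
	// exercise the handler once against an in-memory test server
	srv := httptest.NewServer(mux)
	defer srv.Close()
	resp, err := http.Get(srv.URL + "/well-known/terraform.json")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(body))
}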

View File

@@ -424,7 +424,7 @@ func testLocksInWorkspace(t *testing.T, b1, b2 Backend, testForceUnlock bool, wo
  _, err = lockerB.Lock(infoB)
  if err == nil {
- lockerA.Unlock(lockIDA)
+ _ = lockerA.Unlock(lockIDA) // test already failed, no need to check err further
  t.Fatal("client B obtained lock while held by client A")
  }

View File

@@ -324,7 +324,9 @@ in order to capture the filesystem context the remote workspace expects:
  log.Printf("[ERROR] error searching process ID: %v", err)
  return
  }
- p.Signal(syscall.SIGINT)
+ if err := p.Signal(syscall.SIGINT); err != nil {
+ log.Printf("[ERROR] error sending interrupt signal: %v", err)
+ }
  }
  }
  }()

View File

@@ -84,7 +84,7 @@ func TestCloud_runTaskStageWithPolicyEvaluation(t *testing.T) {
  trs := policyEvaluationSummarizer{
  cloud: b,
  }
- c.context.Poll(0, 0, func(i int) (bool, error) {
+ err := c.context.Poll(0, 0, func(i int) (bool, error) {
  cont, _, _ := trs.Summarize(c.context, c.writer, c.taskStage())
  if cont {
  return true, nil
@@ -98,5 +98,8 @@ func TestCloud_runTaskStageWithPolicyEvaluation(t *testing.T) {
  }
  return false, nil
  })
+ if err != nil {
+ t.Fatalf("Error while polling: %v", err)
+ }
  }
  }

View File

@@ -155,7 +155,7 @@ func TestCloud_runTasksWithTaskResults(t *testing.T) {
  trs := taskResultSummarizer{
  cloud: b,
  }
- c.context.Poll(0, 0, func(i int) (bool, error) {
+ err := c.context.Poll(0, 0, func(i int) (bool, error) {
  cont, _, _ := trs.Summarize(c.context, c.writer, c.taskStage())
  if cont {
  return true, nil
@@ -169,5 +169,8 @@ func TestCloud_runTasksWithTaskResults(t *testing.T) {
  }
  return false, nil
  })
+ if err != nil {
+ t.Fatalf("Error while polling: %v", err)
+ }
  }
  }

View File

@@ -136,7 +136,10 @@ func testRunner(t *testing.T, cases testCases, orgCount int, tfEnvFlags ...strin
  if lenInput > 0 {
  for i := 0; i < lenInput; i++ {
  input := tfCmd.userInput[i]
- exp.SendLine(input)
+ _, err := exp.SendLine(input)
+ if err != nil {
+ subtest.Fatal(err)
+ }
  // use the index to find the corresponding
  // output that matches the input.
  if lenInputOutput-1 >= i {
@@ -183,6 +186,13 @@ func setTfeClient() {
  }
  }
+ func chdirOCF(dir string) {
+ if err := os.Chdir(dir); err != nil {
+ fmt.Printf("Could not change directories: %v\n", err)
+ os.Exit(1)
+ }
+ }
  func setupBinary() func() {
  log.Println("Setting up terraform binary")
  tmpTerraformBinaryDir, err := os.MkdirTemp("", "terraform-test")
@@ -192,9 +202,9 @@ func setupBinary() func() {
  }
  log.Println(tmpTerraformBinaryDir)
  currentDir, err := os.Getwd()
- defer os.Chdir(currentDir)
+ defer chdirOCF(currentDir)
  if err != nil {
- fmt.Printf("Could not change directories: %v\n", err)
+ fmt.Printf("Could not get current directory: %v\n", err)
  os.Exit(1)
  }
  // Getting top level dir
@@ -204,10 +214,7 @@ func setupBinary() func() {
  topLevel := len(dirPaths) - 3
  topDir := strings.Join(dirPaths[0:topLevel], "/")
- if err := os.Chdir(topDir); err != nil {
- fmt.Printf("Could not change directories: %v\n", err)
- os.Exit(1)
- }
+ chdirOCF(topDir)
  cmd := exec.Command(
  "go",

View File

@@ -182,7 +182,7 @@ func TestCloudLocks(t *testing.T) {
  _, err = lockerB.Lock(infoB)
  if err == nil {
- lockerA.Unlock(lockIDA)
+ _ = lockerA.Unlock(lockIDA) // test already failed, no need to check err further
  t.Fatal("client B obtained lock while held by client A")
  }
  if _, ok := err.(*statemgr.LockError); !ok {
@@ -335,12 +335,15 @@ func TestState_PersistState(t *testing.T) {
  if err != nil {
  t.Fatal(err)
  }
- cloudState.WriteState(states.BuildState(func(s *states.SyncState) {
+ err = cloudState.WriteState(states.BuildState(func(s *states.SyncState) {
  s.SetOutputValue(
  addrs.OutputValue{Name: "boop"}.Absolute(addrs.RootModuleInstance),
  cty.StringVal("beep"), false, "",
  )
  }))
+ if err != nil {
+ t.Fatal(err)
+ }
  err = cloudState.PersistState(nil)
  if err != nil {

View File

@@ -40,10 +40,6 @@ import (
  "github.com/opentofu/opentofu/internal/tofu"
  )
- const (
- testCred = "test-auth-token"
- )
  var (
  tfeHost = "app.terraform.io"
  credsSrc = svcauth.StaticCredentialsSource(map[svchost.Hostname]svcauth.HostCredentials{
@@ -424,12 +420,18 @@ func testServerWithSnapshotsEnabled(t *testing.T, enabled bool) *httptest.Server
  fakeState := states.NewState()
  fakeStateFile := statefile.New(fakeState, "boop", 1)
  var buf bytes.Buffer
- statefile.Write(fakeStateFile, &buf, encryption.StateEncryptionDisabled())
+ err := statefile.Write(fakeStateFile, &buf, encryption.StateEncryptionDisabled())
+ if err != nil {
+ t.Fatal(err)
+ }
  respBody := buf.Bytes()
  w.Header().Set("content-type", "application/json")
  w.Header().Set("content-length", strconv.FormatInt(int64(len(respBody)), 10))
  w.WriteHeader(http.StatusOK)
- w.Write(respBody)
+ _, err = w.Write(respBody)
+ if err != nil {
+ t.Fatal(err)
+ }
  return
  }
@@ -477,7 +479,10 @@ func testServerWithSnapshotsEnabled(t *testing.T, enabled bool) *httptest.Server
  }
  w.WriteHeader(http.StatusOK)
- w.Write(fakeBodyRaw)
+ _, err = w.Write(fakeBodyRaw)
+ if err != nil {
+ t.Fatal(err)
+ }
  }))
  serverURL = server.URL
  return server
@@ -490,20 +495,26 @@ var testDefaultRequestHandlers = map[string]func(http.ResponseWriter, *http.Requ
  // Respond to service discovery calls.
  "/well-known/terraform.json": func(w http.ResponseWriter, r *http.Request) {
  w.Header().Set("Content-Type", "application/json")
- io.WriteString(w, `{
+ _, err := io.WriteString(w, `{
  "tfe.v2": "/api/v2/",
  }`)
+ if err != nil {
+ w.WriteHeader(500)
+ }
  },
  // Respond to service version constraints calls.
  "/v1/versions/": func(w http.ResponseWriter, r *http.Request) {
  w.Header().Set("Content-Type", "application/json")
- io.WriteString(w, fmt.Sprintf(`{
+ _, err := io.WriteString(w, fmt.Sprintf(`{
  "service": "%s",
  "product": "terraform",
  "minimum": "0.1.0",
  "maximum": "10.0.0"
  }`, path.Base(r.URL.Path)))
+ if err != nil {
+ w.WriteHeader(500)
+ }
  },
  // Respond to pings to get the API version header.
@@ -515,7 +526,7 @@ var testDefaultRequestHandlers = map[string]func(http.ResponseWriter, *http.Requ
  // Respond to the initial query to read the hashicorp org entitlements.
  "/api/v2/organizations/hashicorp/entitlement-set": func(w http.ResponseWriter, r *http.Request) {
  w.Header().Set("Content-Type", "application/vnd.api+json")
- io.WriteString(w, `{
+ _, err := io.WriteString(w, `{
  "data": {
  "id": "org-GExadygjSbKP8hsY",
  "type": "entitlement-sets",
@@ -529,12 +540,15 @@ var testDefaultRequestHandlers = map[string]func(http.ResponseWriter, *http.Requ
  }
  }
  }`)
+ if err != nil {
+ w.WriteHeader(500)
+ }
  },
  // Respond to the initial query to read the no-operations org entitlements.
  "/api/v2/organizations/no-operations/entitlement-set": func(w http.ResponseWriter, r *http.Request) {
  w.Header().Set("Content-Type", "application/vnd.api+json")
- io.WriteString(w, `{
+ _, err := io.WriteString(w, `{
  "data": {
  "id": "org-ufxa3y8jSbKP8hsT",
  "type": "entitlement-sets",
@@ -548,13 +562,16 @@ var testDefaultRequestHandlers = map[string]func(http.ResponseWriter, *http.Requ
  }
  }
  }`)
+ if err != nil {
+ w.WriteHeader(500)
+ }
  },
  // All tests that are assumed to pass will use the hashicorp organization,
  // so for all other organization requests we will return a 404.
  "/api/v2/organizations/": func(w http.ResponseWriter, r *http.Request) {
  w.WriteHeader(404)
- io.WriteString(w, `{
+ _, err := io.WriteString(w, `{
  "errors": [
  {
  "status": "404",
@@ -562,6 +579,9 @@ var testDefaultRequestHandlers = map[string]func(http.ResponseWriter, *http.Requ
  }
  ]
  }`)
+ if err != nil {
+ w.WriteHeader(500)
+ }
  },
  }

View File

@@ -105,7 +105,8 @@ func (g *AcyclicGraph) TransitiveReduction() {
  for _, u := range g.Vertices() {
  uTargets := g.downEdgesNoCopy(u)
- g.DepthFirstWalk(g.downEdgesNoCopy(u), func(v Vertex, d int) error {
+ // err is always returned as nil
+ _ = g.DepthFirstWalk(g.downEdgesNoCopy(u), func(v Vertex, d int) error {
  shared := uTargets.Intersection(g.downEdgesNoCopy(v))
  for _, vPrime := range shared {
  g.RemoveEdge(BasicEdge(u, vPrime))

View File

@@ -460,7 +460,8 @@ func TestAcyclicGraphWalkOrder(t *testing.T) {
  t.Run("DepthFirst", func(t *testing.T) {
  var visits []vertexAtDepth
- g.walk(depthFirst|downOrder, true, start, func(v Vertex, d int) error {
+ // err will always be nil
+ _ = g.walk(depthFirst|downOrder, true, start, func(v Vertex, d int) error {
  visits = append(visits, vertexAtDepth{v, d})
  return nil
@@ -474,7 +475,8 @@ func TestAcyclicGraphWalkOrder(t *testing.T) {
  })
  t.Run("ReverseDepthFirst", func(t *testing.T) {
  var visits []vertexAtDepth
- g.walk(depthFirst|upOrder, true, reverse, func(v Vertex, d int) error {
+ // err will always be nil
+ _ = g.walk(depthFirst|upOrder, true, reverse, func(v Vertex, d int) error {
  visits = append(visits, vertexAtDepth{v, d})
  return nil
@@ -488,7 +490,8 @@ func TestAcyclicGraphWalkOrder(t *testing.T) {
  })
  t.Run("BreadthFirst", func(t *testing.T) {
  var visits []vertexAtDepth
- g.walk(breadthFirst|downOrder, true, start, func(v Vertex, d int) error {
+ // err will always be nil
+ _ = g.walk(breadthFirst|downOrder, true, start, func(v Vertex, d int) error {
  visits = append(visits, vertexAtDepth{v, d})
  return nil
@@ -502,7 +505,8 @@ func TestAcyclicGraphWalkOrder(t *testing.T) {
  })
  t.Run("ReverseBreadthFirst", func(t *testing.T) {
  var visits []vertexAtDepth
- g.walk(breadthFirst|upOrder, true, reverse, func(v Vertex, d int) error {
+ // err will always be nil
+ _ = g.walk(breadthFirst|upOrder, true, reverse, func(v Vertex, d int) error {
  visits = append(visits, vertexAtDepth{v, d})
  return nil

View File

@@ -57,15 +57,15 @@ func (g *marshalGraph) Dot(opts *DotOpts) []byte {
  }
  var w indentWriter
- w.WriteString("digraph {\n")
+ w.writeString("digraph {\n")
  w.Indent()
  // some dot defaults
- w.WriteString(`compound = "true"` + "\n")
- w.WriteString(`newrank = "true"` + "\n")
+ w.writeString(`compound = "true"` + "\n")
+ w.writeString(`newrank = "true"` + "\n")
  // the top level graph is written as the first subgraph
- w.WriteString(`subgraph "root" {` + "\n")
+ w.writeString(`subgraph "root" {` + "\n")
  g.writeBody(opts, &w)
  // cluster isn't really used other than for naming purposes in some graphs
@@ -80,7 +80,7 @@ func (g *marshalGraph) Dot(opts *DotOpts) []byte {
  }
  w.Unindent()
- w.WriteString("}\n")
+ w.writeString("}\n")
  return w.Bytes()
  }
@@ -152,7 +152,8 @@ func (g *marshalGraph) writeSubgraph(sg *marshalGraph, opts *DotOpts, depth int,
  name = "cluster_" + name
  sg.Attrs["label"] = sg.Name
  }
- w.WriteString(fmt.Sprintf("subgraph %q {\n", name))
+ // writing to the buffer does not produce an error
+ _, _ = fmt.Fprintf(w, "subgraph %q {\n", name)
  sg.writeBody(opts, w)
  for _, sg := range sg.Subgraphs {
@@ -164,7 +165,7 @@ func (g *marshalGraph) writeBody(opts *DotOpts, w *indentWriter) {
  w.Indent()
  for _, as := range attrStrings(g.Attrs) {
- w.WriteString(as + "\n")
+ w.writeString(as + "\n")
  }
  // list of Vertices that aren't to be included in the dot output
@@ -176,7 +177,7 @@ func (g *marshalGraph) writeBody(opts *DotOpts, w *indentWriter) {
  continue
  }
- w.Write(v.dot(g, opts))
+ _, _ = w.Write(v.dot(g, opts))
  }
  var dotEdges []string
@@ -219,11 +220,11 @@ func (g *marshalGraph) writeBody(opts *DotOpts, w *indentWriter) {
  sort.Strings(dotEdges)
  for _, e := range dotEdges {
- w.WriteString(e + "\n")
+ w.writeString(e + "\n")
  }
  w.Unindent()
- w.WriteString("}\n")
+ w.writeString("}\n")
  }
  func writeAttrs(buf *bytes.Buffer, attrs map[string]string) {
@@ -273,6 +274,12 @@ func (w *indentWriter) Write(b []byte) (int, error) {
  return w.Buffer.Write(b)
  }
+ // writeString is a helper function to write a string to the indentWriter without the need for handling errors.
+ // the errors are ignored here because writing to a bytes.Buffer should never fail
+ func (w *indentWriter) writeString(s string) {
+ _, _ = w.WriteString(s)
+ }
  func (w *indentWriter) WriteString(s string) (int, error) {
  w.indent()
  return w.Buffer.WriteString(s)
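The new writeString helper relies on bytes.Buffer writes never returning a non-nil error (the buffer panics on out-of-memory instead), so the (n, err) results can be discarded safely. A stripped-down illustration of the same idea with an invented type, not the real indentWriter:

package main

import (
	"bytes"
	"fmt"
)

type indentWriter struct {
	bytes.Buffer
	level int
}

// writeString discards the (n, err) pair from bytes.Buffer.WriteString,
// which is documented to always return a nil error.
func (w *indentWriter) writeString(s string) {
	for i := 0; i < w.level; i++ {
		_, _ = w.Buffer.WriteString("  ")
	}
	_, _ = w.Buffer.WriteString(s)
}

func main() {
	var w indentWriter
	w.writeString("digraph {\n")
	w.level++
	w.writeString(`compound = "true"` + "\n")
	w.level--
	w.writeString("}\n")
	fmt.Print(w.String())
}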

View File

@@ -74,15 +74,13 @@ func (p *provisioner) ProvisionResource(req *tfplugin5.ProvisionResource_Request
  configVal, err := decodeDynamicValue(req.Config, ty)
  if err != nil {
  srvResp.Diagnostics = convert.AppendProtoDiag(srvResp.Diagnostics, err)
- srv.Send(srvResp)
- return nil
+ return srv.Send(srvResp)
  }
  connVal, err := decodeDynamicValue(req.Connection, shared.ConnectionBlockSupersetSchema.ImpliedType())
  if err != nil {
  srvResp.Diagnostics = convert.AppendProtoDiag(srvResp.Diagnostics, err)
- srv.Send(srvResp)
- return nil
+ return srv.Send(srvResp)
  }
  resp := p.provisioner.ProvisionResource(provisioners.ProvisionResourceRequest{
@@ -92,8 +90,7 @@ func (p *provisioner) ProvisionResource(req *tfplugin5.ProvisionResource_Request
  })
  srvResp.Diagnostics = convert.AppendProtoDiag(srvResp.Diagnostics, resp.Diagnostics)
- srv.Send(srvResp)
- return nil
+ return srv.Send(srvResp)
  }
  func (p *provisioner) Stop(context.Context, *tfplugin5.Stop_Request) (*tfplugin5.Stop_Response, error) {

View File

@@ -340,7 +340,7 @@ func DirFromModule(ctx context.Context, loader *configload.Loader, rootDir, modu
  continue
  }
- err = os.MkdirAll(instPath, os.ModePerm)
+ err := os.MkdirAll(instPath, os.ModePerm)
  if err != nil {
  diags = diags.Append(tfdiags.Sourceless(
  tfdiags.Error,
@@ -353,7 +353,7 @@ func DirFromModule(ctx context.Context, loader *configload.Loader, rootDir, modu
  // We copy rather than "rename" here because renaming between directories
  // can be tricky in edge-cases like network filesystems, etc.
  log.Printf("[TRACE] copying new module %s from %s to %s", newKey, record.Dir, instPath)
- err := copy.CopyDir(instPath, tempPath)
+ err = copy.CopyDir(instPath, tempPath)
  if err != nil {
  diags = diags.Append(tfdiags.Sourceless(
  tfdiags.Error,
@@ -377,7 +377,7 @@ func DirFromModule(ctx context.Context, loader *configload.Loader, rootDir, modu
  hooks.Install(newRecord.Key, newRecord.Version, newRecord.Dir)
  }
- retManifest.WriteSnapshotToDir(modulesDir)
+ err = retManifest.WriteSnapshotToDir(modulesDir)
  if err != nil {
  diags = diags.Append(tfdiags.Sourceless(
  tfdiags.Error,

View File

@@ -302,7 +302,10 @@ func TestDirFromModule_rel_submodules(t *testing.T) {
  t.Fatalf("failed to switch to temp dir %s: %s", tmpDir, err)
  }
  t.Cleanup(func() {
- os.Chdir(oldDir)
+ err := os.Chdir(oldDir)
+ if err != nil {
+ t.Logf("error running chdir to %s: %s", oldDir, err)
+ }
  // Trigger garbage collection to ensure that all open file handles are closed.
  // This prevents TempDir RemoveAll cleanup errors on Windows.
  if runtime.GOOS == "windows" {

View File

@@ -405,7 +405,8 @@ func optionalValueNotComputable(schema *configschema.Attribute, val cty.Value) b
  }
  foundNonComputedAttr := false
- cty.Walk(val, func(path cty.Path, v cty.Value) (bool, error) {
+ // err is always nil
+ _ = cty.Walk(val, func(path cty.Path, v cty.Value) (bool, error) {
  if v.IsNull() {
  return true, nil
  }
@@ -439,7 +440,8 @@ func validPriorFromConfig(schema nestedSchema, prior, config cty.Value) bool {
  stop := errors.New("stop")
  valid := true
- cty.Walk(prior, func(path cty.Path, priorV cty.Value) (bool, error) {
+ // err is always nil or `stop`
+ _ = cty.Walk(prior, func(path cty.Path, priorV cty.Value) (bool, error) {
  configV, err := path.Apply(config)
  if err != nil {
  // most likely dynamic objects with different types

View File

@@ -236,7 +236,11 @@ func mockRegHandler(config map[uint8]struct{}) http.Handler {
  return
  }
  w.Header().Set("Content-Type", "application/json")
- w.Write(js)
+ _, err = w.Write(js)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
  }
  mux.Handle("/v1/modules/",
@@ -257,7 +261,10 @@ func mockRegHandler(config map[uint8]struct{}) http.Handler {
  mux.HandleFunc("/.well-known/terraform.json", func(w http.ResponseWriter, r *http.Request) {
  w.Header().Set("Content-Type", "application/json")
- io.WriteString(w, `{"modules.v1":"http://localhost/v1/modules/", "providers.v1":"http://localhost/v1/providers/"}`)
+ _, err := io.WriteString(w, `{"modules.v1":"http://localhost/v1/modules/", "providers.v1":"http://localhost/v1/providers/"}`)
+ if err != nil {
+ w.WriteHeader(500)
+ }
  })
  return mux
  }