mirror of
https://github.com/opentffoundation/opentf.git
synced 2026-03-21 07:00:37 -04:00
We don't typically just broadly run automatic rewriting tools like "go fix" across our codebase because that tends to cause annoying and unnecessary merge conflicts when we're backporting to earlier release branches. But all of the files in this commit were changed in some non-trivial way already during the OpenTofu v1.11 development period anyway, and so the likelihood we'd be able to successfully backport from them is reduced and therefore this seems like a good opportunity to do some focused modernization using "go fix". My rules for what to include or not are admittedly quite "vibes-based", but the general idea was: - Focusing on files under the "command" directory only, because that's already been an area of intentional refactoring during this development period. - If the existing diff in a file is already significantly larger than the changes the fixer proposed to make, or if the fixer is proposing to change a line that was already changed in this development period. - More willing to include "_test.go" files than non-test files, even if they hadn't changed as much already, just because backports from test files for bug fixes tend to be entirely new test cases more than they are modifications to existing test cases, and so the risk of conflicts is lower there. Signed-off-by: Martin Atkins <mart@degeneration.co.uk>
139 lines
3.2 KiB
Go
139 lines
3.2 KiB
Go
// Copyright (c) The OpenTofu Authors
|
|
// SPDX-License-Identifier: MPL-2.0
|
|
// Copyright (c) 2023 HashiCorp, Inc.
|
|
// SPDX-License-Identifier: MPL-2.0
|
|
|
|
package views
|
|
|
|
import (
|
|
"encoding/json"
|
|
"strings"
|
|
"testing"
|
|
|
|
"github.com/opentofu/opentofu/internal/command/arguments"
|
|
"github.com/opentofu/opentofu/internal/terminal"
|
|
"github.com/opentofu/opentofu/internal/tfdiags"
|
|
)
|
|
|
|
func TestValidateHuman(t *testing.T) {
|
|
testCases := map[string]struct {
|
|
diag tfdiags.Diagnostic
|
|
wantSuccess bool
|
|
wantSubstring string
|
|
}{
|
|
"success": {
|
|
nil,
|
|
true,
|
|
"The configuration is valid.",
|
|
},
|
|
"warning": {
|
|
tfdiags.Sourceless(
|
|
tfdiags.Warning,
|
|
"Your shoelaces are untied",
|
|
"Watch out, or you'll trip!",
|
|
),
|
|
true,
|
|
"The configuration is valid, but there were some validation warnings",
|
|
},
|
|
"error": {
|
|
tfdiags.Sourceless(
|
|
tfdiags.Error,
|
|
"Configuration is missing random_pet",
|
|
"Every configuration should have a random_pet.",
|
|
),
|
|
false,
|
|
"Error: Configuration is missing random_pet",
|
|
},
|
|
}
|
|
for name, tc := range testCases {
|
|
t.Run(name, func(t *testing.T) {
|
|
streams, done := terminal.StreamsForTesting(t)
|
|
view := NewView(streams)
|
|
view.Configure(&arguments.View{NoColor: true})
|
|
v := NewValidate(arguments.ViewOptions{ViewType: arguments.ViewHuman}, view)
|
|
|
|
var diags tfdiags.Diagnostics
|
|
|
|
if tc.diag != nil {
|
|
diags = diags.Append(tc.diag)
|
|
}
|
|
|
|
ret := v.Results(diags)
|
|
|
|
if tc.wantSuccess && ret != 0 {
|
|
t.Errorf("expected 0 return code, got %d", ret)
|
|
} else if !tc.wantSuccess && ret != 1 {
|
|
t.Errorf("expected 1 return code, got %d", ret)
|
|
}
|
|
|
|
got := done(t).All()
|
|
if strings.Contains(got, "Success!") != tc.wantSuccess {
|
|
t.Errorf("unexpected output:\n%s", got)
|
|
}
|
|
if !strings.Contains(got, tc.wantSubstring) {
|
|
t.Errorf("expected output to include %q, but was:\n%s", tc.wantSubstring, got)
|
|
}
|
|
})
|
|
}
|
|
}
|
|
|
|
func TestValidateJSON(t *testing.T) {
|
|
testCases := map[string]struct {
|
|
diag tfdiags.Diagnostic
|
|
wantSuccess bool
|
|
}{
|
|
"success": {
|
|
nil,
|
|
true,
|
|
},
|
|
"warning": {
|
|
tfdiags.Sourceless(
|
|
tfdiags.Warning,
|
|
"Your shoelaces are untied",
|
|
"Watch out, or you'll trip!",
|
|
),
|
|
true,
|
|
},
|
|
"error": {
|
|
tfdiags.Sourceless(
|
|
tfdiags.Error,
|
|
"Configuration is missing random_pet",
|
|
"Every configuration should have a random_pet.",
|
|
),
|
|
false,
|
|
},
|
|
}
|
|
for name, tc := range testCases {
|
|
t.Run(name, func(t *testing.T) {
|
|
streams, done := terminal.StreamsForTesting(t)
|
|
view := NewView(streams)
|
|
view.Configure(&arguments.View{NoColor: true})
|
|
v := NewValidate(arguments.ViewOptions{ViewType: arguments.ViewJSON}, view)
|
|
|
|
var diags tfdiags.Diagnostics
|
|
|
|
if tc.diag != nil {
|
|
diags = diags.Append(tc.diag)
|
|
}
|
|
|
|
ret := v.Results(diags)
|
|
|
|
if tc.wantSuccess && ret != 0 {
|
|
t.Errorf("expected 0 return code, got %d", ret)
|
|
} else if !tc.wantSuccess && ret != 1 {
|
|
t.Errorf("expected 1 return code, got %d", ret)
|
|
}
|
|
|
|
got := done(t).All()
|
|
|
|
// Make sure the result looks like JSON; we comprehensively test
|
|
// the structure of this output in the command package tests.
|
|
var result map[string]any
|
|
|
|
if err := json.Unmarshal([]byte(got), &result); err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
})
|
|
}
|
|
}
|