mirror of
https://github.com/opentffoundation/opentf.git
synced 2025-12-19 17:59:05 -05:00
Add new configuration to AzureRM Backend (#3251)
Signed-off-by: Larry Bordowitz <laurence.bordowitz@gmail.com>
This commit is contained in:
@@ -38,8 +38,12 @@ ENHANCEMENTS:
|
||||
* It is now possible to configure the registry protocol retry count and request timeout settings in the CLI configuration, in addition to the previously-available environment variables. ([#3256](https://github.com/opentofu/opentofu/pull/3256))
|
||||
* Upgrade github.com/hashicorp/go-getter to v1.7.9 to fix [GO-2025-3892](https://pkg.go.dev/vuln/GO-2025-3892). ([#3227](https://github.com/opentofu/opentofu/pull/3227))
|
||||
* The module installer will copy files in parallel to improve performance of `init` ([#3214](https://github.com/opentofu/opentofu/pull/3214))
|
||||
* The following option has been added to the `azurerm` backend:
|
||||
* The following configuration options have been added to the `azurerm` backend:
|
||||
* `use_cli`: set to true by default, this can be set to false to disable command line authentication. ([#3034](https://github.com/opentofu/opentofu/pull/3034))
|
||||
* `use_aks_workload_identity`: set to false by default, this allows authentication in Azure Kubernetes when using Workload Identity Federation. ([#3251](https://github.com/opentofu/opentofu/pull/3251))
|
||||
* `client_id_file_path`: allows the user to set the `client_id` through a file. ([#3251](https://github.com/opentofu/opentofu/pull/3251))
|
||||
* `client_secret_file_path`: allows the user to set the `client_secret` through a file. ([#3251](https://github.com/opentofu/opentofu/pull/3251))
|
||||
* `client_certificate`: allows the user to set the certificate directly, as opposed to only setting it through a file. ([#3251](https://github.com/opentofu/opentofu/pull/3251))
|
||||
* Upgrade github.com/go-viper/mapstructure/v2 to v2.4.0 to fix [GO-2025-3900](https://pkg.go.dev/vuln/GO-2025-3900). ([#3229](https://github.com/opentofu/opentofu/pull/3229))
|
||||
|
||||
BUG FIXES:
|
||||
|
||||
@@ -112,3 +112,44 @@ Finally, run the MSI test:
|
||||
```bash
|
||||
$ ./azure.test -test.v -test.run "TestAcc.*ManagedServiceIdentity"
|
||||
```
|
||||
|
||||
### Running AKS Workload Identity Test
|
||||
|
||||
We strongly recommend using the workspace in the `meta-test` folder to set up the AKS Kubernetes cluster and associated authorizations.
|
||||
|
||||
Within the same directory as this README, compile all the tests:
|
||||
|
||||
```bash
|
||||
$ GOOS=linux GOARCH=amd64 go test -c .
|
||||
```
|
||||
|
||||
This will generate an `azure.test` file. Assuming that `kubectl` is configured to go to a pod named `shell-demo` in the `default` namespace, run the following command:
|
||||
|
||||
```bash
|
||||
kubectl cp azure.test shell-demo:/
|
||||
```
|
||||
|
||||
Shell into the pod:
|
||||
|
||||
```bash
|
||||
kubectl exec --stdin --tty shell-demo -- /bin/sh
|
||||
```
|
||||
|
||||
Set up the following environment variables:
|
||||
|
||||
```bash
|
||||
export TF_AZURE_TEST=1
|
||||
export TF_ACC=1
|
||||
export ARM_LOCATION=centralus
|
||||
export ARM_SUBSCRIPTION_ID='xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'
|
||||
export ARM_TENANT_ID='xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'
|
||||
export TF_AZURE_TEST_STORAGE_ACCOUNT_NAME=acctestsaxxxx
|
||||
export TF_AZURE_TEST_RESOURCE_GROUP_NAME=acctestRG-backend-1234567890-xxxx
|
||||
export TF_AZURE_TEST_CONTAINER_NAME=acctestcont
|
||||
```
|
||||
|
||||
Finally, run the AKS Workload Identity test:
|
||||
|
||||
```bash
|
||||
$ ./azure.test -test.v -test.run "TestAcc.*AKSWorkloadIdentity"
|
||||
```
|
||||
|
||||
@@ -20,6 +20,7 @@ type Config struct {
|
||||
OIDCAuthConfig
|
||||
MSIAuthConfig
|
||||
StorageAddresses
|
||||
WorkloadIdentityAuthConfig
|
||||
}
|
||||
|
||||
type AuthMethod interface {
|
||||
@@ -50,6 +51,7 @@ func GetAuthMethod(ctx context.Context, config *Config) (AuthMethod, error) {
|
||||
&clientSecretCredentialAuth{},
|
||||
&oidcAuth{},
|
||||
&managedIdentityAuth{},
|
||||
&workloadIdentityAuth{},
|
||||
&azureCLICredentialAuth{},
|
||||
}
|
||||
var diags tfdiags.Diagnostics
|
||||
|
||||
@@ -42,8 +42,8 @@ func (cred *azureCLICredentialAuth) Validate(ctx context.Context, config *Config
|
||||
if !config.CLIAuthEnabled {
|
||||
diags = diags.Append(tfdiags.Sourceless(
|
||||
tfdiags.Error,
|
||||
"Azure Command Line Auth: use_cli set to false",
|
||||
"Use of command-line auth (az) has been prevented by setting use_cli to false.",
|
||||
"Invalid Azure Command Line Auth",
|
||||
"Setting use_cli to false prevents the use of command-line auth (az).",
|
||||
))
|
||||
return diags
|
||||
}
|
||||
@@ -51,7 +51,7 @@ func (cred *azureCLICredentialAuth) Validate(ctx context.Context, config *Config
|
||||
if err != nil {
|
||||
diags = diags.Append(tfdiags.Sourceless(
|
||||
tfdiags.Error,
|
||||
"Azure Command Line Auth: az not found in PATH",
|
||||
"Invalid Azure Command Line Auth",
|
||||
"Error looking for command az in your PATH. Make sure the Azure Command Line tool is installed and executable.",
|
||||
))
|
||||
return diags
|
||||
@@ -61,8 +61,8 @@ func (cred *azureCLICredentialAuth) Validate(ctx context.Context, config *Config
|
||||
if err != nil {
|
||||
diags = diags.Append(tfdiags.Sourceless(
|
||||
tfdiags.Error,
|
||||
"Azure Command Line Auth: az command error",
|
||||
fmt.Sprintf("Error using the az command: %s", err.Error()),
|
||||
"Invalid Azure Command Line Auth",
|
||||
fmt.Sprintf("Error using the az command: %s.", tfdiags.FormatError(err)),
|
||||
))
|
||||
}
|
||||
return diags
|
||||
|
||||
@@ -6,8 +6,11 @@
|
||||
package auth
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
@@ -19,6 +22,7 @@ import (
|
||||
)
|
||||
|
||||
type ClientCertificateAuthConfig struct {
|
||||
ClientCertificate string
|
||||
ClientCertificatePassword string
|
||||
ClientCertificatePath string
|
||||
}
|
||||
@@ -34,17 +38,29 @@ func (cred *clientCertAuth) Name() string {
|
||||
func (cred *clientCertAuth) Construct(ctx context.Context, config *Config) (azcore.TokenCredential, error) {
|
||||
client := httpclient.New(ctx)
|
||||
|
||||
privateKey, certificate, err := decodePFXCertificate(
|
||||
config.ClientCertificateAuthConfig.ClientCertificatePath,
|
||||
config.ClientCertificateAuthConfig.ClientCertificatePassword,
|
||||
clientCertificate, err := consolidateCertificate(config.ClientCertificate, config.ClientCertificatePath)
|
||||
if err != nil {
|
||||
// This should never happen; this is checked in the Validate function
|
||||
return nil, err
|
||||
}
|
||||
|
||||
privateKey, certificate, err := pkcs12.Decode(
|
||||
clientCertificate,
|
||||
config.ClientCertificatePassword,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
clientId, err := consolidateClientId(config)
|
||||
if err != nil {
|
||||
// This should never happen; this is checked in the Validate function
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return azidentity.NewClientCertificateCredential(
|
||||
config.StorageAddresses.TenantID,
|
||||
config.ClientSecretCredentialAuthConfig.ClientID,
|
||||
config.TenantID,
|
||||
clientId,
|
||||
[]*x509.Certificate{certificate},
|
||||
privateKey,
|
||||
&azidentity.ClientCertificateCredentialOptions{
|
||||
@@ -55,36 +71,39 @@ func (cred *clientCertAuth) Construct(ctx context.Context, config *Config) (azco
|
||||
|
||||
func (cred *clientCertAuth) Validate(_ context.Context, config *Config) tfdiags.Diagnostics {
|
||||
var diags tfdiags.Diagnostics
|
||||
if config.StorageAddresses.TenantID == "" {
|
||||
if config.TenantID == "" {
|
||||
diags = diags.Append(tfdiags.Sourceless(
|
||||
tfdiags.Error,
|
||||
"Azure Client Certificate Auth: missing Tenant ID",
|
||||
"Tenant ID is required",
|
||||
"Invalid Azure Client Certificate Auth",
|
||||
"Tenant ID is missing.",
|
||||
))
|
||||
}
|
||||
if config.ClientSecretCredentialAuthConfig.ClientID == "" {
|
||||
|
||||
_, err := consolidateClientId(config)
|
||||
if err != nil {
|
||||
diags = diags.Append(tfdiags.Sourceless(
|
||||
tfdiags.Error,
|
||||
"Azure Client Certificate Auth: missing Client ID",
|
||||
"Client ID is required",
|
||||
"Invalid Azure Client Certificate Auth",
|
||||
fmt.Sprintf("The Client ID is misconfigured: %s.", tfdiags.FormatError(err)),
|
||||
))
|
||||
}
|
||||
if config.ClientCertificateAuthConfig.ClientCertificatePath == "" {
|
||||
clientCertificate, err := consolidateCertificate(config.ClientCertificate, config.ClientCertificatePath)
|
||||
if err != nil {
|
||||
diags = diags.Append(tfdiags.Sourceless(
|
||||
tfdiags.Error,
|
||||
"Azure Client Certificate Auth: missing certificate path",
|
||||
"The path to the client certificate is required",
|
||||
"Invalid Azure Client Certificate Auth",
|
||||
fmt.Sprintf("The Client Certificate is misconfigured: %s.", tfdiags.FormatError(err)),
|
||||
))
|
||||
} else {
|
||||
_, _, err := decodePFXCertificate(
|
||||
config.ClientCertificateAuthConfig.ClientCertificatePath,
|
||||
config.ClientCertificateAuthConfig.ClientCertificatePassword,
|
||||
_, _, err := pkcs12.Decode(
|
||||
clientCertificate,
|
||||
config.ClientCertificatePassword,
|
||||
)
|
||||
if err != nil {
|
||||
diags = diags.Append(tfdiags.Sourceless(
|
||||
tfdiags.Error,
|
||||
"Azure Client Certificate Auth: certificate credential error",
|
||||
fmt.Sprintf("The following error was encountered processing the certificate credentials: %s", err.Error()),
|
||||
"Invalid Azure Client Certificate Auth",
|
||||
fmt.Sprintf("The Client Certificate is invalid: %s.", tfdiags.FormatError(err)),
|
||||
))
|
||||
}
|
||||
}
|
||||
@@ -95,12 +114,42 @@ func (cred *clientCertAuth) AugmentConfig(_ context.Context, config *Config) err
|
||||
return checkNamesForAccessKeyCredentials(config.StorageAddresses)
|
||||
}
|
||||
|
||||
func decodePFXCertificate(pfxFileName string, password string) (privateKey interface{}, certificate *x509.Certificate, err error) {
|
||||
// read file contents, decode cert
|
||||
contents, err := os.ReadFile(pfxFileName)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("problem reading file at %s: %w", pfxFileName, err)
|
||||
return
|
||||
func consolidateCertificate(base64EncodedCertificate, certificateFilename string) ([]byte, error) {
|
||||
var certBytes []byte
|
||||
var fileBytes []byte
|
||||
|
||||
if len(base64EncodedCertificate) > 0 {
|
||||
var err error
|
||||
certBytes, err = base64.StdEncoding.DecodeString(base64EncodedCertificate)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error decoding client certificate: %w", err)
|
||||
}
|
||||
}
|
||||
return pkcs12.Decode(contents, password)
|
||||
if len(certificateFilename) > 0 {
|
||||
var err error
|
||||
fileBytes, err = os.ReadFile(certificateFilename)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error reading client certificate file: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
hasCert := len(certBytes) > 0
|
||||
hasFile := len(fileBytes) > 0
|
||||
|
||||
if !hasCert && !hasFile {
|
||||
return nil, errors.New("missing certificate, client certificate is required")
|
||||
}
|
||||
|
||||
if !hasCert {
|
||||
return fileBytes, nil
|
||||
}
|
||||
|
||||
if !hasFile {
|
||||
return certBytes, nil
|
||||
}
|
||||
|
||||
if !bytes.Equal(certBytes, fileBytes) {
|
||||
return nil, errors.New("client certificate provided directly and through file do not match; either make them the same value or only provide one")
|
||||
}
|
||||
return fileBytes, nil
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ package auth
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
|
||||
@@ -15,8 +16,10 @@ import (
|
||||
)
|
||||
|
||||
type ClientSecretCredentialAuthConfig struct {
|
||||
ClientID string
|
||||
ClientSecret string
|
||||
ClientID string
|
||||
ClientIDFilePath string
|
||||
ClientSecret string
|
||||
ClientSecretFilePath string
|
||||
}
|
||||
|
||||
type clientSecretCredentialAuth struct{}
|
||||
@@ -29,11 +32,21 @@ func (cred *clientSecretCredentialAuth) Name() string {
|
||||
|
||||
func (cred *clientSecretCredentialAuth) Construct(ctx context.Context, config *Config) (azcore.TokenCredential, error) {
|
||||
client := httpclient.New(ctx)
|
||||
clientId, err := consolidateClientId(config)
|
||||
if err != nil {
|
||||
// This should never happen; this is checked in the Validate function
|
||||
return nil, err
|
||||
}
|
||||
clientSecret, err := consolidateClientSecret(config)
|
||||
if err != nil {
|
||||
// This should never happen; this is checked in the Validate function
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return azidentity.NewClientSecretCredential(
|
||||
config.StorageAddresses.TenantID,
|
||||
config.ClientID,
|
||||
config.ClientSecret,
|
||||
clientId,
|
||||
clientSecret,
|
||||
&azidentity.ClientSecretCredentialOptions{
|
||||
ClientOptions: clientOptions(client, config.CloudConfig),
|
||||
},
|
||||
@@ -45,22 +58,24 @@ func (cred *clientSecretCredentialAuth) Validate(_ context.Context, config *Conf
|
||||
if config.StorageAddresses.TenantID == "" {
|
||||
diags = diags.Append(tfdiags.Sourceless(
|
||||
tfdiags.Error,
|
||||
"Azure Client Secret Auth: missing Tenant ID",
|
||||
"Tenant ID is required",
|
||||
"Invalid Azure Client Secret Auth",
|
||||
"Tenant ID is missing.",
|
||||
))
|
||||
}
|
||||
if config.ClientID == "" {
|
||||
_, err := consolidateClientId(config)
|
||||
if err != nil {
|
||||
diags = diags.Append(tfdiags.Sourceless(
|
||||
tfdiags.Error,
|
||||
"Azure Client Secret Auth: missing Client ID",
|
||||
"Client ID is required",
|
||||
"Invalid Azure Client Secret Auth",
|
||||
fmt.Sprintf("The Client ID is misconfigured: %s.", tfdiags.FormatError(err)),
|
||||
))
|
||||
}
|
||||
if config.ClientSecret == "" {
|
||||
_, err = consolidateClientSecret(config)
|
||||
if err != nil {
|
||||
diags = diags.Append(tfdiags.Sourceless(
|
||||
tfdiags.Error,
|
||||
"Azure Client Secret Auth: missing Client Secret",
|
||||
"Client Secret is required",
|
||||
"Invalid Azure Client Secret Auth",
|
||||
fmt.Sprintf("The Client Secret is misconfigured: %s.", tfdiags.FormatError(err)),
|
||||
))
|
||||
}
|
||||
return diags
|
||||
@@ -69,3 +84,11 @@ func (cred *clientSecretCredentialAuth) Validate(_ context.Context, config *Conf
|
||||
func (cred *clientSecretCredentialAuth) AugmentConfig(_ context.Context, config *Config) error {
|
||||
return checkNamesForAccessKeyCredentials(config.StorageAddresses)
|
||||
}
|
||||
|
||||
func consolidateClientId(config *Config) (string, error) {
|
||||
return consolidateFileAndValue(config.ClientID, config.ClientIDFilePath, "client ID", false)
|
||||
}
|
||||
|
||||
func consolidateClientSecret(config *Config) (string, error) {
|
||||
return consolidateFileAndValue(config.ClientSecret, config.ClientSecretFilePath, "client secret", false)
|
||||
}
|
||||
|
||||
@@ -83,7 +83,7 @@ func (cred *managedIdentityAuth) Validate(_ context.Context, config *Config) tfd
|
||||
if !config.MSIAuthConfig.UseMsi {
|
||||
diags = diags.Append(tfdiags.Sourceless(
|
||||
tfdiags.Error,
|
||||
"Azure Managed Service Identity Auth: use_msi set to false",
|
||||
"Invalid Azure Managed Service Identity Auth",
|
||||
"The Managed Service Identity (MSI) needs to have \"use_msi\" (or ARM_USE_MSI) set to true in order to be used.",
|
||||
))
|
||||
}
|
||||
|
||||
@@ -8,11 +8,9 @@ package auth
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
|
||||
@@ -38,22 +36,27 @@ func (cred *oidcAuth) Name() string {
|
||||
|
||||
func (cred *oidcAuth) Construct(ctx context.Context, config *Config) (azcore.TokenCredential, error) {
|
||||
client := httpclient.New(ctx)
|
||||
|
||||
clientId, err := consolidateClientId(config)
|
||||
if err != nil {
|
||||
// This should never happen; this is checked in the Validate function
|
||||
return nil, err
|
||||
}
|
||||
var token string
|
||||
var err error
|
||||
if config.OIDCToken == "" && config.OIDCTokenFilePath == "" {
|
||||
token, err = getTokenFromRemote(client, config.OIDCAuthConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
token, err = consolidateToken(config.OIDCAuthConfig)
|
||||
token, err = consolidateToken(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return azidentity.NewClientAssertionCredential(
|
||||
config.TenantID,
|
||||
config.ClientID,
|
||||
clientId,
|
||||
func(_ context.Context) (string, error) {
|
||||
return token, nil
|
||||
},
|
||||
@@ -103,22 +106,8 @@ func getTokenFromRemote(client *http.Client, config OIDCAuthConfig) (string, err
|
||||
return token.Value, nil
|
||||
}
|
||||
|
||||
func consolidateToken(config OIDCAuthConfig) (string, error) {
|
||||
token := config.OIDCToken
|
||||
if config.OIDCTokenFilePath == "" {
|
||||
return token, nil
|
||||
}
|
||||
|
||||
b, err := os.ReadFile(config.OIDCTokenFilePath)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error reading token file: %w", err)
|
||||
}
|
||||
|
||||
fileToken := string(b)
|
||||
if token != "" && token != fileToken {
|
||||
return "", errors.New("token provided directly and through file do not match; either make them the same value or only provide one")
|
||||
}
|
||||
return fileToken, nil
|
||||
func consolidateToken(config *Config) (string, error) {
|
||||
return consolidateFileAndValue(config.OIDCToken, config.OIDCTokenFilePath, "token", true)
|
||||
}
|
||||
|
||||
func (cred *oidcAuth) Validate(ctx context.Context, config *Config) tfdiags.Diagnostics {
|
||||
@@ -126,23 +115,24 @@ func (cred *oidcAuth) Validate(ctx context.Context, config *Config) tfdiags.Diag
|
||||
if !config.UseOIDC {
|
||||
diags = diags.Append(tfdiags.Sourceless(
|
||||
tfdiags.Error,
|
||||
"Azure OpenID Connect Auth: use_oidc set to false",
|
||||
"use_oidc or the environment variable ARM_USE_OIDC must be set to true",
|
||||
"Invalid Azure OpenID Connect Auth",
|
||||
"OpenID Connect Auth is disabled when use_oidc or the environment variable ARM_USE_OIDC are unset or set explicitly to false.",
|
||||
))
|
||||
return diags
|
||||
}
|
||||
if config.TenantID == "" {
|
||||
diags = diags.Append(tfdiags.Sourceless(
|
||||
tfdiags.Error,
|
||||
"Azure OpenID Connect Auth: missing Tenant ID",
|
||||
"Tenant ID is required",
|
||||
"Invalid Azure OpenID Connect Auth",
|
||||
"Tenant ID is missing.",
|
||||
))
|
||||
}
|
||||
if config.ClientID == "" {
|
||||
_, err := consolidateClientId(config)
|
||||
if err != nil {
|
||||
diags = diags.Append(tfdiags.Sourceless(
|
||||
tfdiags.Error,
|
||||
"Azure OpenID Connect Auth: missing Client ID",
|
||||
"Client ID is required",
|
||||
"Invalid Azure OpenID Connect Auth",
|
||||
fmt.Sprintf("The Client ID is misconfigured: %s.", tfdiags.FormatError(err)),
|
||||
))
|
||||
}
|
||||
directTokenUnset := config.OIDCToken == "" && config.OIDCTokenFilePath == ""
|
||||
@@ -150,8 +140,8 @@ func (cred *oidcAuth) Validate(ctx context.Context, config *Config) tfdiags.Diag
|
||||
if directTokenUnset && indirectTokenUnset {
|
||||
diags = diags.Append(tfdiags.Sourceless(
|
||||
tfdiags.Error,
|
||||
"Azure OpenID Connect Auth: missing access token",
|
||||
"An access token must be provided, either directly with a variable or through a file, or indirectly through a request URL and request token (as in GitHub Actions)",
|
||||
"Invalid Azure OpenID Connect Auth",
|
||||
"An access token must be provided, either directly with a variable or through a file, or indirectly through a request URL and request token (as in GitHub Actions).",
|
||||
))
|
||||
}
|
||||
if directTokenUnset {
|
||||
@@ -160,17 +150,17 @@ func (cred *oidcAuth) Validate(ctx context.Context, config *Config) tfdiags.Diag
|
||||
if err != nil {
|
||||
diags = diags.Append(tfdiags.Sourceless(
|
||||
tfdiags.Error,
|
||||
"Azure OpenID Connect Auth: error fetching token",
|
||||
fmt.Sprintf("The following error was encountered while fetching the token: %s", err.Error()),
|
||||
"Invalid Azure OpenID Connect Auth",
|
||||
fmt.Sprintf("Tried to test fetching the token, but received this error instead: %s.", tfdiags.FormatError(err)),
|
||||
))
|
||||
}
|
||||
}
|
||||
// This will work, even if both token and file path are empty
|
||||
if _, err := consolidateToken(config.OIDCAuthConfig); err != nil {
|
||||
if _, err := consolidateToken(config); err != nil {
|
||||
diags = diags.Append(tfdiags.Sourceless(
|
||||
tfdiags.Error,
|
||||
"Azure OpenID Connect Auth: error in token configuration",
|
||||
fmt.Sprintf("The following error was encountered: %s", err.Error()),
|
||||
"Invalid Azure OpenID Connect Auth",
|
||||
fmt.Sprintf("The token is misconfigured: %s", err.Error()),
|
||||
))
|
||||
}
|
||||
return diags
|
||||
|
||||
49
internal/backend/remote-state/azure/auth/utility.go
Normal file
49
internal/backend/remote-state/azure/auth/utility.go
Normal file
@@ -0,0 +1,49 @@
|
||||
// Copyright (c) The OpenTofu Authors
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
// Copyright (c) 2023 HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
package auth
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
)
|
||||
|
||||
// consolidateFileAndValue takes the (potentially empty) values of a directly-set configuration string and
|
||||
// the string value of a plaintext file and picks the one that's nonempty. If both are set and nonempty,
|
||||
// it checks that they share an identical value and returns that value. If they're both empty, it returns
|
||||
// an error unless acceptEmpty is true.
|
||||
func consolidateFileAndValue(value, fileName, fieldName string, acceptEmpty bool) (string, error) {
|
||||
var fileValue string
|
||||
if fileName != "" {
|
||||
b, err := os.ReadFile(fileName)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error reading %s file: %w", fieldName, err)
|
||||
}
|
||||
fileValue = string(b)
|
||||
}
|
||||
|
||||
hasValue := value != ""
|
||||
hasFile := fileValue != ""
|
||||
|
||||
if !hasValue && !hasFile {
|
||||
if acceptEmpty {
|
||||
return "", nil
|
||||
}
|
||||
return "", fmt.Errorf("missing %s, a %s is required", fieldName, fieldName)
|
||||
}
|
||||
|
||||
if !hasValue {
|
||||
return fileValue, nil
|
||||
}
|
||||
|
||||
if !hasFile {
|
||||
return value, nil
|
||||
}
|
||||
|
||||
if value != fileValue {
|
||||
return "", fmt.Errorf("%s provided directly and through file do not match; either make them the same value or only provide one", fieldName)
|
||||
}
|
||||
return fileValue, nil
|
||||
}
|
||||
@@ -0,0 +1,52 @@
|
||||
// Copyright (c) The OpenTofu Authors
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
// Copyright (c) 2023 HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
package auth
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
|
||||
"github.com/opentofu/opentofu/internal/httpclient"
|
||||
"github.com/opentofu/opentofu/internal/tfdiags"
|
||||
)
|
||||
|
||||
type WorkloadIdentityAuthConfig struct {
|
||||
UseAKSWorkloadIdentity bool
|
||||
}
|
||||
|
||||
type workloadIdentityAuth struct{}
|
||||
|
||||
var _ AuthMethod = &workloadIdentityAuth{}
|
||||
|
||||
func (cred *workloadIdentityAuth) Name() string {
|
||||
return "AKS Workload Identity Auth"
|
||||
}
|
||||
|
||||
func (cred *workloadIdentityAuth) Construct(ctx context.Context, config *Config) (azcore.TokenCredential, error) {
|
||||
client := httpclient.New(ctx)
|
||||
return azidentity.NewWorkloadIdentityCredential(
|
||||
&azidentity.WorkloadIdentityCredentialOptions{
|
||||
ClientOptions: clientOptions(client, config.CloudConfig),
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func (cred *workloadIdentityAuth) Validate(_ context.Context, config *Config) tfdiags.Diagnostics {
|
||||
var diags tfdiags.Diagnostics
|
||||
if !config.UseAKSWorkloadIdentity {
|
||||
diags = diags.Append(tfdiags.Sourceless(
|
||||
tfdiags.Error,
|
||||
"Invalid AKS Workload Identity Auth",
|
||||
"The AKS Workload Identity Auth needs to have \"use_aks_workload_identity\" (or ARM_USE_AKS_WORKLOAD_IDENTITY) set to true in order to be used.",
|
||||
))
|
||||
}
|
||||
return diags
|
||||
}
|
||||
|
||||
func (cred *workloadIdentityAuth) AugmentConfig(_ context.Context, config *Config) error {
|
||||
return checkNamesForAccessKeyCredentials(config.StorageAddresses)
|
||||
}
|
||||
@@ -94,6 +94,13 @@ func New(enc encryption.StateEncryption) backend.Backend {
|
||||
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_ID", ""),
|
||||
},
|
||||
|
||||
"client_id_file_path": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "The path to a file containing the Client ID.",
|
||||
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_ID_FILE_PATH", ""),
|
||||
},
|
||||
|
||||
"endpoint": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
@@ -130,16 +137,25 @@ func New(enc encryption.StateEncryption) backend.Backend {
|
||||
},
|
||||
|
||||
// Service Principal (Client Certificate) specific
|
||||
|
||||
"client_certificate": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "A Base64-encoded PKCS#12 (PFX, not PEM) certificate used as the Client Certificate when authenticating as a Service Principal. The file must encode both the public certificate and its private key.",
|
||||
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_CERTIFICATE", ""),
|
||||
},
|
||||
|
||||
"client_certificate_password": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "The password associated with the Client Certificate specified in `client_certificate_path`",
|
||||
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_CERTIFICATE_PASSWORD", ""),
|
||||
},
|
||||
|
||||
"client_certificate_path": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "The path to the PFX file used as the Client Certificate when authenticating as a Service Principal",
|
||||
Description: "The path to the PKCS#12 PFX file used as the Client Certificate when authenticating as a Service Principal. The file must encode both the public certificate and its private key.",
|
||||
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_CERTIFICATE_PATH", ""),
|
||||
},
|
||||
|
||||
@@ -151,6 +167,13 @@ func New(enc encryption.StateEncryption) backend.Backend {
|
||||
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_SECRET", ""),
|
||||
},
|
||||
|
||||
"client_secret_file_path": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "The path to a file containing the Client Secret.",
|
||||
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_SECRET_FILE_PATH", ""),
|
||||
},
|
||||
|
||||
// Managed Service Identity specific
|
||||
"use_msi": {
|
||||
Type: schema.TypeBool,
|
||||
@@ -198,6 +221,13 @@ func New(enc encryption.StateEncryption) backend.Backend {
|
||||
Description: "The bearer token to use for the request to the OIDC providers `oidc_request_url` URL to fetch an ID token. Needs to be used in conjunction with `oidc_request_url`. This is meant to be used for Github Actions.",
|
||||
},
|
||||
|
||||
"use_aks_workload_identity": {
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
DefaultFunc: schema.EnvDefaultFunc("ARM_USE_AKS_WORKLOAD_IDENTITY", false),
|
||||
Description: "Set to true to if you want to use Azure's AKS Workload Identity to authenticate to Azure. Defaults to false.",
|
||||
},
|
||||
|
||||
// Feature Flags
|
||||
"use_azuread_auth": {
|
||||
Type: schema.TypeBool,
|
||||
@@ -269,10 +299,13 @@ func (b *Backend) configure(ctx context.Context) error {
|
||||
CLIAuthEnabled: data.Get("use_cli").(bool),
|
||||
},
|
||||
ClientSecretCredentialAuthConfig: auth.ClientSecretCredentialAuthConfig{
|
||||
ClientID: data.Get("client_id").(string),
|
||||
ClientSecret: data.Get("client_secret").(string),
|
||||
ClientID: data.Get("client_id").(string),
|
||||
ClientIDFilePath: data.Get("client_id_file_path").(string),
|
||||
ClientSecret: data.Get("client_secret").(string),
|
||||
ClientSecretFilePath: data.Get("client_secret_file_path").(string),
|
||||
},
|
||||
ClientCertificateAuthConfig: auth.ClientCertificateAuthConfig{
|
||||
ClientCertificate: data.Get("client_certificate").(string),
|
||||
ClientCertificatePassword: data.Get("client_certificate_password").(string),
|
||||
ClientCertificatePath: data.Get("client_certificate_path").(string),
|
||||
},
|
||||
@@ -295,6 +328,9 @@ func (b *Backend) configure(ctx context.Context) error {
|
||||
SubscriptionID: data.Get("subscription_id").(string),
|
||||
TenantID: data.Get("tenant_id").(string),
|
||||
},
|
||||
WorkloadIdentityAuthConfig: auth.WorkloadIdentityAuthConfig{
|
||||
UseAKSWorkloadIdentity: data.Get("use_aks_workload_identity").(bool),
|
||||
},
|
||||
}
|
||||
|
||||
// MUST check storage account name and container name before trying to create a client.
|
||||
|
||||
@@ -12,12 +12,17 @@ import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
|
||||
"github.com/opentofu/opentofu/internal/backend"
|
||||
"github.com/opentofu/opentofu/internal/backend/remote-state/azure/auth"
|
||||
"github.com/opentofu/opentofu/internal/encryption"
|
||||
"github.com/opentofu/opentofu/internal/httpclient"
|
||||
"github.com/opentofu/opentofu/internal/legacy/helper/acctest"
|
||||
)
|
||||
|
||||
@@ -440,5 +445,65 @@ func TestAccBackendManagedServiceIdentity(t *testing.T) {
|
||||
backend.TestBackendStates(t, b)
|
||||
|
||||
// Manually delete all blobs in the container
|
||||
deleteBlobsInMSI(t, storageAccountName, resourceGroupName, containerName)
|
||||
client := httpclient.New(t.Context())
|
||||
|
||||
authCred, err := azidentity.NewManagedIdentityCredential(
|
||||
&azidentity.ManagedIdentityCredentialOptions{ClientOptions: azcore.ClientOptions{
|
||||
Telemetry: policy.TelemetryOptions{
|
||||
Disabled: true,
|
||||
},
|
||||
Transport: client,
|
||||
Cloud: cloud.AzurePublic,
|
||||
}},
|
||||
)
|
||||
if err != nil {
|
||||
t.Logf("Skipping deleting blobs in container %s due to error obtaining credentials: %v", containerName, err)
|
||||
return
|
||||
}
|
||||
|
||||
deleteBlobsManually(t, authCred, storageAccountName, resourceGroupName, containerName)
|
||||
}
|
||||
|
||||
// TestAccBackendAKSWorkloadIdentity tests if the backend functions when using workload identity, on Azure AKS (Kubernetes).
|
||||
// Note: this test does NOT create its own resource group, storage account, or storage container. You must set up that infrastructure
|
||||
// manually, as well as the kubernetes cluster, workload identity, and managed identity which this test depends upon.
|
||||
func TestAccBackendAKSWorkloadIdentity(t *testing.T) {
|
||||
testAccAzureBackend(t)
|
||||
|
||||
storageAccountName := os.Getenv("TF_AZURE_TEST_STORAGE_ACCOUNT_NAME")
|
||||
resourceGroupName := os.Getenv("TF_AZURE_TEST_RESOURCE_GROUP_NAME")
|
||||
containerName := os.Getenv("TF_AZURE_TEST_CONTAINER_NAME")
|
||||
|
||||
if storageAccountName == "" || resourceGroupName == "" || containerName == "" {
|
||||
t.Skip("For MSI tests, all infrastructure must be set up ahead of time and passed through environment variables.")
|
||||
}
|
||||
|
||||
b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
|
||||
"storage_account_name": storageAccountName,
|
||||
"container_name": containerName,
|
||||
"key": "testState",
|
||||
"resource_group_name": resourceGroupName,
|
||||
"use_aks_workload_identity": true,
|
||||
"use_cli": false,
|
||||
})).(*Backend)
|
||||
|
||||
backend.TestBackendStates(t, b)
|
||||
client := httpclient.New(t.Context())
|
||||
|
||||
authCred, err := azidentity.NewWorkloadIdentityCredential(
|
||||
&azidentity.WorkloadIdentityCredentialOptions{ClientOptions: azcore.ClientOptions{
|
||||
Telemetry: policy.TelemetryOptions{
|
||||
Disabled: true,
|
||||
},
|
||||
Transport: client,
|
||||
Cloud: cloud.AzurePublic,
|
||||
}},
|
||||
)
|
||||
if err != nil {
|
||||
t.Logf("Skipping deleting blobs in container %s due to error obtaining credentials: %v", containerName, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Manually delete all blobs in the container
|
||||
deleteBlobsManually(t, authCred, storageAccountName, resourceGroupName, containerName)
|
||||
}
|
||||
|
||||
@@ -15,9 +15,7 @@ import (
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
|
||||
@@ -157,22 +155,7 @@ func getSASToken(sharedKey *sas.SharedKeyCredential) (string, error) {
|
||||
return qps.Encode(), nil
|
||||
}
|
||||
|
||||
func deleteBlobsInMSI(t *testing.T, storageAccountName, resourceGroupName, containerName string) {
|
||||
client := httpclient.New(t.Context())
|
||||
|
||||
authCred, err := azidentity.NewManagedIdentityCredential(
|
||||
&azidentity.ManagedIdentityCredentialOptions{ClientOptions: azcore.ClientOptions{
|
||||
Telemetry: policy.TelemetryOptions{
|
||||
Disabled: true,
|
||||
},
|
||||
Transport: client,
|
||||
Cloud: cloud.AzurePublic,
|
||||
}},
|
||||
)
|
||||
if err != nil {
|
||||
t.Logf("Skipping deleting blobs in container %s due to error obtaining credentials: %v", containerName, err)
|
||||
return
|
||||
}
|
||||
func deleteBlobsManually(t *testing.T, authCred azcore.TokenCredential, storageAccountName, resourceGroupName, containerName string) {
|
||||
names := auth.StorageAddresses{
|
||||
CloudConfig: cloud.AzurePublic,
|
||||
ResourceGroup: resourceGroupName,
|
||||
@@ -221,5 +204,6 @@ func emptyAuthConfig() *auth.Config {
|
||||
OIDCAuthConfig: auth.OIDCAuthConfig{},
|
||||
MSIAuthConfig: auth.MSIAuthConfig{},
|
||||
StorageAddresses: auth.StorageAddresses{},
|
||||
WorkloadIdentityAuthConfig: auth.WorkloadIdentityAuthConfig{},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
.terraform*
|
||||
terraform.tfstate*
|
||||
certs.pfx
|
||||
test.env
|
||||
|
||||
@@ -50,6 +50,54 @@ ssh_instructions = "ssh azureadmin@xxx.xxx.xxx.xxx"
|
||||
|
||||
In order to tear down msi infrastructure, while keeping the rest of the credentials and setup, simply run the `tofu apply` without the `use_msi=true` variable.
|
||||
|
||||
### AKS Workload Identity
|
||||
|
||||
By default, the Kubernetes cluster, identity, and associated authorizations required for AKS Workload Identity testing are not set up by this workspace. In order to set those up, you need some extra variables:
|
||||
|
||||
```bash
|
||||
$ tofu apply -show-sensitive -var 'use_aks_workload_identity=true' -var 'location=centralus'
|
||||
```
|
||||
|
||||
There are additional instructions listed under `aks_kubectl_instructions`. It should look something like this:
|
||||
|
||||
```bash
|
||||
az aks get-credentials --name "azClusterTestabcd" --resource-group "acctestRG-backend-xxxx-abcd"
|
||||
cat <<EOF | kubectl apply -f -
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
annotations:
|
||||
azure.workload.identity/client-id: "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
|
||||
name: "workload-identity-abcd"
|
||||
namespace: "default"
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: shell-demo
|
||||
namespace: default
|
||||
labels:
|
||||
azure.workload.identity/use: "true"
|
||||
spec:
|
||||
serviceAccountName: "workload-identity-abcd"
|
||||
volumes:
|
||||
- name: shared-data
|
||||
emptyDir: {}
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx
|
||||
volumeMounts:
|
||||
- name: shared-data
|
||||
mountPath: /usr/share/nginx/html
|
||||
hostNetwork: true
|
||||
dnsPolicy: Default
|
||||
EOF
|
||||
```
|
||||
|
||||
Copying and pasting this into the shell will authenticate into the newly-created kubernetes cluster and create a service account and pod. This pod will be the workload the test will be running in.
|
||||
|
||||
In order to tear down Kubernetes cluster infrastructure, while keeping the rest of the credentials and setup, simply run the `tofu apply` without the `use_aks_workload_identity=true` variable.
|
||||
|
||||
### Cleanup
|
||||
|
||||
Simply run
|
||||
|
||||
98
internal/backend/remote-state/azure/meta-test/aks/main.tf
Normal file
98
internal/backend/remote-state/azure/meta-test/aks/main.tf
Normal file
@@ -0,0 +1,98 @@
|
||||
resource "time_static" "rg_timestamp" {}
|
||||
|
||||
resource "random_string" "resource_suffix" {
|
||||
length = 4
|
||||
special = false
|
||||
upper = false
|
||||
}
|
||||
|
||||
locals {
|
||||
storage_account_name = "acctestsa${random_string.resource_suffix.result}"
|
||||
resource_group_name = "acctestRG-backend-${time_static.rg_timestamp.unix}-${random_string.resource_suffix.result}"
|
||||
container_name = "acctestcont"
|
||||
vm_username = "azureadmin"
|
||||
cluster_name = "azClusterTest${random_string.resource_suffix.result}"
|
||||
dns_prefix = "cluster-${random_string.resource_suffix.result}"
|
||||
|
||||
k8s_sa_name = "workload-identity-${random_string.resource_suffix.result}"
|
||||
}
|
||||
|
||||
resource "azurerm_resource_group" "storage_test" {
|
||||
name = local.resource_group_name
|
||||
location = var.location
|
||||
}
|
||||
|
||||
resource "azurerm_user_assigned_identity" "example" {
|
||||
location = azurerm_resource_group.storage_test.location
|
||||
name = "open-tofu-test-identity"
|
||||
resource_group_name = azurerm_resource_group.storage_test.name
|
||||
}
|
||||
|
||||
resource "azurerm_storage_account" "test_account" {
|
||||
name = local.storage_account_name
|
||||
resource_group_name = azurerm_resource_group.storage_test.name
|
||||
location = azurerm_resource_group.storage_test.location
|
||||
account_tier = "Standard"
|
||||
account_replication_type = "LRS"
|
||||
}
|
||||
|
||||
resource "azurerm_storage_container" "test_container" {
|
||||
name = local.container_name
|
||||
storage_account_id = azurerm_storage_account.test_account.id
|
||||
container_access_type = "private"
|
||||
}
|
||||
|
||||
resource "azurerm_role_assignment" "example" {
|
||||
scope = azurerm_storage_account.test_account.id
|
||||
role_definition_name = "Storage Account Contributor"
|
||||
principal_id = azurerm_user_assigned_identity.example.principal_id
|
||||
}
|
||||
|
||||
resource "azurerm_role_assignment" "blob_contributor" {
|
||||
scope = azurerm_storage_container.test_container.id
|
||||
role_definition_name = "Storage Blob Data Contributor"
|
||||
principal_id = azurerm_user_assigned_identity.example.principal_id
|
||||
}
|
||||
|
||||
resource "azurerm_kubernetes_cluster" "main" {
|
||||
name = local.cluster_name
|
||||
resource_group_name = azurerm_resource_group.storage_test.name
|
||||
location = azurerm_resource_group.storage_test.location
|
||||
dns_prefix = local.dns_prefix
|
||||
|
||||
identity {
|
||||
type = "SystemAssigned"
|
||||
}
|
||||
|
||||
default_node_pool {
|
||||
name = "agentpool"
|
||||
vm_size = "Standard_D2_v2"
|
||||
node_count = 1
|
||||
upgrade_settings {
|
||||
max_surge = "10%"
|
||||
}
|
||||
}
|
||||
linux_profile {
|
||||
admin_username = local.vm_username
|
||||
|
||||
ssh_key {
|
||||
key_data = file(var.ssh_pub_key_path)
|
||||
}
|
||||
}
|
||||
network_profile {
|
||||
network_plugin = "kubenet"
|
||||
load_balancer_sku = "standard"
|
||||
}
|
||||
|
||||
oidc_issuer_enabled = true
|
||||
workload_identity_enabled = true
|
||||
}
|
||||
|
||||
resource "azurerm_federated_identity_credential" "ksa-wif" {
|
||||
name = "k8sworkloadid"
|
||||
resource_group_name = azurerm_resource_group.storage_test.name
|
||||
audience = ["api://AzureADTokenExchange"]
|
||||
issuer = azurerm_kubernetes_cluster.main.oidc_issuer_url
|
||||
parent_id = azurerm_user_assigned_identity.example.id
|
||||
subject = "system:serviceaccount:default:${local.k8s_sa_name}"
|
||||
}
|
||||
23
internal/backend/remote-state/azure/meta-test/aks/outputs.tf
Normal file
23
internal/backend/remote-state/azure/meta-test/aks/outputs.tf
Normal file
@@ -0,0 +1,23 @@
|
||||
output "storage_account_name" {
|
||||
value = local.storage_account_name
|
||||
}
|
||||
|
||||
output "resource_group_name" {
|
||||
value = local.resource_group_name
|
||||
}
|
||||
|
||||
output "container_name" {
|
||||
value = local.container_name
|
||||
}
|
||||
|
||||
output "cluster_name" {
|
||||
value = local.cluster_name
|
||||
}
|
||||
|
||||
output "ksa_name" {
|
||||
value = local.k8s_sa_name
|
||||
}
|
||||
|
||||
output "az_client_id" {
|
||||
value = azurerm_user_assigned_identity.example.client_id
|
||||
}
|
||||
@@ -0,0 +1,7 @@
|
||||
variable "location" {
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "ssh_pub_key_path" {
|
||||
type = string
|
||||
}
|
||||
@@ -0,0 +1,8 @@
|
||||
terraform {
|
||||
required_providers {
|
||||
azurerm = {
|
||||
source = "hashicorp/azurerm"
|
||||
version = "4.35.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -87,6 +87,14 @@ module "msi" {
|
||||
ssh_pub_key_path = var.ssh_pub_key_path
|
||||
}
|
||||
|
||||
module "aks" {
|
||||
source = "./aks"
|
||||
count = var.use_aks_workload_identity ? 1 : 0
|
||||
|
||||
location = var.location
|
||||
ssh_pub_key_path = var.ssh_pub_key_path
|
||||
}
|
||||
|
||||
locals {
|
||||
msi_extra_env_vars = !var.use_msi ? "" : <<EOT
|
||||
export TF_AZURE_TEST_STORAGE_ACCOUNT_NAME=${module.msi[0].storage_account_name}
|
||||
|
||||
@@ -27,3 +27,51 @@ output "msi_env_vars" {
|
||||
EOT
|
||||
sensitive = true
|
||||
}
|
||||
|
||||
output "aks_kubectl_instructions" {
|
||||
value = !var.use_aks_workload_identity ? "" : <<-EOT
|
||||
Run the following on your machine to finish kubernetes setup
|
||||
az aks get-credentials --name "${module.aks[0].cluster_name}" --resource-group "${module.aks[0].resource_group_name}"
|
||||
cat <<EOF | kubectl apply -f -
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
annotations:
|
||||
azure.workload.identity/client-id: "${module.aks[0].az_client_id}"
|
||||
name: "${module.aks[0].ksa_name}"
|
||||
namespace: "default"
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: shell-demo
|
||||
namespace: default
|
||||
labels:
|
||||
azure.workload.identity/use: "true"
|
||||
spec:
|
||||
serviceAccountName: "${module.aks[0].ksa_name}"
|
||||
containers:
|
||||
- name: alpine
|
||||
image: alpine
|
||||
command: [ "/bin/sh", "-c", "--" ]
|
||||
args: [ "while true; do sleep 30; done;" ]
|
||||
hostNetwork: true
|
||||
dnsPolicy: Default
|
||||
EOF
|
||||
EOT
|
||||
}
|
||||
|
||||
output "aks_env_vars" {
|
||||
value = !var.use_aks_workload_identity ? "Set use_aks_workload_identity=true to get environment variable set" : <<-EOT
|
||||
Please set the following environment variables in your pod:
|
||||
export TF_AZURE_TEST=1
|
||||
export TF_ACC=1
|
||||
export ARM_LOCATION=centralus
|
||||
export ARM_SUBSCRIPTION_ID='${data.azurerm_client_config.current.subscription_id}'
|
||||
export ARM_TENANT_ID='${data.azurerm_client_config.current.tenant_id}'
|
||||
export TF_AZURE_TEST_STORAGE_ACCOUNT_NAME=${module.aks[0].storage_account_name}
|
||||
export TF_AZURE_TEST_RESOURCE_GROUP_NAME=${module.aks[0].resource_group_name}
|
||||
export TF_AZURE_TEST_CONTAINER_NAME=${module.aks[0].container_name}
|
||||
EOT
|
||||
sensitive = true
|
||||
}
|
||||
|
||||
@@ -4,6 +4,12 @@ variable "use_msi" {
|
||||
description = "Set this to generate the VM infrastructure and managed service identity authorizations required to run the MSI tests."
|
||||
}
|
||||
|
||||
variable "use_aks_workload_identity" {
|
||||
default = false
|
||||
type = bool
|
||||
description = "Set this to generate the kubernetes infrastructure and managed service identity authorizations required to run the AKS workload identity tests."
|
||||
}
|
||||
|
||||
variable "location" {
|
||||
default = "centralus"
|
||||
type = string
|
||||
|
||||
@@ -206,6 +206,27 @@ terraform {
|
||||
|
||||
***
|
||||
|
||||
### AKS Workload Identity
|
||||
|
||||
This example authenticates using workload identity on Azure Kubernetes.
|
||||
|
||||
```hcl
|
||||
terraform {
|
||||
backend "azurerm" {
|
||||
resource_group_name = "tofu-rg"
|
||||
storage_account_name = "tofu123abc"
|
||||
container_name = "tofu-state"
|
||||
key = "prod.terraform.tfstate"
|
||||
|
||||
subscription_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
|
||||
tenant_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
|
||||
use_aks_workload_identity = true
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
***
|
||||
|
||||
## Data Source Configuration
|
||||
|
||||
Authentication for a data source works equivalently to the remote state authentication shown above, though with slightly different syntax. For example, this is how to obtain the remote state using CLI Authentication:
|
||||
@@ -282,10 +303,22 @@ When authenticating using the Managed Service Identity (MSI) - the following fie
|
||||
|
||||
***
|
||||
|
||||
When authenticating using the AKS Workload Identity - the following fields are also supported:
|
||||
|
||||
* `subscription_id` - (Optional) The Subscription ID in which the Storage Account exists. This can also be sourced from the `ARM_SUBSCRIPTION_ID` environment variable.
|
||||
|
||||
* `tenant_id` - (Optional) The Tenant ID in which the Subscription exists. This can also be sourced from the `ARM_TENANT_ID` environment variable.
|
||||
|
||||
* `use_aks_workload_identity` - (Optional) Set to true to if you want to use Azure's AKS Workload Identity to authenticate to Azure. This will only work if Workload Identity is set up on your Kubernetes workload. This can also be sourced from the `ARM_USE_AKS_WORKLOAD_IDENTITY` environment variable.
|
||||
|
||||
***
|
||||
|
||||
When authenticating using a Service Principal with OpenID Connect (OIDC) - the following fields are also supported:
|
||||
|
||||
* `client_id` - (Optional) The Client ID of the Service Principal. This can also be sourced from the `ARM_CLIENT_ID` environment variable.
|
||||
|
||||
* `client_id_file_path` - (Optional) The path to a file containing the Client ID of the Service Principal. This can also be sourced from the `ARM_CLIENT_ID_FILE_PATH` environment variable.
|
||||
|
||||
* `subscription_id` - (Optional) The Subscription ID in which the Storage Account exists. This can also be sourced from the `ARM_SUBSCRIPTION_ID` environment variable.
|
||||
|
||||
* `tenant_id` - (Optional) The Tenant ID in which the Subscription exists. This can also be sourced from the `ARM_TENANT_ID` environment variable.
|
||||
@@ -335,9 +368,13 @@ When authenticating using a Service Principal with a Client Certificate - the fo
|
||||
|
||||
* `client_id` - (Optional) The Client ID of the Service Principal. This can also be sourced from the `ARM_CLIENT_ID` environment variable.
|
||||
|
||||
* `client_certificate_password` - (Optional) The password associated with the Client Certificate specified in `client_certificate_path`. This can also be sourced from the `ARM_CLIENT_CERTIFICATE_PASSWORD` environment variable.
|
||||
* `client_id_file_path` - (Optional) The path to a file containing the Client ID of the Service Principal. This can also be sourced from the `ARM_CLIENT_ID_FILE_PATH` environment variable.
|
||||
|
||||
* `client_certificate_path` - (Optional) The path to the PFX file used as the Client Certificate when authenticating as a Service Principal. This can also be sourced from the `ARM_CLIENT_CERTIFICATE_PATH` environment variable.
|
||||
* `client_certificate` - (Optional) A Base64-encoded PKCS#12 (PFX, not PEM) certificate used as the Client Certificate when authenticating as a Service Principal. The provided data string must encode both the public certificate and its private key. This can also be sourced from the `ARM_CLIENT_CERTIFICATE` environment variable.
|
||||
|
||||
* `client_certificate_password` - (Optional) The password associated with the Client Certificate specified in `client_certificate` or `client_certificate_path`. This can also be sourced from the `ARM_CLIENT_CERTIFICATE_PASSWORD` environment variable.
|
||||
|
||||
* `client_certificate_path` - (Optional) The path to the PKCS#12 PFX file used as the Client Certificate when authenticating as a Service Principal. The file must encode both the public certificate and its private key. This can also be sourced from the `ARM_CLIENT_CERTIFICATE_PATH` environment variable.
|
||||
|
||||
* `subscription_id` - (Optional) The Subscription ID in which the Storage Account exists. This can also be sourced from the `ARM_SUBSCRIPTION_ID` environment variable.
|
||||
|
||||
@@ -350,8 +387,12 @@ When authenticating using a Service Principal with a Client Secret - the followi
|
||||
|
||||
* `client_id` - (Optional) The Client ID of the Service Principal. This can also be sourced from the `ARM_CLIENT_ID` environment variable.
|
||||
|
||||
* `client_id_file_path` - (Optional) The path to a file containing the Client ID of the Service Principal. This can also be sourced from the `ARM_CLIENT_ID_FILE_PATH` environment variable.
|
||||
|
||||
* `client_secret` - (Optional) The Client Secret of the Service Principal. This can also be sourced from the `ARM_CLIENT_SECRET` environment variable.
|
||||
|
||||
* `client_secret_file_path` - (Optional) The path to a file containing the Client Secret of the Service Principal. This can also be sourced from the `ARM_CLIENT_SECRET_FILE_PATH` environment variable.
|
||||
|
||||
* `subscription_id` - (Optional) The Subscription ID in which the Storage Account exists. This can also be sourced from the `ARM_SUBSCRIPTION_ID` environment variable.
|
||||
|
||||
* `tenant_id` - (Optional) The Tenant ID in which the Subscription exists. This can also be sourced from the `ARM_TENANT_ID` environment variable.
|
||||
|
||||
Reference in New Issue
Block a user