chore(internal/provider): refactor cached_image_resource ()

Addresses some non-blocking comments from :

- Extracts some of the functions in cached_image_resource.go to separate internal packages tfutil and imgutil.
- Some other functions are extracted to helpers.go.
- Extracts non-overridable flags to a package-level variable.
- Pre-allocates some slices where possible.
- Removes some unused code and renames some existing code for readability.
Cian Johnston 2024-09-04 16:29:25 +01:00 committed by GitHub
parent 23f2cf5f48
commit 482a446eb3
7 changed files with 472 additions and 420 deletions

internal/imgutil/imgutil.go Normal file

@@ -0,0 +1,103 @@
package imgutil
import (
"archive/tar"
"context"
"fmt"
"io"
"os"
"path/filepath"
"github.com/coder/envbuilder/constants"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/hashicorp/terraform-plugin-log/tflog"
)
// GetRemoteImage fetches the image manifest of the image.
func GetRemoteImage(imgRef string) (v1.Image, error) {
ref, err := name.ParseReference(imgRef)
if err != nil {
return nil, fmt.Errorf("parse reference: %w", err)
}
img, err := remote.Image(ref, remote.WithAuthFromKeychain(authn.DefaultKeychain))
if err != nil {
return nil, fmt.Errorf("check remote image: %w", err)
}
return img, nil
}
// ExtractEnvbuilderFromImage reads the image located at imgRef and extracts
// MagicBinaryLocation to destPath.
func ExtractEnvbuilderFromImage(ctx context.Context, imgRef, destPath string) error {
needle := filepath.Clean(constants.MagicBinaryLocation)[1:] // skip leading '/'
img, err := GetRemoteImage(imgRef)
if err != nil {
return fmt.Errorf("check remote image: %w", err)
}
layers, err := img.Layers()
if err != nil {
return fmt.Errorf("get image layers: %w", err)
}
// Check the layers in reverse order. The last layers are more likely to
// include the binary.
for i := len(layers) - 1; i >= 0; i-- {
ul, err := layers[i].Uncompressed()
if err != nil {
return fmt.Errorf("get uncompressed layer: %w", err)
}
tr := tar.NewReader(ul)
for {
th, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
return fmt.Errorf("read tar header: %w", err)
}
name := filepath.Clean(th.Name)
if th.Typeflag != tar.TypeReg {
tflog.Debug(ctx, "skip non-regular file", map[string]any{"name": name, "layer_idx": i + 1})
continue
}
if name != needle {
tflog.Debug(ctx, "skip file", map[string]any{"name": name, "layer_idx": i + 1})
continue
}
tflog.Debug(ctx, "found file", map[string]any{"name": name, "layer_idx": i + 1})
if err := os.MkdirAll(filepath.Dir(destPath), 0o755); err != nil {
return fmt.Errorf("create parent directories: %w", err)
}
destF, err := os.Create(destPath)
if err != nil {
return fmt.Errorf("create dest file for writing: %w", err)
}
defer destF.Close()
_, err = io.Copy(destF, tr)
if err != nil {
return fmt.Errorf("copy dest file from image: %w", err)
}
if err := destF.Close(); err != nil {
return fmt.Errorf("close dest file: %w", err)
}
if err := os.Chmod(destPath, 0o755); err != nil {
return fmt.Errorf("chmod file: %w", err)
}
return nil
}
}
return fmt.Errorf("extract envbuilder binary from image %q: %w", imgRef, os.ErrNotExist)
}
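
As a quick orientation for the new package, the two exported helpers are meant to be used together: resolve the remote image, then pull the envbuilder binary out of its layers. A minimal sketch follows (importable only from within this module; the image reference and destination path are illustrative assumptions, not from the commit):

package main

import (
	"context"
	"log"

	"github.com/coder/terraform-provider-envbuilder/internal/imgutil"
)

func main() {
	ctx := context.Background()
	ref := "ghcr.io/coder/envbuilder:latest" // hypothetical builder image reference

	// Resolve the manifest; useful on its own for digest checks.
	img, err := imgutil.GetRemoteImage(ref)
	if err != nil {
		log.Fatalf("resolve image: %v", err)
	}
	digest, err := img.Digest()
	if err != nil {
		log.Fatalf("read digest: %v", err)
	}
	log.Printf("builder image digest: %s", digest)

	// Extract the embedded envbuilder binary from the image layers to a local path.
	if err := imgutil.ExtractEnvbuilderFromImage(ctx, ref, "/tmp/envbuilder"); err != nil {
		log.Fatalf("extract envbuilder: %v", err)
	}
}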

internal/provider/cached_image_resource.go

@@ -1,33 +1,24 @@
package provider
import (
"archive/tar"
"context"
"fmt"
"io"
"net/http"
"os"
"path/filepath"
"sort"
"strings"
kconfig "github.com/GoogleContainerTools/kaniko/pkg/config"
"github.com/coder/envbuilder"
"github.com/coder/envbuilder/constants"
eblog "github.com/coder/envbuilder/log"
eboptions "github.com/coder/envbuilder/options"
"github.com/coder/serpent"
"github.com/coder/terraform-provider-envbuilder/internal/imgutil"
"github.com/coder/terraform-provider-envbuilder/internal/tfutil"
"github.com/go-git/go-billy/v5/osfs"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/google/uuid"
"github.com/spf13/pflag"
"github.com/hashicorp/terraform-plugin-framework/attr"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/path"
"github.com/hashicorp/terraform-plugin-framework/resource"
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/boolplanmodifier"
@@ -295,249 +286,13 @@ func (r *CachedImageResource) Configure(ctx context.Context, req resource.Config
r.client = client
}
func optionsFromDataModel(data CachedImageResourceModel) (eboptions.Options, diag.Diagnostics) {
var diags diag.Diagnostics
var opts eboptions.Options
// Required options. Cannot be overridden by extra_env.
opts.CacheRepo = data.CacheRepo.ValueString()
opts.GitURL = data.GitURL.ValueString()
// Other options can be overridden by extra_env, with a warning.
// Keep track of which options are overridden.
overrides := make(map[string]struct{})
if !data.BaseImageCacheDir.IsNull() {
overrides["ENVBUILDER_BASE_IMAGE_CACHE_DIR"] = struct{}{}
opts.BaseImageCacheDir = data.BaseImageCacheDir.ValueString()
}
if !data.BuildContextPath.IsNull() {
overrides["ENVBUILDER_BUILD_CONTEXT_PATH"] = struct{}{}
opts.BuildContextPath = data.BuildContextPath.ValueString()
}
if !data.CacheTTLDays.IsNull() {
overrides["ENVBUILDER_CACHE_TTL_DAYS"] = struct{}{}
opts.CacheTTLDays = data.CacheTTLDays.ValueInt64()
}
if !data.DevcontainerDir.IsNull() {
overrides["ENVBUILDER_DEVCONTAINER_DIR"] = struct{}{}
opts.DevcontainerDir = data.DevcontainerDir.ValueString()
}
if !data.DevcontainerJSONPath.IsNull() {
overrides["ENVBUILDER_DEVCONTAINER_JSON_PATH"] = struct{}{}
opts.DevcontainerJSONPath = data.DevcontainerJSONPath.ValueString()
}
if !data.DockerfilePath.IsNull() {
overrides["ENVBUILDER_DOCKERFILE_PATH"] = struct{}{}
opts.DockerfilePath = data.DockerfilePath.ValueString()
}
if !data.DockerConfigBase64.IsNull() {
overrides["ENVBUILDER_DOCKER_CONFIG_BASE64"] = struct{}{}
opts.DockerConfigBase64 = data.DockerConfigBase64.ValueString()
}
if !data.ExitOnBuildFailure.IsNull() {
overrides["ENVBUILDER_EXIT_ON_BUILD_FAILURE"] = struct{}{}
opts.ExitOnBuildFailure = data.ExitOnBuildFailure.ValueBool()
}
if !data.FallbackImage.IsNull() {
overrides["ENVBUILDER_FALLBACK_IMAGE"] = struct{}{}
opts.FallbackImage = data.FallbackImage.ValueString()
}
if !data.GitCloneDepth.IsNull() {
overrides["ENVBUILDER_GIT_CLONE_DEPTH"] = struct{}{}
opts.GitCloneDepth = data.GitCloneDepth.ValueInt64()
}
if !data.GitCloneSingleBranch.IsNull() {
overrides["ENVBUILDER_GIT_CLONE_SINGLE_BRANCH"] = struct{}{}
opts.GitCloneSingleBranch = data.GitCloneSingleBranch.ValueBool()
}
if !data.GitHTTPProxyURL.IsNull() {
overrides["ENVBUILDER_GIT_HTTP_PROXY_URL"] = struct{}{}
opts.GitHTTPProxyURL = data.GitHTTPProxyURL.ValueString()
}
if !data.GitSSHPrivateKeyPath.IsNull() {
overrides["ENVBUILDER_GIT_SSH_PRIVATE_KEY_PATH"] = struct{}{}
opts.GitSSHPrivateKeyPath = data.GitSSHPrivateKeyPath.ValueString()
}
if !data.GitUsername.IsNull() {
overrides["ENVBUILDER_GIT_USERNAME"] = struct{}{}
opts.GitUsername = data.GitUsername.ValueString()
}
if !data.GitPassword.IsNull() {
overrides["ENVBUILDER_GIT_PASSWORD"] = struct{}{}
opts.GitPassword = data.GitPassword.ValueString()
}
if !data.IgnorePaths.IsNull() {
overrides["ENVBUILDER_IGNORE_PATHS"] = struct{}{}
opts.IgnorePaths = tfListToStringSlice(data.IgnorePaths)
}
if !data.Insecure.IsNull() {
overrides["ENVBUILDER_INSECURE"] = struct{}{}
opts.Insecure = data.Insecure.ValueBool()
}
if data.RemoteRepoBuildMode.IsNull() {
opts.RemoteRepoBuildMode = true
} else {
overrides["ENVBUILDER_REMOTE_REPO_BUILD_MODE"] = struct{}{}
opts.RemoteRepoBuildMode = data.RemoteRepoBuildMode.ValueBool()
}
if !data.SSLCertBase64.IsNull() {
overrides["ENVBUILDER_SSL_CERT_BASE64"] = struct{}{}
opts.SSLCertBase64 = data.SSLCertBase64.ValueString()
}
if !data.Verbose.IsNull() {
overrides["ENVBUILDER_VERBOSE"] = struct{}{}
opts.Verbose = data.Verbose.ValueBool()
}
if !data.WorkspaceFolder.IsNull() {
overrides["ENVBUILDER_WORKSPACE_FOLDER"] = struct{}{}
opts.WorkspaceFolder = data.WorkspaceFolder.ValueString()
}
// convert extraEnv to a map for ease of use.
extraEnv := make(map[string]string)
for k, v := range data.ExtraEnv.Elements() {
extraEnv[k] = tfValueToString(v)
}
diags = append(diags, overrideOptionsFromExtraEnv(&opts, extraEnv, overrides)...)
return opts, diags
}
func overrideOptionsFromExtraEnv(opts *eboptions.Options, extraEnv map[string]string, overrides map[string]struct{}) diag.Diagnostics {
var diags diag.Diagnostics
// Make a map of the options for easy lookup.
optsMap := make(map[string]pflag.Value)
for _, opt := range opts.CLI() {
optsMap[opt.Env] = opt.Value
}
for key, val := range extraEnv {
switch key {
// These options may not be overridden.
case "ENVBUILDER_CACHE_REPO", "ENVBUILDER_GIT_URL":
diags.AddAttributeWarning(path.Root("extra_env"),
"Cannot override required environment variable",
fmt.Sprintf("The key %q in extra_env cannot be overridden.", key),
)
continue
default:
// Check if the option was set on the provider data model and generate a warning if so.
if _, overridden := overrides[key]; overridden {
diags.AddAttributeWarning(path.Root("extra_env"),
"Overriding provider environment variable",
fmt.Sprintf("The key %q in extra_env overrides an option set on the provider.", key),
)
}
// XXX: workaround for serpent behaviour where calling Set() on a
// string slice will append instead of replace: set to empty first.
if key == "ENVBUILDER_IGNORE_PATHS" {
_ = optsMap[key].Set("")
}
opt, found := optsMap[key]
if !found {
// ignore unknown keys
continue
}
if err := opt.Set(val); err != nil {
diags.AddAttributeError(path.Root("extra_env"),
"Invalid value for environment variable",
fmt.Sprintf("The key %q in extra_env has an invalid value: %s", key, err),
)
}
}
}
return diags
}
func computeEnvFromOptions(opts eboptions.Options, extraEnv map[string]string) map[string]string {
allEnvKeys := make(map[string]struct{})
for _, opt := range opts.CLI() {
if opt.Env == "" {
continue
}
allEnvKeys[opt.Env] = struct{}{}
}
// Only set the environment variables from opts that are not legacy options.
// Legacy options are those that are not prefixed with ENVBUILDER_.
// While we can detect when a legacy option is set, overriding it becomes
// problematic. Erring on the side of caution, we will not override legacy options.
isEnvbuilderOption := func(key string) bool {
switch key {
case "CODER_AGENT_URL", "CODER_AGENT_TOKEN", "CODER_AGENT_SUBSYSTEM":
return true // kinda
default:
return strings.HasPrefix(key, "ENVBUILDER_")
}
}
computed := make(map[string]string)
for _, opt := range opts.CLI() {
if opt.Env == "" {
continue
}
// TODO: remove this check once support for legacy options is removed.
if !isEnvbuilderOption(opt.Env) {
continue
}
var val string
if sa, ok := opt.Value.(*serpent.StringArray); ok {
val = strings.Join(sa.GetSlice(), ",")
} else {
val = opt.Value.String()
}
switch val {
case "", "false", "0":
// Skip zero values.
continue
}
computed[opt.Env] = val
}
// Merge in extraEnv, which may override values from opts.
// Skip any keys that are envbuilder options.
for key, val := range extraEnv {
if isEnvbuilderOption(key) {
continue
}
computed[key] = val
}
return computed
}
// setComputedEnv sets data.Env and data.EnvMap based on the values of the
// other fields in the model.
func (data *CachedImageResourceModel) setComputedEnv(ctx context.Context, env map[string]string) diag.Diagnostics {
var diag, ds diag.Diagnostics
data.EnvMap, ds = basetypes.NewMapValueFrom(ctx, types.StringType, env)
diag = append(diag, ds...)
data.Env, ds = basetypes.NewListValueFrom(ctx, types.StringType, sortedKeyValues(env))
data.Env, ds = basetypes.NewListValueFrom(ctx, types.StringType, tfutil.DockerEnv(env))
diag = append(diag, ds...)
return diag
}
@@ -558,7 +313,7 @@ func (r *CachedImageResource) Read(ctx context.Context, req resource.ReadRequest
return
}
// Set the expected environment variables.
computedEnv := computeEnvFromOptions(opts, tfMapToStringMap(data.ExtraEnv))
computedEnv := computeEnvFromOptions(opts, tfutil.TFMapToStringMap(data.ExtraEnv))
resp.Diagnostics.Append(data.setComputedEnv(ctx, computedEnv)...)
// If the previous state is that Image == BuilderImage, then we previously did
@@ -574,7 +329,7 @@ func (r *CachedImageResource) Read(ctx context.Context, req resource.ReadRequest
}
// Check the remote registry for the image we previously found.
img, err := getRemoteImage(data.Image.ValueString())
img, err := imgutil.GetRemoteImage(data.Image.ValueString())
if err != nil {
if !strings.Contains(err.Error(), "MANIFEST_UNKNOWN") {
// Explicitly not making this an error diag.
@@ -629,7 +384,7 @@ func (r *CachedImageResource) Create(ctx context.Context, req resource.CreateReq
}
// Set the expected environment variables.
computedEnv := computeEnvFromOptions(opts, tfMapToStringMap(data.ExtraEnv))
computedEnv := computeEnvFromOptions(opts, tfutil.TFMapToStringMap(data.ExtraEnv))
resp.Diagnostics.Append(data.setComputedEnv(ctx, computedEnv)...)
cachedImg, err := runCacheProbe(ctx, data.BuilderImage.ValueString(), opts)
@@ -716,7 +471,7 @@ func runCacheProbe(ctx context.Context, builderImage string, opts eboptions.Opti
// In order to correctly reproduce the final layer of the cached image, we
// need the envbuilder binary used to originally build the image!
envbuilderPath := filepath.Join(tmpDir, "envbuilder")
if err := extractEnvbuilderFromImage(ctx, builderImage, envbuilderPath); err != nil {
if err := imgutil.ExtractEnvbuilderFromImage(ctx, builderImage, envbuilderPath); err != nil {
tflog.Error(ctx, "failed to fetch envbuilder binary from builder image", map[string]any{"err": err})
return nil, fmt.Errorf("failed to fetch the envbuilder binary from the builder image: %s", err.Error())
}
@@ -729,7 +484,7 @@ func runCacheProbe(ctx context.Context, builderImage string, opts eboptions.Opti
// We always want to get the cached image.
opts.GetCachedImage = true
// Log to the Terraform logger.
opts.Logger = tfLogFunc(ctx)
opts.Logger = tfutil.TFLogFunc(ctx)
// We don't require users to set a workspace folder, but maybe there's a
// reason someone may need to.
@@ -766,165 +521,3 @@ func runCacheProbe(ctx context.Context, builderImage string, opts eboptions.Opti
return envbuilder.RunCacheProbe(ctx, opts)
}
// getRemoteImage fetches the image manifest of the image.
func getRemoteImage(imgRef string) (v1.Image, error) {
ref, err := name.ParseReference(imgRef)
if err != nil {
return nil, fmt.Errorf("parse reference: %w", err)
}
img, err := remote.Image(ref, remote.WithAuthFromKeychain(authn.DefaultKeychain))
if err != nil {
return nil, fmt.Errorf("check remote image: %w", err)
}
return img, nil
}
// extractEnvbuilderFromImage reads the image located at imgRef and extracts
// MagicBinaryLocation to destPath.
func extractEnvbuilderFromImage(ctx context.Context, imgRef, destPath string) error {
needle := filepath.Clean(constants.MagicBinaryLocation)[1:] // skip leading '/'
img, err := getRemoteImage(imgRef)
if err != nil {
return fmt.Errorf("check remote image: %w", err)
}
layers, err := img.Layers()
if err != nil {
return fmt.Errorf("get image layers: %w", err)
}
// Check the layers in reverse order. The last layers are more likely to
// include the binary.
for i := len(layers) - 1; i >= 0; i-- {
ul, err := layers[i].Uncompressed()
if err != nil {
return fmt.Errorf("get uncompressed layer: %w", err)
}
tr := tar.NewReader(ul)
for {
th, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
return fmt.Errorf("read tar header: %w", err)
}
name := filepath.Clean(th.Name)
if th.Typeflag != tar.TypeReg {
tflog.Debug(ctx, "skip non-regular file", map[string]any{"name": name, "layer_idx": i + 1})
continue
}
if name != needle {
tflog.Debug(ctx, "skip file", map[string]any{"name": name, "layer_idx": i + 1})
continue
}
tflog.Debug(ctx, "found file", map[string]any{"name": name, "layer_idx": i + 1})
if err := os.MkdirAll(filepath.Dir(destPath), 0o755); err != nil {
return fmt.Errorf("create parent directories: %w", err)
}
destF, err := os.Create(destPath)
if err != nil {
return fmt.Errorf("create dest file for writing: %w", err)
}
defer destF.Close()
_, err = io.Copy(destF, tr)
if err != nil {
return fmt.Errorf("copy dest file from image: %w", err)
}
if err := destF.Close(); err != nil {
return fmt.Errorf("close dest file: %w", err)
}
if err := os.Chmod(destPath, 0o755); err != nil {
return fmt.Errorf("chmod file: %w", err)
}
return nil
}
}
return fmt.Errorf("extract envbuilder binary from image %q: %w", imgRef, os.ErrNotExist)
}
// tfValueToString converts an attr.Value to its string representation
// based on its Terraform type. This is needed because the String()
// method on an attr.Value creates a 'human-readable' version of the type, which
// leads to quotes, escaped characters, and other assorted sadness.
func tfValueToString(val attr.Value) string {
if val.IsUnknown() || val.IsNull() {
return ""
}
if vs, ok := val.(interface{ ValueString() string }); ok {
return vs.ValueString()
}
if vb, ok := val.(interface{ ValueBool() bool }); ok {
return fmt.Sprintf("%t", vb.ValueBool())
}
if vi, ok := val.(interface{ ValueInt64() int64 }); ok {
return fmt.Sprintf("%d", vi.ValueInt64())
}
panic(fmt.Errorf("tfValueToString: value %T is not a supported type", val))
}
// tfListToStringSlice converts a types.List to a []string by calling
// tfValueToString on each element.
func tfListToStringSlice(l types.List) []string {
var ss []string
for _, el := range l.Elements() {
ss = append(ss, tfValueToString(el))
}
return ss
}
// tfMapToStringMap converts a types.Map to a map[string]string by calling
// tfValueToString on each element.
func tfMapToStringMap(m types.Map) map[string]string {
res := make(map[string]string)
for k, v := range m.Elements() {
res[k] = tfValueToString(v)
}
return res
}
// tfLogFunc is an adapter to envbuilder/log.Func.
func tfLogFunc(ctx context.Context) eblog.Func {
return func(level eblog.Level, format string, args ...any) {
var logFn func(context.Context, string, ...map[string]interface{})
switch level {
case eblog.LevelTrace:
logFn = tflog.Trace
case eblog.LevelDebug:
logFn = tflog.Debug
case eblog.LevelWarn:
logFn = tflog.Warn
case eblog.LevelError:
logFn = tflog.Error
default:
logFn = tflog.Info
}
logFn(ctx, fmt.Sprintf(format, args...))
}
}
// sortedKeyValues returns the keys and values of the map in the form "key=value"
// sorted by key in lexicographical order.
func sortedKeyValues(m map[string]string) []string {
pairs := make([]string, 0, len(m))
var sb strings.Builder
for k := range m {
_, _ = sb.WriteString(k)
_, _ = sb.WriteRune('=')
_, _ = sb.WriteString(m[k])
pairs = append(pairs, sb.String())
sb.Reset()
}
sort.Strings(pairs)
return pairs
}

internal/provider/cached_image_resource_test.go

@@ -34,13 +34,17 @@ func TestAccCachedImageResource(t *testing.T) {
".devcontainer/devcontainer.json": `{"image": "localhost:5000/test-ubuntu:latest"}`,
},
extraEnv: map[string]string{
"FOO": testEnvValue,
"CODER_AGENT_TOKEN": "some-token",
"CODER_AGENT_URL": "https://coder.example.com",
"ENVBUILDER_GIT_URL": "https://not.the.real.git/url",
"ENVBUILDER_CACHE_REPO": "not-the-real-cache-repo",
"FOO": testEnvValue,
},
assertEnv: func(t *testing.T, deps testDependencies) resource.TestCheckFunc {
return resource.ComposeAggregateTestCheckFunc(
assertEnv(t,
"CODER_AGENT_TOKEN", "some-token",
"CODER_AGENT_URL", "https://coder.example.com",
"ENVBUILDER_CACHE_REPO", deps.CacheRepo,
"ENVBUILDER_GIT_SSH_PRIVATE_KEY_PATH", deps.Repo.Key,
"ENVBUILDER_GIT_URL", deps.Repo.URL,
@@ -62,6 +66,8 @@ func TestAccCachedImageResource(t *testing.T) {
RUN date > /date.txt`,
},
extraEnv: map[string]string{
"CODER_AGENT_TOKEN": "some-token",
"CODER_AGENT_URL": "https://coder.example.com",
"FOO": testEnvValue,
"ENVBUILDER_GIT_URL": "https://not.the.real.git/url",
"ENVBUILDER_CACHE_REPO": "not-the-real-cache-repo",
@@ -69,6 +75,8 @@ RUN date > /date.txt`,
assertEnv: func(t *testing.T, deps testDependencies) resource.TestCheckFunc {
return resource.ComposeAggregateTestCheckFunc(
assertEnv(t,
"CODER_AGENT_TOKEN", "some-token",
"CODER_AGENT_URL", "https://coder.example.com",
"ENVBUILDER_CACHE_REPO", deps.CacheRepo,
"ENVBUILDER_GIT_SSH_PRIVATE_KEY_PATH", deps.Repo.Key,
"ENVBUILDER_GIT_URL", deps.Repo.URL,
@@ -89,6 +97,8 @@ RUN date > /date.txt`,
RUN date > /date.txt`,
},
extraEnv: map[string]string{
"CODER_AGENT_TOKEN": "some-token",
"CODER_AGENT_URL": "https://coder.example.com",
"FOO": testEnvValue,
"ENVBUILDER_GIT_URL": "https://not.the.real.git/url",
"ENVBUILDER_CACHE_REPO": "not-the-real-cache-repo",
@@ -99,6 +109,8 @@ RUN date > /date.txt`,
assertEnv: func(t *testing.T, deps testDependencies) resource.TestCheckFunc {
return resource.ComposeAggregateTestCheckFunc(
assertEnv(t,
"CODER_AGENT_TOKEN", "some-token",
"CODER_AGENT_URL", "https://coder.example.com",
"ENVBUILDER_CACHE_REPO", deps.CacheRepo,
"ENVBUILDER_DEVCONTAINER_DIR", "path/to/.devcontainer",
"ENVBUILDER_DEVCONTAINER_JSON_PATH", "path/to/.devcontainer/devcontainer.json",

internal/provider/helpers.go Normal file

@@ -0,0 +1,255 @@
package provider
import (
"fmt"
"strings"
eboptions "github.com/coder/envbuilder/options"
"github.com/coder/serpent"
"github.com/coder/terraform-provider-envbuilder/internal/tfutil"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/path"
"github.com/spf13/pflag"
)
const (
envbuilderOptionPrefix = "ENVBUILDER_"
)
// nonOverrideOptions are options that cannot be overridden by extra_env.
var nonOverrideOptions = map[string]bool{
"ENVBUILDER_CACHE_REPO": true,
"ENVBUILDER_GIT_URL": true,
}
// optionsFromDataModel converts a CachedImageResourceModel into a corresponding set of
// Envbuilder options. It returns the options and any diagnostics encountered.
func optionsFromDataModel(data CachedImageResourceModel) (eboptions.Options, diag.Diagnostics) {
var diags diag.Diagnostics
var opts eboptions.Options
// Required options. Cannot be overridden by extra_env.
opts.CacheRepo = data.CacheRepo.ValueString()
opts.GitURL = data.GitURL.ValueString()
// Other options can be overridden by extra_env, with a warning.
// Keep track of which options are set from the data model so we
// can check if they are being overridden.
providerOpts := make(map[string]bool)
if !data.BaseImageCacheDir.IsNull() {
providerOpts["ENVBUILDER_BASE_IMAGE_CACHE_DIR"] = true
opts.BaseImageCacheDir = data.BaseImageCacheDir.ValueString()
}
if !data.BuildContextPath.IsNull() {
providerOpts["ENVBUILDER_BUILD_CONTEXT_PATH"] = true
opts.BuildContextPath = data.BuildContextPath.ValueString()
}
if !data.CacheTTLDays.IsNull() {
providerOpts["ENVBUILDER_CACHE_TTL_DAYS"] = true
opts.CacheTTLDays = data.CacheTTLDays.ValueInt64()
}
if !data.DevcontainerDir.IsNull() {
providerOpts["ENVBUILDER_DEVCONTAINER_DIR"] = true
opts.DevcontainerDir = data.DevcontainerDir.ValueString()
}
if !data.DevcontainerJSONPath.IsNull() {
providerOpts["ENVBUILDER_DEVCONTAINER_JSON_PATH"] = true
opts.DevcontainerJSONPath = data.DevcontainerJSONPath.ValueString()
}
if !data.DockerfilePath.IsNull() {
providerOpts["ENVBUILDER_DOCKERFILE_PATH"] = true
opts.DockerfilePath = data.DockerfilePath.ValueString()
}
if !data.DockerConfigBase64.IsNull() {
providerOpts["ENVBUILDER_DOCKER_CONFIG_BASE64"] = true
opts.DockerConfigBase64 = data.DockerConfigBase64.ValueString()
}
if !data.ExitOnBuildFailure.IsNull() {
providerOpts["ENVBUILDER_EXIT_ON_BUILD_FAILURE"] = true
opts.ExitOnBuildFailure = data.ExitOnBuildFailure.ValueBool()
}
if !data.FallbackImage.IsNull() {
providerOpts["ENVBUILDER_FALLBACK_IMAGE"] = true
opts.FallbackImage = data.FallbackImage.ValueString()
}
if !data.GitCloneDepth.IsNull() {
providerOpts["ENVBUILDER_GIT_CLONE_DEPTH"] = true
opts.GitCloneDepth = data.GitCloneDepth.ValueInt64()
}
if !data.GitCloneSingleBranch.IsNull() {
providerOpts["ENVBUILDER_GIT_CLONE_SINGLE_BRANCH"] = true
opts.GitCloneSingleBranch = data.GitCloneSingleBranch.ValueBool()
}
if !data.GitHTTPProxyURL.IsNull() {
providerOpts["ENVBUILDER_GIT_HTTP_PROXY_URL"] = true
opts.GitHTTPProxyURL = data.GitHTTPProxyURL.ValueString()
}
if !data.GitSSHPrivateKeyPath.IsNull() {
providerOpts["ENVBUILDER_GIT_SSH_PRIVATE_KEY_PATH"] = true
opts.GitSSHPrivateKeyPath = data.GitSSHPrivateKeyPath.ValueString()
}
if !data.GitUsername.IsNull() {
providerOpts["ENVBUILDER_GIT_USERNAME"] = true
opts.GitUsername = data.GitUsername.ValueString()
}
if !data.GitPassword.IsNull() {
providerOpts["ENVBUILDER_GIT_PASSWORD"] = true
opts.GitPassword = data.GitPassword.ValueString()
}
if !data.IgnorePaths.IsNull() {
providerOpts["ENVBUILDER_IGNORE_PATHS"] = true
opts.IgnorePaths = tfutil.TFListToStringSlice(data.IgnorePaths)
}
if !data.Insecure.IsNull() {
providerOpts["ENVBUILDER_INSECURE"] = true
opts.Insecure = data.Insecure.ValueBool()
}
if data.RemoteRepoBuildMode.IsNull() {
opts.RemoteRepoBuildMode = true
} else {
providerOpts["ENVBUILDER_REMOTE_REPO_BUILD_MODE"] = true
opts.RemoteRepoBuildMode = data.RemoteRepoBuildMode.ValueBool()
}
if !data.SSLCertBase64.IsNull() {
providerOpts["ENVBUILDER_SSL_CERT_BASE64"] = true
opts.SSLCertBase64 = data.SSLCertBase64.ValueString()
}
if !data.Verbose.IsNull() {
providerOpts["ENVBUILDER_VERBOSE"] = true
opts.Verbose = data.Verbose.ValueBool()
}
if !data.WorkspaceFolder.IsNull() {
providerOpts["ENVBUILDER_WORKSPACE_FOLDER"] = true
opts.WorkspaceFolder = data.WorkspaceFolder.ValueString()
}
// convert extraEnv to a map for ease of use.
extraEnv := make(map[string]string)
for k, v := range data.ExtraEnv.Elements() {
extraEnv[k] = tfutil.TFValueToString(v)
}
diags = append(diags, overrideOptionsFromExtraEnv(&opts, extraEnv, providerOpts)...)
return opts, diags
}
// overrideOptionsFromExtraEnv overrides the options in opts with values from extraEnv.
// It returns any diagnostics encountered.
// It will not override certain options, such as ENVBUILDER_CACHE_REPO and ENVBUILDER_GIT_URL.
func overrideOptionsFromExtraEnv(opts *eboptions.Options, extraEnv map[string]string, providerOpts map[string]bool) diag.Diagnostics {
var diags diag.Diagnostics
// Make a map of the options for easy lookup.
optsMap := make(map[string]pflag.Value)
for _, opt := range opts.CLI() {
optsMap[opt.Env] = opt.Value
}
for key, val := range extraEnv {
opt, found := optsMap[key]
if !found {
// ignore unknown keys
continue
}
if nonOverrideOptions[key] {
diags.AddAttributeWarning(path.Root("extra_env"),
"Cannot override required environment variable",
fmt.Sprintf("The key %q in extra_env cannot be overridden.", key),
)
continue
}
// Check if the option was set on the provider data model and generate a warning if so.
if providerOpts[key] {
diags.AddAttributeWarning(path.Root("extra_env"),
"Overriding provider environment variable",
fmt.Sprintf("The key %q in extra_env overrides an option set on the provider.", key),
)
}
// XXX: workaround for serpent behaviour where calling Set() on a
// string slice will append instead of replace: set to empty first.
if key == "ENVBUILDER_IGNORE_PATHS" {
_ = optsMap[key].Set("")
}
if err := opt.Set(val); err != nil {
diags.AddAttributeError(path.Root("extra_env"),
"Invalid value for environment variable",
fmt.Sprintf("The key %q in extra_env has an invalid value: %s", key, err),
)
}
}
return diags
}
// computeEnvFromOptions computes the environment variables to set based on the
// options in opts and the extra environment variables in extraEnv.
// It returns the computed environment variables as a map.
// It will not set certain options, such as ENVBUILDER_CACHE_REPO and ENVBUILDER_GIT_URL.
// It will also not handle legacy Envbuilder options (i.e. those not prefixed with ENVBUILDER_).
func computeEnvFromOptions(opts eboptions.Options, extraEnv map[string]string) map[string]string {
for _, opt := range opts.CLI() {
if opt.Env == "" {
continue
}
}
computed := make(map[string]string)
for _, opt := range opts.CLI() {
if opt.Env == "" {
continue
}
// TODO: remove this check once support for legacy options is removed.
// Only set the environment variables from opts that are not legacy options.
// Legacy options are those that are not prefixed with ENVBUILDER_.
// While we can detect when a legacy option is set, overriding it becomes
// problematic. Erring on the side of caution, we will not override legacy options.
if !strings.HasPrefix(opt.Env, envbuilderOptionPrefix) {
continue
}
var val string
if sa, ok := opt.Value.(*serpent.StringArray); ok {
val = strings.Join(sa.GetSlice(), ",")
} else {
val = opt.Value.String()
}
switch val {
case "", "false", "0":
// Skip zero values.
continue
}
computed[opt.Env] = val
}
// Merge in extraEnv, which may override values from opts.
// Skip any keys that are envbuilder options.
for key, val := range extraEnv {
if strings.HasPrefix(key, envbuilderOptionPrefix) {
continue
}
computed[key] = val
}
return computed
}
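
The override and pass-through rules above are easiest to see with a small in-package sketch. The following is a hypothetical test (the function name, example registry/git URLs, and the use of testify are illustrative assumptions, not part of the commit):

package provider

import (
	"testing"

	eboptions "github.com/coder/envbuilder/options"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestHelpersOverrideSketch(t *testing.T) {
	opts := eboptions.Options{
		CacheRepo:     "registry.example.com/cache",
		GitURL:        "https://git.example.com/repo.git",
		GitCloneDepth: 1, // pretend this was set via the provider data model
	}
	providerOpts := map[string]bool{"ENVBUILDER_GIT_CLONE_DEPTH": true}

	extraEnv := map[string]string{
		"ENVBUILDER_CACHE_REPO":      "evil.example.com/cache", // non-overridable: warning, value kept
		"ENVBUILDER_GIT_CLONE_DEPTH": "5",                      // overrides a provider-set option: warning
		"FOO":                        "bar",                    // unknown to envbuilder: skipped by the override loop
	}

	diags := overrideOptionsFromExtraEnv(&opts, extraEnv, providerOpts)
	require.False(t, diags.HasError())
	assert.Len(t, diags, 2)                                       // two warnings, no errors
	assert.Equal(t, "registry.example.com/cache", opts.CacheRepo) // unchanged
	assert.Equal(t, int64(5), opts.GitCloneDepth)                 // overridden

	// computeEnvFromOptions only emits ENVBUILDER_-prefixed variables from opts;
	// everything else supplied via extra_env passes through verbatim.
	env := computeEnvFromOptions(opts, extraEnv)
	assert.Equal(t, "5", env["ENVBUILDER_GIT_CLONE_DEPTH"])
	assert.Equal(t, "registry.example.com/cache", env["ENVBUILDER_CACHE_REPO"])
	assert.Equal(t, "bar", env["FOO"])
}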


@@ -284,9 +284,6 @@ func Test_computeEnvFromOptions(t *testing.T) {
"FOO": "bar", // should be included
},
expectEnv: map[string]string{
"CODER_AGENT_SUBSYSTEM": "one,two",
"CODER_AGENT_TOKEN": "string",
"CODER_AGENT_URL": "string",
"ENVBUILDER_BASE_IMAGE_CACHE_DIR": "string",
"ENVBUILDER_BINARY_PATH": "string",
"ENVBUILDER_BUILD_CONTEXT_PATH": "string",


@@ -118,7 +118,7 @@ func seedCache(ctx context.Context, t testing.TB, deps testDependencies) {
}
for k, v := range deps.ExtraEnv {
if !strings.HasPrefix(k, "ENVBUILDER_") {
if !strings.HasPrefix(k, envbuilderOptionPrefix) {
continue
}
if _, ok := seedEnv[k]; ok {

internal/tfutil/tfutil.go Normal file

@@ -0,0 +1,92 @@
package tfutil
import (
"context"
"fmt"
"sort"
"strings"
"github.com/coder/envbuilder/log"
"github.com/hashicorp/terraform-plugin-framework/attr"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
)
// TFValueToString converts an attr.Value to its string representation
// based on its Terraform type. This is needed because the String()
// method on an attr.Value creates a 'human-readable' version of the type, which
// leads to quotes, escaped characters, and other assorted sadness.
func TFValueToString(val attr.Value) string {
if val.IsUnknown() || val.IsNull() {
return ""
}
if vs, ok := val.(interface{ ValueString() string }); ok {
return vs.ValueString()
}
if vb, ok := val.(interface{ ValueBool() bool }); ok {
return fmt.Sprintf("%t", vb.ValueBool())
}
if vi, ok := val.(interface{ ValueInt64() int64 }); ok {
return fmt.Sprintf("%d", vi.ValueInt64())
}
panic(fmt.Errorf("tfValueToString: value %T is not a supported type", val))
}
// TFListToStringSlice converts a types.List to a []string by calling
// tfValueToString on each element.
func TFListToStringSlice(l types.List) []string {
els := l.Elements()
ss := make([]string, len(els))
for idx, el := range els {
ss[idx] = TFValueToString(el)
}
return ss
}
// TFMapToStringMap converts a types.Map to a map[string]string by calling
// tfValueToString on each element.
func TFMapToStringMap(m types.Map) map[string]string {
els := m.Elements()
res := make(map[string]string, len(els))
for k, v := range els {
res[k] = TFValueToString(v)
}
return res
}
// TFLogFunc is an adapter to envbuilder/log.Func.
func TFLogFunc(ctx context.Context) log.Func {
return func(level log.Level, format string, args ...any) {
var logFn func(context.Context, string, ...map[string]interface{})
switch level {
case log.LevelTrace:
logFn = tflog.Trace
case log.LevelDebug:
logFn = tflog.Debug
case log.LevelWarn:
logFn = tflog.Warn
case log.LevelError:
logFn = tflog.Error
default:
logFn = tflog.Info
}
logFn(ctx, fmt.Sprintf(format, args...))
}
}
// DockerEnv returns the keys and values of the map in the form "key=value"
// sorted by key in lexicographical order. This is the format expected by
// Docker and some other tools that consume environment variables.
func DockerEnv(m map[string]string) []string {
pairs := make([]string, 0, len(m))
var sb strings.Builder
for k := range m {
_, _ = sb.WriteString(k)
_, _ = sb.WriteRune('=')
_, _ = sb.WriteString(m[k])
pairs = append(pairs, sb.String())
sb.Reset()
}
sort.Strings(pairs)
return pairs
}
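
A short usage sketch of DockerEnv (values are illustrative assumptions; like imgutil, this internal package is only importable from within the module):

package main

import (
	"fmt"

	"github.com/coder/terraform-provider-envbuilder/internal/tfutil"
)

func main() {
	env := map[string]string{
		"ENVBUILDER_GIT_URL":    "https://git.example.com/repo.git",
		"CODER_AGENT_TOKEN":     "some-token",
		"ENVBUILDER_CACHE_REPO": "registry.example.com/cache",
	}

	// DockerEnv returns sorted "key=value" pairs, e.g.:
	//   CODER_AGENT_TOKEN=some-token
	//   ENVBUILDER_CACHE_REPO=registry.example.com/cache
	//   ENVBUILDER_GIT_URL=https://git.example.com/repo.git
	for _, kv := range tfutil.DockerEnv(env) {
		fmt.Println(kv)
	}
}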