2017-06-26 23:46:34 +02:00
|
|
|
// Copyright 2016-2017, Pulumi Corporation. All rights reserved.
|
2017-03-06 16:07:24 +01:00
|
|
|
|
2017-09-22 04:18:21 +02:00
|
|
|
package cmd
|
2017-03-06 16:07:24 +01:00
|
|
|
|
|
|
|
import (
|
2017-10-02 22:35:39 +02:00
|
|
|
"fmt"
|
|
|
|
"sort"
|
2017-10-17 01:01:34 +02:00
|
|
|
"strings"
|
2017-10-02 22:35:39 +02:00
|
|
|
|
2017-10-25 19:31:42 +02:00
|
|
|
"github.com/pulumi/pulumi/pkg/util/contract"
|
|
|
|
|
2017-10-18 19:10:04 +02:00
|
|
|
"github.com/pulumi/pulumi/pkg/pack"
|
2017-10-19 00:37:18 +02:00
|
|
|
"github.com/pulumi/pulumi/pkg/resource/config"
|
2017-10-18 19:10:04 +02:00
|
|
|
|
2017-08-31 23:31:33 +02:00
|
|
|
"github.com/pkg/errors"
|
2017-03-06 16:07:24 +01:00
|
|
|
"github.com/spf13/cobra"
|
|
|
|
|
2017-09-22 04:18:21 +02:00
|
|
|
"github.com/pulumi/pulumi/pkg/tokens"
|
|
|
|
"github.com/pulumi/pulumi/pkg/util/cmdutil"
|
2017-03-06 16:07:24 +01:00
|
|
|
)
|
|
|
|
|
Make major commands more pleasant
This change eliminates the need to constantly type in the environment
name when performing major commands like configuration, planning, and
deployment. It's probably due to my age, however, I keep fat-fingering
simple commands in front of investors and I am embarrassed!
In the new model, there is a notion of a "current environment", and
I have modeled it kinda sorta just like Git's notion of "current branch."
By default, the current environment is set when you `init` something.
Otherwise, there is the `coco env select <env>` command to change it.
(Running this command w/out a new <env> will show you the current one.)
The major commands `config`, `plan`, `deploy`, and `destroy` will prefer
to use the current environment, unless it is overridden by using the
--env flag. All of the `coco env <cmd> <env>` commands still require the
explicit passing of an environment which seems reasonable since they are,
after all, about manipulating environments.
As part of this, I've overhauled the aging workspace settings cruft,
which had fallen into disrepair since the initial prototype.
2017-03-22 03:23:32 +01:00
|
|
|
func newConfigCmd() *cobra.Command {
|
2017-11-14 22:28:27 +01:00
|
|
|
var stack string
|
|
|
|
var showSecrets bool
|
|
|
|
|
2017-03-06 16:07:24 +01:00
|
|
|
cmd := &cobra.Command{
|
2017-10-19 00:37:18 +02:00
|
|
|
Use: "config",
|
|
|
|
Short: "Manage configuration",
|
2017-11-14 22:28:27 +01:00
|
|
|
Long: "Lists all configuration values for a specific stack. To add a new configuration value, run\n" +
|
|
|
|
"'pulumi config set', to remove and existing value run 'pulumi config rm'. To get the value of\n" +
|
|
|
|
"for a specific configuration key, use 'pulumi config get <key-name>'.",
|
|
|
|
Run: cmdutil.RunFunc(func(cmd *cobra.Command, args []string) error {
|
|
|
|
stackName, err := explicitOrCurrent(stack, backend)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
return listConfig(stackName, showSecrets)
|
|
|
|
}),
|
2017-10-19 00:37:18 +02:00
|
|
|
}
|
2017-11-14 22:28:27 +01:00
|
|
|
|
|
|
|
cmd.Flags().BoolVar(
|
|
|
|
&showSecrets, "show-secrets", false,
|
|
|
|
"Show secret values when listing config instead of displaying blinded values")
|
|
|
|
cmd.PersistentFlags().StringVarP(
|
|
|
|
&stack, "stack", "s", "",
|
|
|
|
"Operate on a different stack than the currently selected stack")
|
|
|
|
|
|
|
|
cmd.AddCommand(newConfigGetCmd(&stack))
|
|
|
|
cmd.AddCommand(newConfigRmCmd(&stack))
|
|
|
|
cmd.AddCommand(newConfigSetCmd(&stack))
|
2017-10-19 00:37:18 +02:00
|
|
|
|
|
|
|
return cmd
|
|
|
|
}
|
|
|
|
|
2017-11-14 22:28:27 +01:00
|
|
|
func newConfigGetCmd(stack *string) *cobra.Command {
|
|
|
|
getCmd := &cobra.Command{
|
|
|
|
Use: "get <key>",
|
|
|
|
Short: "Get a single configuration value",
|
|
|
|
Args: cobra.ExactArgs(1),
|
2017-10-19 00:37:18 +02:00
|
|
|
Run: cmdutil.RunFunc(func(cmd *cobra.Command, args []string) error {
|
2017-11-14 22:28:27 +01:00
|
|
|
stackName, err := explicitOrCurrent(*stack, backend)
|
2017-10-25 19:31:42 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2017-10-19 00:37:18 +02:00
|
|
|
|
2017-11-14 22:28:27 +01:00
|
|
|
key, err := parseConfigKey(args[0])
|
|
|
|
if err != nil {
|
|
|
|
return errors.Wrap(err, "invalid configuration key")
|
2017-10-19 00:37:18 +02:00
|
|
|
}
|
2017-11-14 22:28:27 +01:00
|
|
|
return getConfig(stackName, key)
|
2017-10-19 00:37:18 +02:00
|
|
|
}),
|
|
|
|
}
|
|
|
|
|
2017-11-14 22:28:27 +01:00
|
|
|
return getCmd
|
2017-10-19 00:37:18 +02:00
|
|
|
}
|
|
|
|
|
2017-11-14 22:28:27 +01:00
|
|
|
func newConfigRmCmd(stack *string) *cobra.Command {
|
Suport workspace local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scinerio. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
var all bool
|
|
|
|
var save bool
|
2017-10-19 00:37:18 +02:00
|
|
|
|
|
|
|
rmCmd := &cobra.Command{
|
|
|
|
Use: "rm <key>",
|
|
|
|
Short: "Remove configuration value",
|
|
|
|
Args: cobra.ExactArgs(1),
|
2017-04-12 20:12:25 +02:00
|
|
|
Run: cmdutil.RunFunc(func(cmd *cobra.Command, args []string) error {
|
2017-11-14 22:28:27 +01:00
|
|
|
if all && *stack != "" {
|
Suport workspace local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scinerio. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
return errors.New("if --all is specified, an explicit stack can not be provided")
|
|
|
|
}
|
|
|
|
|
|
|
|
var stackName tokens.QName
|
|
|
|
if !all {
|
|
|
|
var err error
|
2017-11-14 22:28:27 +01:00
|
|
|
if stackName, err = explicitOrCurrent(*stack, backend); err != nil {
|
Suport workspace local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scinerio. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
2017-10-03 01:37:12 +02:00
|
|
|
|
2017-10-19 00:37:18 +02:00
|
|
|
key, err := parseConfigKey(args[0])
|
|
|
|
if err != nil {
|
|
|
|
return errors.Wrap(err, "invalid configuration key")
|
2017-03-06 16:07:24 +01:00
|
|
|
}
|
2017-03-07 14:47:42 +01:00
|
|
|
|
Suport workspace local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scinerio. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
if save {
|
|
|
|
return deleteProjectConfiguration(stackName, key)
|
|
|
|
}
|
|
|
|
|
|
|
|
return deleteWorkspaceConfiguration(stackName, key)
|
2017-10-19 00:37:18 +02:00
|
|
|
}),
|
|
|
|
}
|
|
|
|
|
Suport workspace local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scinerio. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
rmCmd.PersistentFlags().BoolVar(
|
|
|
|
&save, "save", false,
|
|
|
|
"Remove the configuration value in the project file instead instead of a locally set value")
|
|
|
|
rmCmd.PersistentFlags().BoolVar(
|
|
|
|
&all, "all", false,
|
|
|
|
"Remove a project wide configuration value that applies to all stacks")
|
2017-10-19 00:37:18 +02:00
|
|
|
|
|
|
|
return rmCmd
|
|
|
|
}
|
|
|
|
|
2017-11-14 22:28:27 +01:00
|
|
|
func newConfigSetCmd(stack *string) *cobra.Command {
|
Suport workspace local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scinerio. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
var all bool
|
|
|
|
var save bool
|
2017-11-14 22:28:27 +01:00
|
|
|
var secret bool
|
2017-10-19 00:37:18 +02:00
|
|
|
|
2017-11-14 22:28:27 +01:00
|
|
|
setCmd := &cobra.Command{
|
|
|
|
Use: "set <key> [value]",
|
2017-10-19 00:37:18 +02:00
|
|
|
Short: "Set configuration value",
|
2017-11-14 22:28:27 +01:00
|
|
|
Args: cobra.RangeArgs(1, 2),
|
2017-10-19 00:37:18 +02:00
|
|
|
Run: cmdutil.RunFunc(func(cmd *cobra.Command, args []string) error {
|
2017-11-14 22:28:27 +01:00
|
|
|
if all && *stack != "" {
|
Suport workspace local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scinerio. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
return errors.New("if --all is specified, an explicit stack can not be provided")
|
|
|
|
}
|
|
|
|
|
|
|
|
var stackName tokens.QName
|
|
|
|
if !all {
|
|
|
|
var err error
|
2017-11-14 22:28:27 +01:00
|
|
|
if stackName, err = explicitOrCurrent(*stack, backend); err != nil {
|
Suport workspace local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scinerio. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
2017-10-19 00:37:18 +02:00
|
|
|
|
2017-10-17 01:01:34 +02:00
|
|
|
key, err := parseConfigKey(args[0])
|
2017-08-31 23:31:33 +02:00
|
|
|
if err != nil {
|
|
|
|
return errors.Wrap(err, "invalid configuration key")
|
|
|
|
}
|
|
|
|
|
2017-11-14 22:28:27 +01:00
|
|
|
var c config.ValueEncrypter
|
|
|
|
if secret {
|
|
|
|
c, err = getSymmetricCrypter()
|
|
|
|
if err != nil {
|
Suport workspace local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scinerio. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
2017-10-19 00:37:18 +02:00
|
|
|
|
|
|
|
var value string
|
|
|
|
if len(args) == 2 {
|
|
|
|
value = args[1]
|
2017-11-14 22:28:27 +01:00
|
|
|
} else if !secret {
|
|
|
|
value, err = readConsole("value")
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2017-10-19 00:37:18 +02:00
|
|
|
} else {
|
|
|
|
value, err = readConsoleNoEchoWithPrompt("value")
|
|
|
|
if err != nil {
|
|
|
|
return err
|
2017-08-31 23:31:33 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-11-14 22:28:27 +01:00
|
|
|
if !secret {
|
|
|
|
err = setConfiguration(stackName, key, config.NewValue(value), save)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
fmt.Printf("Set key '%s' with value '%s' as plaintext\n", args[0], value)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
enc, err := c.EncryptValue(value)
|
2017-10-25 21:01:30 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2017-11-14 22:28:27 +01:00
|
|
|
err = setConfiguration(stackName, key, config.NewSecureValue(enc), save)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
fmt.Printf("Set key '%s' with with encrypted value\n", args[0])
|
|
|
|
return nil
|
2017-03-07 14:47:42 +01:00
|
|
|
}),
|
2017-03-06 16:07:24 +01:00
|
|
|
}
|
Make major commands more pleasant
This change eliminates the need to constantly type in the environment
name when performing major commands like configuration, planning, and
deployment. It's probably due to my age, however, I keep fat-fingering
simple commands in front of investors and I am embarrassed!
In the new model, there is a notion of a "current environment", and
I have modeled it kinda sorta just like Git's notion of "current branch."
By default, the current environment is set when you `init` something.
Otherwise, there is the `coco env select <env>` command to change it.
(Running this command w/out a new <env> will show you the current one.)
The major commands `config`, `plan`, `deploy`, and `destroy` will prefer
to use the current environment, unless it is overridden by using the
--env flag. All of the `coco env <cmd> <env>` commands still require the
explicit passing of an environment which seems reasonable since they are,
after all, about manipulating environments.
As part of this, I've overhauled the aging workspace settings cruft,
which had fallen into disrepair since the initial prototype.
2017-03-22 03:23:32 +01:00
|
|
|
|
2017-11-14 22:28:27 +01:00
|
|
|
setCmd.PersistentFlags().BoolVar(
|
|
|
|
&secret, "secret", false,
|
|
|
|
"Encrypt the value instead of storing it in plaintext")
|
|
|
|
setCmd.PersistentFlags().BoolVar(
|
Suport workspace local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scinerio. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
&save, "save", false,
|
|
|
|
"Save the configuration value in the project file instead of locally")
|
2017-11-14 22:28:27 +01:00
|
|
|
setCmd.PersistentFlags().BoolVar(
|
Suport workspace local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scinerio. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
&all, "all", false,
|
|
|
|
"Set a configuration value for all stacks for this project")
|
Make major commands more pleasant
This change eliminates the need to constantly type in the environment
name when performing major commands like configuration, planning, and
deployment. It's probably due to my age, however, I keep fat-fingering
simple commands in front of investors and I am embarrassed!
In the new model, there is a notion of a "current environment", and
I have modeled it kinda sorta just like Git's notion of "current branch."
By default, the current environment is set when you `init` something.
Otherwise, there is the `coco env select <env>` command to change it.
(Running this command w/out a new <env> will show you the current one.)
The major commands `config`, `plan`, `deploy`, and `destroy` will prefer
to use the current environment, unless it is overridden by using the
--env flag. All of the `coco env <cmd> <env>` commands still require the
explicit passing of an environment which seems reasonable since they are,
after all, about manipulating environments.
As part of this, I've overhauled the aging workspace settings cruft,
which had fallen into disrepair since the initial prototype.
2017-03-22 03:23:32 +01:00
|
|
|
|
2017-11-14 22:28:27 +01:00
|
|
|
return setCmd
|
2017-03-06 16:07:24 +01:00
|
|
|
}
|
2017-10-02 22:35:39 +02:00
|
|
|
|
2017-10-17 01:01:34 +02:00
|
|
|
func parseConfigKey(key string) (tokens.ModuleMember, error) {
|
|
|
|
// As a convience, we'll treat any key with no delimiter as if:
|
|
|
|
// <program-name>:config:<key> had been written instead
|
|
|
|
if !strings.Contains(key, tokens.TokenDelimiter) {
|
2017-10-17 21:28:03 +02:00
|
|
|
pkg, err := getPackage()
|
2017-10-17 01:01:34 +02:00
|
|
|
if err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
|
|
|
|
return tokens.ParseModuleMember(fmt.Sprintf("%s:config:%s", pkg.Name, key))
|
|
|
|
}
|
|
|
|
|
|
|
|
return tokens.ParseModuleMember(key)
|
|
|
|
}
|
|
|
|
|
|
|
|
func prettyKey(key string) string {
|
2017-10-17 21:28:03 +02:00
|
|
|
pkg, err := getPackage()
|
2017-10-17 01:01:34 +02:00
|
|
|
if err != nil {
|
|
|
|
return key
|
|
|
|
}
|
|
|
|
|
2017-10-18 19:10:04 +02:00
|
|
|
return prettyKeyForPackage(key, pkg)
|
|
|
|
}
|
|
|
|
|
2017-10-17 21:28:03 +02:00
|
|
|
func prettyKeyForPackage(key string, pkg *pack.Package) string {
|
2017-10-17 01:01:34 +02:00
|
|
|
s := key
|
|
|
|
defaultPrefix := fmt.Sprintf("%s:config:", pkg.Name)
|
|
|
|
|
|
|
|
if strings.HasPrefix(s, defaultPrefix) {
|
|
|
|
return s[len(defaultPrefix):]
|
|
|
|
}
|
|
|
|
|
|
|
|
return s
|
|
|
|
}
|
|
|
|
|
Support workspace-local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scenario. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before committing. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tells commands to actually operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
func setConfiguration(stackName tokens.QName, key tokens.ModuleMember, value config.Value, save bool) error {
|
|
|
|
if save {
|
|
|
|
return setProjectConfiguration(stackName, key, value)
|
|
|
|
}
|
|
|
|
|
|
|
|
return setWorkspaceConfiguration(stackName, key, value)
|
|
|
|
}
|
|
|
|
|
2017-10-25 21:01:30 +02:00
|
|
|
func listConfig(stackName tokens.QName, showSecrets bool) error {
|
2017-10-19 00:37:18 +02:00
|
|
|
cfg, err := getConfiguration(stackName)
|
2017-10-02 22:35:39 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2017-10-25 21:01:30 +02:00
|
|
|
var decrypter config.ValueDecrypter = blindingDecrypter{}
|
2017-10-19 00:37:18 +02:00
|
|
|
|
2017-10-25 21:01:30 +02:00
|
|
|
if hasSecureValue(cfg) && showSecrets {
|
2017-10-19 00:37:18 +02:00
|
|
|
decrypter, err = getSymmetricCrypter()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if cfg != nil {
|
2017-10-02 22:35:39 +02:00
|
|
|
fmt.Printf("%-32s %-32s\n", "KEY", "VALUE")
|
|
|
|
var keys []string
|
2017-10-19 00:37:18 +02:00
|
|
|
for key := range cfg {
|
Bring back component outputs
This change brings back component outputs to the overall system again.
In doing so, it generally overhauls the way we do resource RPCs a bit:
* Instead of RegisterResource and CompleteResource, we call these
BeginRegisterResource and EndRegisterResource, which begins to model
these as effectively "asynchronous" resource requests. This should also
help with parallelism (https://github.com/pulumi/pulumi/issues/106).
* Flip the CLI/engine a little on its head. Rather than it driving the
planning and deployment process, we move more to a model where it
simply observes it. This is done by implementing an event handler
interface with three events: OnResourceStepPre, OnResourceStepPost,
and OnResourceComplete. The first two are invoked immediately before
and after any step operation, and the latter is invoked whenever a
EndRegisterResource comes in. The reason for the asymmetry here is
that the checkpointing logic in the deployment engine is largely
untouched (intentionally, as this is a sensitive part of the system),
and so the "begin"/"end" nature doesn't flow through faithfully.
* Also make the engine more event-oriented in its terminology and the
way it handles the incoming BeginRegisterResource and
EndRegisterResource events from the language host. This is the first
step down a long road of incrementally refactoring the engine to work
this way, a necessary prerequisite for parallelism.
2017-11-29 16:42:14 +01:00
|
|
|
// Note that we use the fully qualified module member here instead of a `prettyKey`, this lets us ensure
|
|
|
|
// that all the config values for the current program are displayed next to one another in the output.
|
2017-10-02 22:35:39 +02:00
|
|
|
keys = append(keys, string(key))
|
|
|
|
}
|
|
|
|
sort.Strings(keys)
|
|
|
|
for _, key := range keys {
|
2017-10-19 00:37:18 +02:00
|
|
|
decrypted, err := cfg[tokens.ModuleMember(key)].Value(decrypter)
|
|
|
|
if err != nil {
|
|
|
|
return errors.Wrap(err, "could not decrypt configuration value")
|
|
|
|
}
|
|
|
|
|
|
|
|
fmt.Printf("%-32s %-32s\n", prettyKey(key), decrypted)
|
2017-10-02 22:35:39 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-10-16 21:04:35 +02:00
|
|
|
func getConfig(stackName tokens.QName, key tokens.ModuleMember) error {
|
2017-10-19 00:37:18 +02:00
|
|
|
cfg, err := getConfiguration(stackName)
|
2017-10-02 22:35:39 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2017-10-19 00:37:18 +02:00
|
|
|
if cfg != nil {
|
|
|
|
if v, ok := cfg[key]; ok {
|
|
|
|
var decrypter config.ValueDecrypter = panicCrypter{}
|
|
|
|
|
|
|
|
if v.Secure() {
|
|
|
|
decrypter, err = getSymmetricCrypter()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
decrypted, err := v.Value(decrypter)
|
|
|
|
if err != nil {
|
|
|
|
return errors.Wrap(err, "could not decrypt configuation value")
|
|
|
|
}
|
|
|
|
|
|
|
|
fmt.Printf("%v\n", decrypted)
|
|
|
|
|
2017-10-02 22:35:39 +02:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-10-17 01:01:34 +02:00
|
|
|
return errors.Errorf("configuration key '%v' not found for stack '%v'", prettyKey(key.String()), stackName)
|
2017-10-10 02:09:32 +02:00
|
|
|
}
|
|
|
|
|
2017-10-19 00:37:18 +02:00
|
|
|
func getConfiguration(stackName tokens.QName) (map[tokens.ModuleMember]config.Value, error) {
|
2017-10-25 19:31:42 +02:00
|
|
|
contract.Require(stackName != "", "stackName")
|
|
|
|
|
Suport workspace local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scinerio. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
workspace, err := newWorkspace()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2017-10-17 21:28:03 +02:00
|
|
|
pkg, err := getPackage()
|
2017-10-10 02:09:32 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-10-02 22:35:39 +02:00
|
|
|
|
Suport workspace local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scinerio. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
configs := make([]map[tokens.ModuleMember]config.Value, 4)
|
|
|
|
configs = append(configs, pkg.Config)
|
|
|
|
|
|
|
|
if stackInfo, has := pkg.Stacks[stackName]; has {
|
|
|
|
configs = append(configs, stackInfo.Config)
|
|
|
|
}
|
|
|
|
|
|
|
|
if localAllStackConfig, has := workspace.Settings().Config[""]; has {
|
|
|
|
configs = append(configs, localAllStackConfig)
|
2017-10-17 23:19:47 +02:00
|
|
|
}
|
|
|
|
|
Suport workspace local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scinerio. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
if localStackConfig, has := workspace.Settings().Config[stackName]; has {
|
|
|
|
configs = append(configs, localStackConfig)
|
|
|
|
}
|
|
|
|
|
|
|
|
return mergeConfigs(configs...), nil
|
2017-10-02 22:35:39 +02:00
|
|
|
}
|
2017-10-10 02:14:21 +02:00
|
|
|
|
Support workspace-local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scinerio. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
func deleteProjectConfiguration(stackName tokens.QName, key tokens.ModuleMember) error {
|
2017-10-17 21:28:03 +02:00
|
|
|
pkg, err := getPackage()
|
2017-10-10 02:14:21 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2017-10-17 23:19:47 +02:00
|
|
|
if stackName == "" {
|
|
|
|
if pkg.Config != nil {
|
|
|
|
delete(pkg.Config, key)
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if pkg.Stacks[stackName].Config != nil {
|
|
|
|
delete(pkg.Stacks[stackName].Config, key)
|
|
|
|
}
|
2017-10-10 02:14:21 +02:00
|
|
|
}
|
|
|
|
|
2017-10-17 21:28:03 +02:00
|
|
|
return savePackage(pkg)
|
2017-10-10 02:14:21 +02:00
|
|
|
}
|
2017-10-10 02:25:44 +02:00
|
|
|
|
Support workspace-local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scinerio. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
func deleteWorkspaceConfiguration(stackName tokens.QName, key tokens.ModuleMember) error {
|
|
|
|
workspace, err := newWorkspace()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
if config, has := workspace.Settings().Config[stackName]; has {
|
|
|
|
delete(config, key)
|
|
|
|
}
|
|
|
|
|
|
|
|
return workspace.Save()
|
|
|
|
}
|
|
|
|
|
|
|
|
func setProjectConfiguration(stackName tokens.QName, key tokens.ModuleMember, value config.Value) error {
|
2017-10-17 21:28:03 +02:00
|
|
|
pkg, err := getPackage()
|
2017-10-10 02:25:44 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2017-10-17 23:19:47 +02:00
|
|
|
if stackName == "" {
|
|
|
|
if pkg.Config == nil {
|
2017-10-19 00:37:18 +02:00
|
|
|
pkg.Config = make(map[tokens.ModuleMember]config.Value)
|
2017-10-17 23:19:47 +02:00
|
|
|
}
|
|
|
|
|
2017-10-25 21:01:30 +02:00
|
|
|
pkg.Config[key] = value
|
2017-10-17 23:19:47 +02:00
|
|
|
} else {
|
|
|
|
if pkg.Stacks == nil {
|
|
|
|
pkg.Stacks = make(map[tokens.QName]pack.StackInfo)
|
|
|
|
}
|
|
|
|
|
|
|
|
if pkg.Stacks[stackName].Config == nil {
|
|
|
|
si := pkg.Stacks[stackName]
|
2017-10-19 00:37:18 +02:00
|
|
|
si.Config = make(map[tokens.ModuleMember]config.Value)
|
2017-10-17 23:19:47 +02:00
|
|
|
pkg.Stacks[stackName] = si
|
|
|
|
}
|
|
|
|
|
2017-10-25 21:01:30 +02:00
|
|
|
pkg.Stacks[stackName].Config[key] = value
|
2017-10-17 23:19:47 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return savePackage(pkg)
|
|
|
|
}
|
|
|
|
|
Support workspace-local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scinerio. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
func setWorkspaceConfiguration(stackName tokens.QName, key tokens.ModuleMember, value config.Value) error {
|
|
|
|
workspace, err := newWorkspace()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
2017-10-17 21:28:03 +02:00
|
|
|
}
|
2017-10-10 02:25:44 +02:00
|
|
|
|
Suport workspace local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scinerio. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
if _, has := workspace.Settings().Config[stackName]; !has {
|
|
|
|
workspace.Settings().Config[stackName] = make(map[tokens.ModuleMember]config.Value)
|
2017-10-10 02:25:44 +02:00
|
|
|
}
|
|
|
|
|
Suport workspace local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scinerio. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
workspace.Settings().Config[stackName][key] = value
|
|
|
|
|
|
|
|
return workspace.Save()
|
|
|
|
}
|
|
|
|
|
|
|
|
func mergeConfigs(configs ...map[tokens.ModuleMember]config.Value) map[tokens.ModuleMember]config.Value {
|
2017-10-19 00:37:18 +02:00
|
|
|
merged := make(map[tokens.ModuleMember]config.Value)
|
2017-10-10 02:25:44 +02:00
|
|
|
|
Suport workspace local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scinerio. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
for _, config := range configs {
|
|
|
|
for key, value := range config {
|
|
|
|
merged[key] = value
|
|
|
|
}
|
2017-10-17 23:19:47 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return merged
|
2017-10-10 02:25:44 +02:00
|
|
|
}
|