2017-06-26 23:46:34 +02:00
|
|
|
// Copyright 2016-2017, Pulumi Corporation. All rights reserved.
|
2017-03-06 16:07:24 +01:00
|
|
|
|
2017-09-22 04:18:21 +02:00
|
|
|
package cmd
|
2017-03-06 16:07:24 +01:00
|
|
|
|
|
|
|
import (
|
2017-10-02 22:35:39 +02:00
|
|
|
"fmt"
|
|
|
|
"sort"
|
2017-10-17 01:01:34 +02:00
|
|
|
"strings"
|
2017-10-02 22:35:39 +02:00
|
|
|
|
2017-10-25 19:31:42 +02:00
|
|
|
"github.com/pulumi/pulumi/pkg/util/contract"
|
|
|
|
|
2017-10-18 19:10:04 +02:00
|
|
|
"github.com/pulumi/pulumi/pkg/pack"
|
2017-10-19 00:37:18 +02:00
|
|
|
"github.com/pulumi/pulumi/pkg/resource/config"
|
2017-10-18 19:10:04 +02:00
|
|
|
|
2017-08-31 23:31:33 +02:00
|
|
|
"github.com/pkg/errors"
|
2017-03-06 16:07:24 +01:00
|
|
|
"github.com/spf13/cobra"
|
|
|
|
|
2017-09-22 04:18:21 +02:00
|
|
|
"github.com/pulumi/pulumi/pkg/tokens"
|
|
|
|
"github.com/pulumi/pulumi/pkg/util/cmdutil"
|
2017-03-06 16:07:24 +01:00
|
|
|
)
|
|
|
|
|
Make major commands more pleasant
This change eliminates the need to constantly type in the environment
name when performing major commands like configuration, planning, and
deployment. It's probably due to my age, however, I keep fat-fingering
simple commands in front of investors and I am embarrassed!
In the new model, there is a notion of a "current environment", and
I have modeled it kinda sorta just like Git's notion of "current branch."
By default, the current environment is set when you `init` something.
Otherwise, there is the `coco env select <env>` command to change it.
(Running this command w/out a new <env> will show you the current one.)
The major commands `config`, `plan`, `deploy`, and `destroy` will prefer
to use the current environment, unless it is overridden by using the
--env flag. All of the `coco env <cmd> <env>` commands still require the
explicit passing of an environment which seems reasonable since they are,
after all, about manipulating environments.
As part of this, I've overhauled the aging workspace settings cruft,
which had fallen into disrepair since the initial prototype.
2017-03-22 03:23:32 +01:00
|
|
|
func newConfigCmd() *cobra.Command {
|
2017-03-06 16:07:24 +01:00
|
|
|
cmd := &cobra.Command{
|
2017-10-19 00:37:18 +02:00
|
|
|
Use: "config",
|
|
|
|
Short: "Manage configuration",
|
|
|
|
}
|
|
|
|
cmd.AddCommand(newConfigLsCmd())
|
|
|
|
cmd.AddCommand(newConfigRmCmd())
|
|
|
|
cmd.AddCommand(newConfigTextCmd())
|
|
|
|
cmd.AddCommand(newConfigSecretCmd())
|
|
|
|
|
|
|
|
return cmd
|
|
|
|
}
|
|
|
|
|
|
|
|
func newConfigLsCmd() *cobra.Command {
|
|
|
|
var stack string
|
2017-10-25 21:01:30 +02:00
|
|
|
var showSecrets bool
|
2017-10-19 00:37:18 +02:00
|
|
|
|
|
|
|
lsCmd := &cobra.Command{
|
|
|
|
Use: "ls [key]",
|
|
|
|
Short: "List configuration for a stack",
|
|
|
|
Args: cobra.MaximumNArgs(1),
|
|
|
|
Run: cmdutil.RunFunc(func(cmd *cobra.Command, args []string) error {
|
2017-11-01 22:55:16 +01:00
|
|
|
stackName, err := explicitOrCurrent(stack, backend)
|
2017-10-25 19:31:42 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2017-10-19 00:37:18 +02:00
|
|
|
|
|
|
|
if len(args) == 1 {
|
|
|
|
key, err := parseConfigKey(args[0])
|
|
|
|
if err != nil {
|
|
|
|
return errors.Wrap(err, "invalid configuration key")
|
|
|
|
}
|
|
|
|
return getConfig(stackName, key)
|
|
|
|
}
|
|
|
|
|
2017-10-25 21:01:30 +02:00
|
|
|
return listConfig(stackName, showSecrets)
|
2017-10-19 00:37:18 +02:00
|
|
|
}),
|
|
|
|
}
|
|
|
|
|
|
|
|
lsCmd.PersistentFlags().StringVarP(
|
|
|
|
&stack, "stack", "s", "",
|
Suport workspace local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scinerio. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
"List configuration for a different stack than the currently selected stack")
|
2017-10-25 21:01:30 +02:00
|
|
|
lsCmd.PersistentFlags().BoolVar(
|
|
|
|
&showSecrets, "show-secrets", false,
|
|
|
|
"Show secret values when listing config instead of displaying blinded values")
|
2017-10-19 00:37:18 +02:00
|
|
|
|
|
|
|
return lsCmd
|
|
|
|
}
|
|
|
|
|
|
|
|
func newConfigRmCmd() *cobra.Command {
|
Suport workspace local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scinerio. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
var all bool
|
|
|
|
var save bool
|
2017-10-19 00:37:18 +02:00
|
|
|
var stack string
|
|
|
|
|
|
|
|
rmCmd := &cobra.Command{
|
|
|
|
Use: "rm <key>",
|
|
|
|
Short: "Remove configuration value",
|
|
|
|
Args: cobra.ExactArgs(1),
|
2017-04-12 20:12:25 +02:00
|
|
|
Run: cmdutil.RunFunc(func(cmd *cobra.Command, args []string) error {
|
Suport workspace local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scinerio. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
if all && stack != "" {
|
|
|
|
return errors.New("if --all is specified, an explicit stack can not be provided")
|
|
|
|
}
|
|
|
|
|
|
|
|
var stackName tokens.QName
|
|
|
|
if !all {
|
|
|
|
var err error
|
|
|
|
if stackName, err = explicitOrCurrent(stack, backend); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
2017-10-03 01:37:12 +02:00
|
|
|
|
2017-10-19 00:37:18 +02:00
|
|
|
key, err := parseConfigKey(args[0])
|
|
|
|
if err != nil {
|
|
|
|
return errors.Wrap(err, "invalid configuration key")
|
2017-03-06 16:07:24 +01:00
|
|
|
}
|
2017-03-07 14:47:42 +01:00
|
|
|
|
Suport workspace local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scinerio. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
if save {
|
|
|
|
return deleteProjectConfiguration(stackName, key)
|
|
|
|
}
|
|
|
|
|
|
|
|
return deleteWorkspaceConfiguration(stackName, key)
|
2017-10-19 00:37:18 +02:00
|
|
|
}),
|
|
|
|
}
|
|
|
|
|
|
|
|
rmCmd.PersistentFlags().StringVarP(
|
|
|
|
&stack, "stack", "s", "",
|
Suport workspace local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scinerio. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
"Target a specific stack instead of the default stack")
|
|
|
|
rmCmd.PersistentFlags().BoolVar(
|
|
|
|
&save, "save", false,
|
|
|
|
"Remove the configuration value in the project file instead instead of a locally set value")
|
|
|
|
rmCmd.PersistentFlags().BoolVar(
|
|
|
|
&all, "all", false,
|
|
|
|
"Remove a project wide configuration value that applies to all stacks")
|
2017-10-19 00:37:18 +02:00
|
|
|
|
|
|
|
return rmCmd
|
|
|
|
}
|
|
|
|
|
|
|
|
func newConfigTextCmd() *cobra.Command {
|
Suport workspace local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scinerio. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
var all bool
|
|
|
|
var save bool
|
2017-10-19 00:37:18 +02:00
|
|
|
var stack string
|
|
|
|
|
|
|
|
textCmd := &cobra.Command{
|
|
|
|
Use: "text <key> <value>",
|
|
|
|
Short: "Set configuration value",
|
|
|
|
Args: cobra.ExactArgs(2),
|
|
|
|
Run: cmdutil.RunFunc(func(cmd *cobra.Command, args []string) error {
|
Suport workspace local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scinerio. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
if all && stack != "" {
|
|
|
|
return errors.New("if --all is specified, an explicit stack can not be provided")
|
|
|
|
}
|
|
|
|
|
|
|
|
var stackName tokens.QName
|
|
|
|
if !all {
|
|
|
|
var err error
|
|
|
|
if stackName, err = explicitOrCurrent(stack, backend); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
2017-10-19 00:37:18 +02:00
|
|
|
|
2017-10-17 01:01:34 +02:00
|
|
|
key, err := parseConfigKey(args[0])
|
2017-08-31 23:31:33 +02:00
|
|
|
if err != nil {
|
|
|
|
return errors.Wrap(err, "invalid configuration key")
|
|
|
|
}
|
|
|
|
|
Suport workspace local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scinerio. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
return setConfiguration(stackName, key, config.NewValue(args[1]), save)
|
2017-10-19 00:37:18 +02:00
|
|
|
}),
|
|
|
|
}
|
|
|
|
|
|
|
|
textCmd.PersistentFlags().StringVarP(
|
|
|
|
&stack, "stack", "s", "",
|
Suport workspace local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scinerio. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
"Target a specific stack instead of the default stack")
|
|
|
|
textCmd.PersistentFlags().BoolVar(
|
|
|
|
&save, "save", false,
|
|
|
|
"Save the configuration value in the project file instead of locally")
|
|
|
|
textCmd.PersistentFlags().BoolVar(
|
|
|
|
&all, "all", false,
|
|
|
|
"Set a configuration value for all stacks for this project")
|
2017-10-19 00:37:18 +02:00
|
|
|
|
|
|
|
return textCmd
|
|
|
|
}
|
|
|
|
|
|
|
|
func newConfigSecretCmd() *cobra.Command {
|
Support workspace local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scenario. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before committing. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have four "slots" that configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
var all bool
|
|
|
|
var save bool
|
2017-10-19 00:37:18 +02:00
|
|
|
var stack string
|
|
|
|
|
|
|
|
secretCmd := &cobra.Command{
|
|
|
|
Use: "secret <key> [value]",
|
|
|
|
Short: "Set an encrypted configuration value",
|
|
|
|
Args: cobra.RangeArgs(1, 2),
|
|
|
|
Run: cmdutil.RunFunc(func(cmd *cobra.Command, args []string) error {
|
Suport workspace local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scinerio. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
if all && stack != "" {
|
|
|
|
return errors.New("if --all is specified, an explicit stack can not be provided")
|
|
|
|
}
|
|
|
|
|
|
|
|
var stackName tokens.QName
|
|
|
|
if !all {
|
|
|
|
var err error
|
|
|
|
if stackName, err = explicitOrCurrent(stack, backend); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
2017-10-19 00:37:18 +02:00
|
|
|
|
|
|
|
key, err := parseConfigKey(args[0])
|
|
|
|
if err != nil {
|
|
|
|
return errors.Wrap(err, "invalid configuration key")
|
|
|
|
}
|
|
|
|
|
2017-10-25 21:01:30 +02:00
|
|
|
c, err := getSymmetricCrypter()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2017-10-19 00:37:18 +02:00
|
|
|
var value string
|
|
|
|
if len(args) == 2 {
|
|
|
|
value = args[1]
|
|
|
|
} else {
|
|
|
|
value, err = readConsoleNoEchoWithPrompt("value")
|
|
|
|
if err != nil {
|
|
|
|
return err
|
2017-08-31 23:31:33 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-10-25 21:01:30 +02:00
|
|
|
encryptedValue, err := c.EncryptValue(value)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
Support workspace-local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scenario. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
return setConfiguration(stackName, key, config.NewSecureValue(encryptedValue), save)
|
2017-03-07 14:47:42 +01:00
|
|
|
}),
|
2017-03-06 16:07:24 +01:00
|
|
|
}
|
Make major commands more pleasant
This change eliminates the need to constantly type in the environment
name when performing major commands like configuration, planning, and
deployment. It's probably due to my age, however, I keep fat-fingering
simple commands in front of investors and I am embarrassed!
In the new model, there is a notion of a "current environment", and
I have modeled it kinda sorta just like Git's notion of "current branch."
By default, the current environment is set when you `init` something.
Otherwise, there is the `coco env select <env>` command to change it.
(Running this command w/out a new <env> will show you the current one.)
The major commands `config`, `plan`, `deploy`, and `destroy` will prefer
to use the current environment, unless it is overridden by using the
--env flag. All of the `coco env <cmd> <env>` commands still require the
explicit passing of an environment which seems reasonable since they are,
after all, about manipulating environments.
As part of this, I've overhauled the aging workspace settings cruft,
which had fallen into disrepair since the initial prototype.
2017-03-22 03:23:32 +01:00
|
|
|
|
2017-10-19 00:37:18 +02:00
|
|
|
secretCmd.PersistentFlags().StringVarP(
|
2017-10-16 21:04:35 +02:00
|
|
|
&stack, "stack", "s", "",
|
Suport workspace local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scinerio. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
"Target a specific stack instead of the default stack")
|
|
|
|
secretCmd.PersistentFlags().BoolVar(
|
|
|
|
&save, "save", false,
|
|
|
|
"Save the configuration value in the project file instead of locally")
|
|
|
|
secretCmd.PersistentFlags().BoolVar(
|
|
|
|
&all, "all", false,
|
|
|
|
"Set a configuration value for all stacks for this project")
|
Make major commands more pleasant
This change eliminates the need to constantly type in the environment
name when performing major commands like configuration, planning, and
deployment. It's probably due to my age, however, I keep fat-fingering
simple commands in front of investors and I am embarrassed!
In the new model, there is a notion of a "current environment", and
I have modeled it kinda sorta just like Git's notion of "current branch."
By default, the current environment is set when you `init` something.
Otherwise, there is the `coco env select <env>` command to change it.
(Running this command w/out a new <env> will show you the current one.)
The major commands `config`, `plan`, `deploy`, and `destroy` will prefer
to use the current environment, unless it is overridden by using the
--env flag. All of the `coco env <cmd> <env>` commands still require the
explicit passing of an environment which seems reasonable since they are,
after all, about manipulating environments.
As part of this, I've overhauled the aging workspace settings cruft,
which had fallen into disrepair since the initial prototype.
2017-03-22 03:23:32 +01:00
|
|
|
|
2017-10-19 00:37:18 +02:00
|
|
|
return secretCmd
|
2017-03-06 16:07:24 +01:00
|
|
|
}
|
2017-10-02 22:35:39 +02:00
|
|
|
|
2017-10-17 01:01:34 +02:00
|
|
|
func parseConfigKey(key string) (tokens.ModuleMember, error) {
|
|
|
|
// As a convience, we'll treat any key with no delimiter as if:
|
|
|
|
// <program-name>:config:<key> had been written instead
|
|
|
|
if !strings.Contains(key, tokens.TokenDelimiter) {
|
2017-10-17 21:28:03 +02:00
|
|
|
pkg, err := getPackage()
|
2017-10-17 01:01:34 +02:00
|
|
|
if err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
|
|
|
|
return tokens.ParseModuleMember(fmt.Sprintf("%s:config:%s", pkg.Name, key))
|
|
|
|
}
|
|
|
|
|
|
|
|
return tokens.ParseModuleMember(key)
|
|
|
|
}
|
|
|
|
|
|
|
|
func prettyKey(key string) string {
|
2017-10-17 21:28:03 +02:00
|
|
|
pkg, err := getPackage()
|
2017-10-17 01:01:34 +02:00
|
|
|
if err != nil {
|
|
|
|
return key
|
|
|
|
}
|
|
|
|
|
2017-10-18 19:10:04 +02:00
|
|
|
return prettyKeyForPackage(key, pkg)
|
|
|
|
}
|
|
|
|
|
2017-10-17 21:28:03 +02:00
|
|
|
func prettyKeyForPackage(key string, pkg *pack.Package) string {
|
2017-10-17 01:01:34 +02:00
|
|
|
s := key
|
|
|
|
defaultPrefix := fmt.Sprintf("%s:config:", pkg.Name)
|
|
|
|
|
|
|
|
if strings.HasPrefix(s, defaultPrefix) {
|
|
|
|
return s[len(defaultPrefix):]
|
|
|
|
}
|
|
|
|
|
|
|
|
return s
|
|
|
|
}
|
|
|
|
|
Suport workspace local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scinerio. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
func setConfiguration(stackName tokens.QName, key tokens.ModuleMember, value config.Value, save bool) error {
|
|
|
|
if save {
|
|
|
|
return setProjectConfiguration(stackName, key, value)
|
|
|
|
}
|
|
|
|
|
|
|
|
return setWorkspaceConfiguration(stackName, key, value)
|
|
|
|
}
|
|
|
|
|
2017-10-25 21:01:30 +02:00
|
|
|
func listConfig(stackName tokens.QName, showSecrets bool) error {
|
2017-10-19 00:37:18 +02:00
|
|
|
cfg, err := getConfiguration(stackName)
|
2017-10-02 22:35:39 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2017-10-25 21:01:30 +02:00
|
|
|
var decrypter config.ValueDecrypter = blindingDecrypter{}
|
2017-10-19 00:37:18 +02:00
|
|
|
|
2017-10-25 21:01:30 +02:00
|
|
|
if hasSecureValue(cfg) && showSecrets {
|
2017-10-19 00:37:18 +02:00
|
|
|
decrypter, err = getSymmetricCrypter()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if cfg != nil {
|
2017-10-02 22:35:39 +02:00
|
|
|
fmt.Printf("%-32s %-32s\n", "KEY", "VALUE")
|
|
|
|
var keys []string
|
2017-10-19 00:37:18 +02:00
|
|
|
for key := range cfg {
|
2017-10-17 01:01:34 +02:00
|
|
|
// Note that we use the fully qualified module member here instead of a `prettyKey`, this lets us ensure that all the config
|
|
|
|
// values for the current program are displayed next to one another in the output.
|
2017-10-02 22:35:39 +02:00
|
|
|
keys = append(keys, string(key))
|
|
|
|
}
|
|
|
|
sort.Strings(keys)
|
|
|
|
for _, key := range keys {
|
2017-10-19 00:37:18 +02:00
|
|
|
decrypted, err := cfg[tokens.ModuleMember(key)].Value(decrypter)
|
|
|
|
if err != nil {
|
|
|
|
return errors.Wrap(err, "could not decrypt configuration value")
|
|
|
|
}
|
|
|
|
|
|
|
|
fmt.Printf("%-32s %-32s\n", prettyKey(key), decrypted)
|
2017-10-02 22:35:39 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-10-16 21:04:35 +02:00
|
|
|
func getConfig(stackName tokens.QName, key tokens.ModuleMember) error {
|
2017-10-19 00:37:18 +02:00
|
|
|
cfg, err := getConfiguration(stackName)
|
2017-10-02 22:35:39 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2017-10-19 00:37:18 +02:00
|
|
|
if cfg != nil {
|
|
|
|
if v, ok := cfg[key]; ok {
|
|
|
|
var decrypter config.ValueDecrypter = panicCrypter{}
|
|
|
|
|
|
|
|
if v.Secure() {
|
|
|
|
decrypter, err = getSymmetricCrypter()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
decrypted, err := v.Value(decrypter)
|
|
|
|
if err != nil {
|
|
|
|
return errors.Wrap(err, "could not decrypt configuation value")
|
|
|
|
}
|
|
|
|
|
|
|
|
fmt.Printf("%v\n", decrypted)
|
|
|
|
|
2017-10-02 22:35:39 +02:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-10-17 01:01:34 +02:00
|
|
|
return errors.Errorf("configuration key '%v' not found for stack '%v'", prettyKey(key.String()), stackName)
|
2017-10-10 02:09:32 +02:00
|
|
|
}
|
|
|
|
|
2017-10-19 00:37:18 +02:00
|
|
|
func getConfiguration(stackName tokens.QName) (map[tokens.ModuleMember]config.Value, error) {
|
2017-10-25 19:31:42 +02:00
|
|
|
contract.Require(stackName != "", "stackName")
|
|
|
|
|
Suport workspace local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scinerio. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
workspace, err := newWorkspace()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2017-10-17 21:28:03 +02:00
|
|
|
pkg, err := getPackage()
|
2017-10-10 02:09:32 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-10-02 22:35:39 +02:00
|
|
|
|
Suport workspace local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scinerio. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
configs := make([]map[tokens.ModuleMember]config.Value, 4)
|
|
|
|
configs = append(configs, pkg.Config)
|
|
|
|
|
|
|
|
if stackInfo, has := pkg.Stacks[stackName]; has {
|
|
|
|
configs = append(configs, stackInfo.Config)
|
|
|
|
}
|
|
|
|
|
|
|
|
if localAllStackConfig, has := workspace.Settings().Config[""]; has {
|
|
|
|
configs = append(configs, localAllStackConfig)
|
2017-10-17 23:19:47 +02:00
|
|
|
}
|
|
|
|
|
Suport workspace local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scinerio. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
if localStackConfig, has := workspace.Settings().Config[stackName]; has {
|
|
|
|
configs = append(configs, localStackConfig)
|
|
|
|
}
|
|
|
|
|
|
|
|
return mergeConfigs(configs...), nil
|
2017-10-02 22:35:39 +02:00
|
|
|
}
|
2017-10-10 02:14:21 +02:00
|
|
|
|
Suport workspace local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scinerio. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
func deleteProjectConfiguration(stackName tokens.QName, key tokens.ModuleMember) error {
|
2017-10-17 21:28:03 +02:00
|
|
|
pkg, err := getPackage()
|
2017-10-10 02:14:21 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2017-10-17 23:19:47 +02:00
|
|
|
if stackName == "" {
|
|
|
|
if pkg.Config != nil {
|
|
|
|
delete(pkg.Config, key)
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if pkg.Stacks[stackName].Config != nil {
|
|
|
|
delete(pkg.Stacks[stackName].Config, key)
|
|
|
|
}
|
2017-10-10 02:14:21 +02:00
|
|
|
}
|
|
|
|
|
2017-10-17 21:28:03 +02:00
|
|
|
return savePackage(pkg)
|
2017-10-10 02:14:21 +02:00
|
|
|
}
|
2017-10-10 02:25:44 +02:00
|
|
|
|
Suport workspace local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scinerio. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
func deleteWorkspaceConfiguration(stackName tokens.QName, key tokens.ModuleMember) error {
|
|
|
|
workspace, err := newWorkspace()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
if config, has := workspace.Settings().Config[stackName]; has {
|
|
|
|
delete(config, key)
|
|
|
|
}
|
|
|
|
|
|
|
|
return workspace.Save()
|
|
|
|
}
|
|
|
|
|
|
|
|
// setProjectConfiguration stores a configuration value in the Pulumi.yaml
// package file — in the project-wide config map when stackName is empty,
// otherwise in the named stack's config map — and then saves the package.
func setProjectConfiguration(stackName tokens.QName, key tokens.ModuleMember, value config.Value) error {
	pkg, err := getPackage()
	if err != nil {
		return err
	}

	if stackName == "" {
		// Project-wide value: lazily create the top-level config map.
		if pkg.Config == nil {
			pkg.Config = make(map[tokens.ModuleMember]config.Value)
		}

		pkg.Config[key] = value
	} else {
		// Stack-specific value: lazily create the stacks map itself first.
		if pkg.Stacks == nil {
			pkg.Stacks = make(map[tokens.QName]pack.StackInfo)
		}

		if pkg.Stacks[stackName].Config == nil {
			// Map values are not addressable in Go, so we must copy the
			// StackInfo out, initialize its Config map, and store it back.
			si := pkg.Stacks[stackName]
			si.Config = make(map[tokens.ModuleMember]config.Value)
			pkg.Stacks[stackName] = si
		}

		pkg.Stacks[stackName].Config[key] = value
	}

	return savePackage(pkg)
}
|
|
|
|
|
Suport workspace local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scinerio. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
func setWorkspaceConfiguration(stackName tokens.QName, key tokens.ModuleMember, value config.Value) error {
|
|
|
|
workspace, err := newWorkspace()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
2017-10-17 21:28:03 +02:00
|
|
|
}
|
2017-10-10 02:25:44 +02:00
|
|
|
|
Suport workspace local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scinerio. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
if _, has := workspace.Settings().Config[stackName]; !has {
|
|
|
|
workspace.Settings().Config[stackName] = make(map[tokens.ModuleMember]config.Value)
|
2017-10-10 02:25:44 +02:00
|
|
|
}
|
|
|
|
|
Suport workspace local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scinerio. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
workspace.Settings().Config[stackName][key] = value
|
|
|
|
|
|
|
|
return workspace.Save()
|
|
|
|
}
|
|
|
|
|
|
|
|
func mergeConfigs(configs ...map[tokens.ModuleMember]config.Value) map[tokens.ModuleMember]config.Value {
|
2017-10-19 00:37:18 +02:00
|
|
|
merged := make(map[tokens.ModuleMember]config.Value)
|
2017-10-10 02:25:44 +02:00
|
|
|
|
Suport workspace local configuration and use it by default
Previously, we stored configuration information in the Pulumi.yaml
file. This was a change from the old model where configuration was
stored in a special section of the checkpoint file.
While doing things this way has some upsides with being able to flow
configuration changes with your source code (e.g. fixed values for a
production stack that version with the code) it caused some friction
for the local development scinerio. In this case, setting
configuration values would pend changes to Pulumi.yaml and if you
didn't want to publish these changes, you'd have to remember to remove
them before commiting. It also was problematic for our examples, where
it was not clear if we wanted to actually include values like
`aws:config:region` in our samples. Finally, we found that for our
own pulumi service, we'd have values that would differ across each
individual dev stack, and publishing these values to a global
Pulumi.yaml file would just be adding noise to things.
We now adopt a hybrid model, where by default configuration is stored
locally, in the workspace's settings per project. A new flag `--save`
tests commands to actual operate on the configuration information
stored in Pulumi.yaml.
With the following change, we have have four "slots" configuration
values can end up in:
1. In the Pulumi.yaml file, applies to all stacks
2. In the Pulumi.yaml file, applied to a specific stack
3. In the local workspace.json file, applied to all stacks
4. In the local workspace.json file, applied to a specific stack
When computing the configuration information for a stack, we apply
configuration in the above order, overriding values as we go
along.
We also invert the default behavior of the `pulumi config` commands so
they operate on a specific stack (i.e. how they did before
e3610989). If you want to apply configuration to all stacks, `--all`
can be passed to any configuration command.
2017-10-27 23:24:47 +02:00
|
|
|
for _, config := range configs {
|
|
|
|
for key, value := range config {
|
|
|
|
merged[key] = value
|
|
|
|
}
|
2017-10-17 23:19:47 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return merged
|
2017-10-10 02:25:44 +02:00
|
|
|
}
|