2018-05-22 21:43:36 +02:00
|
|
|
// Copyright 2016-2018, Pulumi Corporation.
|
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
2017-11-01 21:34:19 +01:00
|
|
|
|
2018-09-04 21:38:58 +02:00
|
|
|
package filestate
|
2017-11-01 21:34:19 +01:00
|
|
|
|
|
|
|
import (
|
2018-05-08 03:23:03 +02:00
|
|
|
"context"
|
2018-03-03 21:12:54 +01:00
|
|
|
"encoding/json"
|
2018-05-08 00:31:27 +02:00
|
|
|
"fmt"
|
2019-04-25 05:55:39 +02:00
|
|
|
"net/url"
|
2017-11-01 22:55:16 +01:00
|
|
|
"os"
|
Remove the need to `pulumi init` for the local backend
This change removes the need to `pulumi init` when targeting the local
backend. A fair amount of the change lays the foundation that the next
set of changes to stop having `pulumi init` be used for cloud stacks
as well.
Previously, `pulumi init` logically did two things:
1. It created the bookkeeping directory for local stacks, this was
stored in `<repository-root>/.pulumi`, where `<repository-root>` was
the path to what we belived the "root" of your project was. In the
case of git repositories, this was the directory that contained your
`.git` folder.
2. It recorded repository information in
`<repository-root>/.pulumi/repository.json`. This was used by the
cloud backend when computing what project to interact with on
Pulumi.com
The new identity model will remove the need for (2), since we only
need an owner and stack name to fully qualify a stack on
pulumi.com, so it's easy enough to stop creating a folder just for
that.
However, for the local backend, we need to continue to retain some
information about stacks (e.g. checkpoints, history, etc). In
addition, we need to store our workspace settings (which today just
contains the selected stack) somehere.
For state stored by the local backend, we change the URL scheme from
`local://` to `local://<optional-root-path>`. When
`<optional-root-path>` is unset, it defaults to `$HOME`. We create our
`.pulumi` folder in that directory. This is important because stack
names now must be unique within the backend, but we have some tests
using local stacks which use fixed stack names, so each integration
test really wants its own "view" of the world.
For the workspace settings, we introduce a new `workspaces` directory
in `~/.pulumi`. In this folder we write the workspace settings file
for each project. The file name is the name of the project, combined
with the SHA1 of the path of the project file on disk, to ensure that
multiple pulumi programs with the same project name have different
workspace settings.
This does mean that moving a project's location on disk will cause the
CLI to "forget" what the selected stack was, which is unfortunate, but
not the end of the world. If this ends up being a big pain point, we
can certianly try to play games in the future (for example, if we saw
a .git folder in a parent folder, we could store data in there).
With respect to compatibility, we don't attempt to migrate older files
to their newer locations. For long lived stacks managed using the
local backend, we can provide information on where to move things
to. For all stacks (regardless of backend) we'll require the user to
`pulumi stack select` their stack again, but that seems like the
correct trade-off vs writing complicated upgrade code.
2018-04-17 01:15:10 +02:00
|
|
|
"os/user"
|
2019-04-25 05:55:39 +02:00
|
|
|
"path"
|
2017-11-01 22:55:16 +01:00
|
|
|
"path/filepath"
|
2019-12-30 19:24:48 +01:00
|
|
|
"regexp"
|
Remove the need to `pulumi init` for the local backend
This change removes the need to `pulumi init` when targeting the local
backend. A fair amount of the change lays the foundation that the next
set of changes to stop having `pulumi init` be used for cloud stacks
as well.
Previously, `pulumi init` logically did two things:
1. It created the bookkeeping directory for local stacks, this was
stored in `<repository-root>/.pulumi`, where `<repository-root>` was
the path to what we belived the "root" of your project was. In the
case of git repositories, this was the directory that contained your
`.git` folder.
2. It recorded repository information in
`<repository-root>/.pulumi/repository.json`. This was used by the
cloud backend when computing what project to interact with on
Pulumi.com
The new identity model will remove the need for (2), since we only
need an owner and stack name to fully qualify a stack on
pulumi.com, so it's easy enough to stop creating a folder just for
that.
However, for the local backend, we need to continue to retain some
information about stacks (e.g. checkpoints, history, etc). In
addition, we need to store our workspace settings (which today just
contains the selected stack) somehere.
For state stored by the local backend, we change the URL scheme from
`local://` to `local://<optional-root-path>`. When
`<optional-root-path>` is unset, it defaults to `$HOME`. We create our
`.pulumi` folder in that directory. This is important because stack
names now must be unique within the backend, but we have some tests
using local stacks which use fixed stack names, so each integration
test really wants its own "view" of the world.
For the workspace settings, we introduce a new `workspaces` directory
in `~/.pulumi`. In this folder we write the workspace settings file
for each project. The file name is the name of the project, combined
with the SHA1 of the path of the project file on disk, to ensure that
multiple pulumi programs with the same project name have different
workspace settings.
This does mean that moving a project's location on disk will cause the
CLI to "forget" what the selected stack was, which is unfortunate, but
not the end of the world. If this ends up being a big pain point, we
can certianly try to play games in the future (for example, if we saw
a .git folder in a parent folder, we could store data in there).
With respect to compatibility, we don't attempt to migrate older files
to their newer locations. For long lived stacks managed using the
local backend, we can provide information on where to move things
to. For all stacks (regardless of backend) we'll require the user to
`pulumi stack select` their stack again, but that seems like the
correct trade-off vs writing complicated upgrade code.
2018-04-17 01:15:10 +02:00
|
|
|
"strings"
|
2018-01-25 03:22:41 +01:00
|
|
|
"time"
|
2017-11-01 21:34:19 +01:00
|
|
|
|
2019-04-17 22:48:38 +02:00
|
|
|
"github.com/pkg/errors"
|
2019-04-25 05:55:39 +02:00
|
|
|
"gocloud.dev/blob"
|
|
|
|
_ "gocloud.dev/blob/azureblob" // driver for azblob://
|
|
|
|
_ "gocloud.dev/blob/fileblob" // driver for file://
|
2019-12-16 18:47:31 +01:00
|
|
|
"gocloud.dev/blob/gcsblob" // driver for gs://
|
2019-04-25 05:55:39 +02:00
|
|
|
_ "gocloud.dev/blob/s3blob" // driver for s3://
|
2019-08-14 20:50:03 +02:00
|
|
|
"gocloud.dev/gcerrors"
|
2019-04-25 05:55:39 +02:00
|
|
|
|
2020-03-18 23:00:30 +01:00
|
|
|
"github.com/pulumi/pulumi/sdk/go/common/apitype"
|
Improve the overall cloud CLI experience
This improves the overall cloud CLI experience workflow.
Now whether a stack is local or cloud is inherent to the stack
itself. If you interact with a cloud stack, we transparently talk
to the cloud; if you interact with a local stack, we just do the
right thing, and perform all operations locally. Aside from sometimes
seeing a cloud emoji pop-up ☁️, the experience is quite similar.
For example, to initialize a new cloud stack, simply:
$ pulumi login
Logging into Pulumi Cloud: https://pulumi.com/
Enter Pulumi access token: <enter your token>
$ pulumi stack init my-cloud-stack
Note that you may log into a specific cloud if you'd like. For
now, this is just for our own testing purposes, but someday when we
support custom clouds (e.g., Enterprise), you can just say:
$ pulumi login --cloud-url https://corp.acme.my-ppc.net:9873
The cloud is now the default. If you instead prefer a "fire and
forget" style of stack, you can skip the login and pass `--local`:
$ pulumi stack init my-faf-stack --local
If you are logged in and run `pulumi`, we tell you as much:
$ pulumi
Usage:
pulumi [command]
// as before...
Currently logged into the Pulumi Cloud ☁️
https://pulumi.com/
And if you list your stacks, we tell you which one is local or not:
$ pulumi stack ls
NAME LAST UPDATE RESOURCE COUNT CLOUD URL
my-cloud-stack 2017-12-01 ... 3 https://pulumi.com/
my-faf-stack n/a 0 n/a
And `pulumi stack` by itself prints information like your cloud org,
PPC name, and so on, in addition to the usuals.
I shall write up more details and make sure to document these changes.
This change also fairly significantly refactors the layout of cloud
versus local logic, so that the cmd/ package is resonsible for CLI
things, and the new pkg/backend/ package is responsible for the
backends. The following is the overall resulting package architecture:
* The backend.Backend interface can be implemented to substitute
a new backend. This has operations to get and list stacks,
perform updates, and so on.
* The backend.Stack struct is a wrapper around a stack that has
or is being manipulated by a Backend. It resembles our existing
Stack notions in the engine, but carries additional metadata
about its source. Notably, it offers functions that allow
operations like updating and deleting on the Backend from which
it came.
* There is very little else in the pkg/backend/ package.
* A new package, pkg/backend/local/, encapsulates all local state
management for "fire and forget" scenarios. It simply implements
the above logic and contains anything specific to the local
experience.
* A peer package, pkg/backend/cloud/, encapsulates all logic
required for the cloud experience. This includes its subpackage
apitype/ which contains JSON schema descriptions required for
REST calls against the cloud backend. It also contains handy
functions to list which clouds we have authenticated with.
* A subpackage here, pkg/backend/state/, is not a provider at all.
Instead, it contains all of the state management functions that
are currently shared between local and cloud backends. This
includes configuration logic -- including encryption -- as well
as logic pertaining to which stacks are known to the workspace.
This addresses pulumi/pulumi#629 and pulumi/pulumi#494.
2017-12-02 16:29:46 +01:00
|
|
|
"github.com/pulumi/pulumi/pkg/backend"
|
2018-09-05 00:40:15 +02:00
|
|
|
"github.com/pulumi/pulumi/pkg/backend/display"
|
2017-12-13 19:46:54 +01:00
|
|
|
"github.com/pulumi/pulumi/pkg/diag"
|
2018-09-05 16:20:25 +02:00
|
|
|
"github.com/pulumi/pulumi/pkg/diag/colors"
|
2017-11-20 07:28:49 +01:00
|
|
|
"github.com/pulumi/pulumi/pkg/encoding"
|
2017-11-01 21:34:19 +01:00
|
|
|
"github.com/pulumi/pulumi/pkg/engine"
|
2017-11-20 07:28:49 +01:00
|
|
|
"github.com/pulumi/pulumi/pkg/operations"
|
2017-12-22 16:38:21 +01:00
|
|
|
"github.com/pulumi/pulumi/pkg/resource/config"
|
2018-03-27 23:28:35 +02:00
|
|
|
"github.com/pulumi/pulumi/pkg/resource/deploy"
|
2019-03-14 23:32:10 +01:00
|
|
|
"github.com/pulumi/pulumi/pkg/resource/edit"
|
2018-01-05 21:46:13 +01:00
|
|
|
"github.com/pulumi/pulumi/pkg/resource/stack"
|
2020-03-18 22:49:56 +01:00
|
|
|
"github.com/pulumi/pulumi/sdk/go/common/tokens"
|
2019-12-16 18:47:31 +01:00
|
|
|
"github.com/pulumi/pulumi/pkg/util/cmdutil"
|
2020-03-18 22:40:07 +01:00
|
|
|
"github.com/pulumi/pulumi/sdk/go/common/util/contract"
|
2018-05-16 00:28:00 +02:00
|
|
|
"github.com/pulumi/pulumi/pkg/util/logging"
|
2019-03-20 00:21:50 +01:00
|
|
|
"github.com/pulumi/pulumi/pkg/util/result"
|
2019-04-26 20:43:16 +02:00
|
|
|
"github.com/pulumi/pulumi/pkg/util/validation"
|
2020-03-18 22:35:53 +01:00
|
|
|
"github.com/pulumi/pulumi/sdk/go/common/workspace"
|
2017-11-01 21:34:19 +01:00
|
|
|
)
|
|
|
|
|
Make some updates based on CR feedback
This change implements some feedback from @ellismg.
* Make backend.Stack an interface and let backends implement it,
enabling dynamic type testing/casting to access information
specific to that backend. For instance, the cloud.Stack conveys
the cloud URL, org name, and PPC name, for each stack.
* Similarly expose specialized backend.Backend interfaces,
local.Backend and cloud.Backend, to convey specific information.
* Redo a bunch of the commands in terms of these.
* Keeping with this theme, turn the CreateStack options into an
opaque interface{}, and let the specific backends expose their
own structures with their own settings (like PPC name in cloud).
* Show both the org and PPC names in the cloud column printed in
the stack ls command, in addition to the Pulumi Cloud URL.
Unrelated, but useful:
* Special case the 401 HTTP response and make a friendly error,
to tell the developer they must use `pulumi login`. This is
better than tossing raw "401: Unauthorized" errors in their face.
* Change the "Updating stack '..' in the Pulumi Cloud" message to
use the correct action verb ("Previewing", "Destroying", etc).
2017-12-03 16:51:18 +01:00
|
|
|
// Backend extends the base backend interface with specific information about local backends.
//
// Obtain one via New or Login; the concrete implementation is *localBackend.
type Backend interface {
	backend.Backend

	local() // at the moment, no local specific info, so just use a marker function.
}
|
|
|
|
|
Improve the overall cloud CLI experience
This improves the overall cloud CLI experience workflow.
Now whether a stack is local or cloud is inherent to the stack
itself. If you interact with a cloud stack, we transparently talk
to the cloud; if you interact with a local stack, we just do the
right thing, and perform all operations locally. Aside from sometimes
seeing a cloud emoji pop-up ☁️, the experience is quite similar.
For example, to initialize a new cloud stack, simply:
$ pulumi login
Logging into Pulumi Cloud: https://pulumi.com/
Enter Pulumi access token: <enter your token>
$ pulumi stack init my-cloud-stack
Note that you may log into a specific cloud if you'd like. For
now, this is just for our own testing purposes, but someday when we
support custom clouds (e.g., Enterprise), you can just say:
$ pulumi login --cloud-url https://corp.acme.my-ppc.net:9873
The cloud is now the default. If you instead prefer a "fire and
forget" style of stack, you can skip the login and pass `--local`:
$ pulumi stack init my-faf-stack --local
If you are logged in and run `pulumi`, we tell you as much:
$ pulumi
Usage:
pulumi [command]
// as before...
Currently logged into the Pulumi Cloud ☁️
https://pulumi.com/
And if you list your stacks, we tell you which one is local or not:
$ pulumi stack ls
NAME LAST UPDATE RESOURCE COUNT CLOUD URL
my-cloud-stack 2017-12-01 ... 3 https://pulumi.com/
my-faf-stack n/a 0 n/a
And `pulumi stack` by itself prints information like your cloud org,
PPC name, and so on, in addition to the usuals.
I shall write up more details and make sure to document these changes.
This change also fairly significantly refactors the layout of cloud
versus local logic, so that the cmd/ package is resonsible for CLI
things, and the new pkg/backend/ package is responsible for the
backends. The following is the overall resulting package architecture:
* The backend.Backend interface can be implemented to substitute
a new backend. This has operations to get and list stacks,
perform updates, and so on.
* The backend.Stack struct is a wrapper around a stack that has
or is being manipulated by a Backend. It resembles our existing
Stack notions in the engine, but carries additional metadata
about its source. Notably, it offers functions that allow
operations like updating and deleting on the Backend from which
it came.
* There is very little else in the pkg/backend/ package.
* A new package, pkg/backend/local/, encapsulates all local state
management for "fire and forget" scenarios. It simply implements
the above logic and contains anything specific to the local
experience.
* A peer package, pkg/backend/cloud/, encapsulates all logic
required for the cloud experience. This includes its subpackage
apitype/ which contains JSON schema descriptions required for
REST calls against the cloud backend. It also contains handy
functions to list which clouds we have authenticated with.
* A subpackage here, pkg/backend/state/, is not a provider at all.
Instead, it contains all of the state management functions that
are currently shared between local and cloud backends. This
includes configuration logic -- including encryption -- as well
as logic pertaining to which stacks are known to the workspace.
This addresses pulumi/pulumi#629 and pulumi/pulumi#494.
2017-12-02 16:29:46 +01:00
|
|
|
// localBackend implements Backend on top of a gocloud.dev blob bucket
// (file://, s3://, azblob://, gs://, ...), persisting stack state as objects in that bucket.
type localBackend struct {
	// d is the sink used to emit diagnostics (errors, warnings) to the user.
	d diag.Sink

	// originalURL is the URL provided when the localBackend was initialized, for example
	// "file://~". url is a canonicalized version that should be used when persisting data.
	// (For example, replacing ~ with the home directory, making an absolute path, etc.)
	originalURL string
	url         string

	// bucket is the opened blob store backing this backend; it is a wrappedBucket,
	// and may additionally be prefixed with a sub-directory for non-file:// URLs.
	bucket Bucket
}
|
|
|
|
|
2018-04-18 01:37:52 +02:00
|
|
|
// localBackendReference is a stack reference for the local backend; a stack is
// identified solely by its name (stack names are unique within a local backend).
type localBackendReference struct {
	name tokens.QName
}
|
|
|
|
|
|
|
|
// String returns the referenced stack's name as a plain string.
func (r localBackendReference) String() string {
	return string(r.name)
}
|
|
|
|
|
2018-09-05 16:20:25 +02:00
|
|
|
// Name returns the referenced stack's name.
func (r localBackendReference) Name() tokens.QName {
	return r.name
}
|
|
|
|
|
2019-04-25 05:55:39 +02:00
|
|
|
func IsFileStateBackendURL(urlstr string) bool {
|
|
|
|
u, err := url.Parse(urlstr)
|
|
|
|
if err != nil {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
return blob.DefaultURLMux().ValidBucketScheme(u.Scheme)
|
2018-04-05 00:31:01 +02:00
|
|
|
}
|
|
|
|
|
2019-05-20 20:46:00 +02:00
|
|
|
// FilePathPrefix is the URL scheme prefix for local-filesystem state, handled by the fileblob driver.
const FilePathPrefix = "file://"
|
|
|
|
|
2019-07-25 16:58:19 +02:00
|
|
|
// New creates a filestate Backend rooted at originalURL. The URL's scheme must be
// one of the bucket schemes registered with go-cloud's default URL mux; file://
// URLs additionally get ~-expansion and relative-path resolution (see massageBlobPath).
func New(d diag.Sink, originalURL string) (Backend, error) {
	if !IsFileStateBackendURL(originalURL) {
		return nil, errors.Errorf("local URL %s has an illegal prefix; expected one of: %s",
			originalURL, strings.Join(blob.DefaultURLMux().BucketSchemes(), ", "))
	}

	// Canonicalize the URL (expand ~, make file:// paths absolute) before opening it.
	u, err := massageBlobPath(originalURL)
	if err != nil {
		return nil, err
	}

	p, err := url.Parse(u)
	if err != nil {
		return nil, err
	}

	blobmux := blob.DefaultURLMux()

	// for gcp we want to support additional credentials
	// schemes on top of go-cloud's default credentials mux.
	if p.Scheme == gcsblob.Scheme {
		blobmux, err = GoogleCredentialsMux(context.TODO())
		if err != nil {
			return nil, err
		}
	}

	bucket, err := blobmux.OpenBucket(context.TODO(), u)
	if err != nil {
		return nil, errors.Wrapf(err, "unable to open bucket %s", u)
	}

	// For cloud buckets (non-file:// URLs), treat any path component of the URL as a
	// sub-directory within the bucket and scope all reads/writes underneath it.
	if !strings.HasPrefix(u, FilePathPrefix) {
		bucketSubDir := strings.TrimLeft(p.Path, "/")
		if bucketSubDir != "" {
			// PrefixedBucket requires the prefix to end in "/".
			if !strings.HasSuffix(bucketSubDir, "/") {
				bucketSubDir += "/"
			}

			bucket = blob.PrefixedBucket(bucket, bucketSubDir)
		}
	}

	return &localBackend{
		d:           d,
		originalURL: originalURL,
		url:         u,
		bucket:      &wrappedBucket{bucket: bucket},
	}, nil
}
|
|
|
|
|
2019-05-03 01:52:00 +02:00
|
|
|
// massageBlobPath takes the path the user provided and converts it to an appropriate form go-cloud
|
|
|
|
// can support. Importantly, s3/azblob/gs paths should not be be touched. This will only affect
|
|
|
|
// file:// paths which have a few oddities around them that we want to ensure work properly.
|
|
|
|
func massageBlobPath(path string) (string, error) {
|
2019-05-20 20:46:00 +02:00
|
|
|
if !strings.HasPrefix(path, FilePathPrefix) {
|
2019-07-25 16:58:19 +02:00
|
|
|
// Not a file:// path. Keep this untouched and pass directly to gocloud.
|
2019-05-03 01:52:00 +02:00
|
|
|
return path, nil
|
|
|
|
}
|
|
|
|
|
2019-07-25 16:58:19 +02:00
|
|
|
// Strip off the "file://" portion so we can examine and determine what to do with the rest.
|
2019-05-20 20:46:00 +02:00
|
|
|
path = strings.TrimPrefix(path, FilePathPrefix)
|
2019-05-03 01:52:00 +02:00
|
|
|
|
|
|
|
// We need to specially handle ~. The shell doesn't take care of this for us, and later
|
|
|
|
// functions we run into can't handle this either.
|
|
|
|
//
|
|
|
|
// From https://stackoverflow.com/questions/17609732/expand-tilde-to-home-directory
|
|
|
|
if strings.HasPrefix(path, "~") {
|
|
|
|
usr, err := user.Current()
|
|
|
|
if err != nil {
|
|
|
|
return "", errors.Wrap(err, "Could not determine current user to resolve `file://~` path.")
|
|
|
|
}
|
|
|
|
|
|
|
|
if path == "~" {
|
|
|
|
path = usr.HomeDir
|
|
|
|
} else {
|
|
|
|
path = filepath.Join(usr.HomeDir, path[2:])
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// For file:// backend, ensure a relative path is resolved. fileblob only supports absolute paths.
|
|
|
|
path, err := filepath.Abs(path)
|
|
|
|
if err != nil {
|
|
|
|
return "", errors.Wrap(err, "An IO error occurred during the current operation")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Using example from https://godoc.org/gocloud.dev/blob/fileblob#example-package--OpenBucket
|
2019-07-25 16:58:19 +02:00
|
|
|
// On Windows, convert "\" to "/" and add a leading "/". (See https://gocloud.dev/howto/blob/#local)
|
2019-05-03 01:52:00 +02:00
|
|
|
path = filepath.ToSlash(path)
|
|
|
|
if os.PathSeparator != '/' && !strings.HasPrefix(path, "/") {
|
|
|
|
path = "/" + path
|
|
|
|
}
|
|
|
|
|
2019-05-20 20:46:00 +02:00
|
|
|
return FilePathPrefix + path, nil
|
2019-05-03 01:52:00 +02:00
|
|
|
}
|
|
|
|
|
2019-04-19 02:43:23 +02:00
|
|
|
func Login(d diag.Sink, url string) (Backend, error) {
|
|
|
|
be, err := New(d, url)
|
2018-09-04 19:44:25 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2019-10-16 00:37:57 +02:00
|
|
|
return be, workspace.StoreAccount(be.URL(), workspace.Account{}, true)
|
2018-04-05 00:31:01 +02:00
|
|
|
}
|
|
|
|
|
2018-09-04 19:44:25 +02:00
|
|
|
// local is the marker method that makes *localBackend satisfy the filestate Backend interface.
func (b *localBackend) local() {}
|
|
|
|
|
Improve the overall cloud CLI experience
This improves the overall cloud CLI experience workflow.
Now whether a stack is local or cloud is inherent to the stack
itself. If you interact with a cloud stack, we transparently talk
to the cloud; if you interact with a local stack, we just do the
right thing, and perform all operations locally. Aside from sometimes
seeing a cloud emoji pop-up ☁️, the experience is quite similar.
For example, to initialize a new cloud stack, simply:
$ pulumi login
Logging into Pulumi Cloud: https://pulumi.com/
Enter Pulumi access token: <enter your token>
$ pulumi stack init my-cloud-stack
Note that you may log into a specific cloud if you'd like. For
now, this is just for our own testing purposes, but someday when we
support custom clouds (e.g., Enterprise), you can just say:
$ pulumi login --cloud-url https://corp.acme.my-ppc.net:9873
The cloud is now the default. If you instead prefer a "fire and
forget" style of stack, you can skip the login and pass `--local`:
$ pulumi stack init my-faf-stack --local
If you are logged in and run `pulumi`, we tell you as much:
$ pulumi
Usage:
pulumi [command]
// as before...
Currently logged into the Pulumi Cloud ☁️
https://pulumi.com/
And if you list your stacks, we tell you which one is local or not:
$ pulumi stack ls
NAME LAST UPDATE RESOURCE COUNT CLOUD URL
my-cloud-stack 2017-12-01 ... 3 https://pulumi.com/
my-faf-stack n/a 0 n/a
And `pulumi stack` by itself prints information like your cloud org,
PPC name, and so on, in addition to the usuals.
I shall write up more details and make sure to document these changes.
This change also fairly significantly refactors the layout of cloud
versus local logic, so that the cmd/ package is resonsible for CLI
things, and the new pkg/backend/ package is responsible for the
backends. The following is the overall resulting package architecture:
* The backend.Backend interface can be implemented to substitute
a new backend. This has operations to get and list stacks,
perform updates, and so on.
* The backend.Stack struct is a wrapper around a stack that has
or is being manipulated by a Backend. It resembles our existing
Stack notions in the engine, but carries additional metadata
about its source. Notably, it offers functions that allow
operations like updating and deleting on the Backend from which
it came.
* There is very little else in the pkg/backend/ package.
* A new package, pkg/backend/local/, encapsulates all local state
management for "fire and forget" scenarios. It simply implements
the above logic and contains anything specific to the local
experience.
* A peer package, pkg/backend/cloud/, encapsulates all logic
required for the cloud experience. This includes its subpackage
apitype/ which contains JSON schema descriptions required for
REST calls against the cloud backend. It also contains handy
functions to list which clouds we have authenticated with.
* A subpackage here, pkg/backend/state/, is not a provider at all.
Instead, it contains all of the state management functions that
are currently shared between local and cloud backends. This
includes configuration logic -- including encryption -- as well
as logic pertaining to which stacks are known to the workspace.
This addresses pulumi/pulumi#629 and pulumi/pulumi#494.
2017-12-02 16:29:46 +01:00
|
|
|
func (b *localBackend) Name() string {
|
2018-04-14 07:26:01 +02:00
|
|
|
name, err := os.Hostname()
|
|
|
|
contract.IgnoreError(err)
|
Improve the overall cloud CLI experience
This improves the overall cloud CLI experience workflow.
Now whether a stack is local or cloud is inherent to the stack
itself. If you interact with a cloud stack, we transparently talk
to the cloud; if you interact with a local stack, we just do the
right thing, and perform all operations locally. Aside from sometimes
seeing a cloud emoji pop-up ☁️, the experience is quite similar.
For example, to initialize a new cloud stack, simply:
$ pulumi login
Logging into Pulumi Cloud: https://pulumi.com/
Enter Pulumi access token: <enter your token>
$ pulumi stack init my-cloud-stack
Note that you may log into a specific cloud if you'd like. For
now, this is just for our own testing purposes, but someday when we
support custom clouds (e.g., Enterprise), you can just say:
$ pulumi login --cloud-url https://corp.acme.my-ppc.net:9873
The cloud is now the default. If you instead prefer a "fire and
forget" style of stack, you can skip the login and pass `--local`:
$ pulumi stack init my-faf-stack --local
If you are logged in and run `pulumi`, we tell you as much:
$ pulumi
Usage:
pulumi [command]
// as before...
Currently logged into the Pulumi Cloud ☁️
https://pulumi.com/
And if you list your stacks, we tell you which one is local or not:
$ pulumi stack ls
NAME LAST UPDATE RESOURCE COUNT CLOUD URL
my-cloud-stack 2017-12-01 ... 3 https://pulumi.com/
my-faf-stack n/a 0 n/a
And `pulumi stack` by itself prints information like your cloud org,
PPC name, and so on, in addition to the usuals.
I shall write up more details and make sure to document these changes.
This change also fairly significantly refactors the layout of cloud
versus local logic, so that the cmd/ package is resonsible for CLI
things, and the new pkg/backend/ package is responsible for the
backends. The following is the overall resulting package architecture:
* The backend.Backend interface can be implemented to substitute
a new backend. This has operations to get and list stacks,
perform updates, and so on.
* The backend.Stack struct is a wrapper around a stack that has
or is being manipulated by a Backend. It resembles our existing
Stack notions in the engine, but carries additional metadata
about its source. Notably, it offers functions that allow
operations like updating and deleting on the Backend from which
it came.
* There is very little else in the pkg/backend/ package.
* A new package, pkg/backend/local/, encapsulates all local state
management for "fire and forget" scenarios. It simply implements
the above logic and contains anything specific to the local
experience.
* A peer package, pkg/backend/cloud/, encapsulates all logic
required for the cloud experience. This includes its subpackage
apitype/ which contains JSON schema descriptions required for
REST calls against the cloud backend. It also contains handy
functions to list which clouds we have authenticated with.
* A subpackage here, pkg/backend/state/, is not a provider at all.
Instead, it contains all of the state management functions that
are currently shared between local and cloud backends. This
includes configuration logic -- including encryption -- as well
as logic pertaining to which stacks are known to the workspace.
This addresses pulumi/pulumi#629 and pulumi/pulumi#494.
2017-12-02 16:29:46 +01:00
|
|
|
if name == "" {
|
|
|
|
name = "local"
|
|
|
|
}
|
|
|
|
return name
|
|
|
|
}
|
|
|
|
|
2018-09-04 19:44:25 +02:00
|
|
|
// URL returns the URL that was originally used to construct this backend,
// before any normalization was applied to it.
func (b *localBackend) URL() string {
	return b.originalURL
}
|
|
|
|
|
|
|
|
// StateDir returns the directory in which this backend keeps its state: the
// workspace bookkeeping directory (relative to the backend's root location).
func (b *localBackend) StateDir() string {
	return workspace.BookkeepingDir
}
|
|
|
|
|
2019-06-24 06:39:22 +02:00
|
|
|
func (b *localBackend) GetPolicyPack(ctx context.Context, policyPack string,
|
|
|
|
d diag.Sink) (backend.PolicyPack, error) {
|
|
|
|
|
|
|
|
return nil, fmt.Errorf("File state backend does not support resource policy")
|
|
|
|
}
|
|
|
|
|
2020-01-16 21:04:51 +01:00
|
|
|
func (b *localBackend) ListPolicyGroups(ctx context.Context, orgName string) (apitype.ListPolicyGroupsResponse, error) {
|
|
|
|
return apitype.ListPolicyGroupsResponse{}, fmt.Errorf("File state backend does not support resource policy")
|
|
|
|
}
|
|
|
|
|
|
|
|
func (b *localBackend) ListPolicyPacks(ctx context.Context, orgName string) (apitype.ListPolicyPacksResponse, error) {
|
|
|
|
return apitype.ListPolicyPacksResponse{}, fmt.Errorf("File state backend does not support resource policy")
|
|
|
|
}
|
|
|
|
|
2019-08-12 12:49:22 +02:00
|
|
|
// SupportsOrganizations tells whether a user can belong to multiple organizations in this backend.
func (b *localBackend) SupportsOrganizations() bool {
	// The filestate backend has no notion of users or organizations.
	return false
}
|
|
|
|
|
2018-04-20 08:16:07 +02:00
|
|
|
// ParseStackReference parses stackRefName into a reference to a stack in this
// backend. The name is accepted verbatim; no validation is performed here
// (see ValidateStackName for the naming rules).
func (b *localBackend) ParseStackReference(stackRefName string) (backend.StackReference, error) {
	return localBackendReference{name: tokens.QName(stackRefName)}, nil
}
|
|
|
|
|
2019-12-30 19:24:48 +01:00
|
|
|
// ValidateStackName verifies the stack name is valid for the local backend. We use the same rules as the
|
|
|
|
// httpstate backend.
|
|
|
|
func (b *localBackend) ValidateStackName(stackName string) error {
|
|
|
|
if strings.Contains(stackName, "/") {
|
|
|
|
return errors.New("stack names may not contain slashes")
|
|
|
|
}
|
|
|
|
|
|
|
|
validNameRegex := regexp.MustCompile("^[A-Za-z0-9_.-]{1,100}$")
|
|
|
|
if !validNameRegex.MatchString(stackName) {
|
|
|
|
return errors.New("stack names may only contain alphanumeric, hyphens, underscores, or periods")
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-08-12 11:12:17 +02:00
|
|
|
// DoesProjectExist returns whether a project with the given name exists in
// this backend.
func (b *localBackend) DoesProjectExist(ctx context.Context, projectName string) (bool, error) {
	// Local backends don't really have multiple projects, so just return false here.
	return false, nil
}
|
|
|
|
|
2018-05-08 03:23:03 +02:00
|
|
|
// CreateStack creates a new, empty stack with the given reference, failing if
// a stack with that name already exists. opts must be nil: local stacks do
// not support any creation options.
func (b *localBackend) CreateStack(ctx context.Context, stackRef backend.StackReference,
	opts interface{}) (backend.Stack, error) {

	contract.Requiref(opts == nil, "opts", "local stacks do not support any options")

	stackName := stackRef.Name()
	if stackName == "" {
		return nil, errors.New("invalid empty stack name")
	}

	// Refuse to clobber an existing stack: getStack succeeding means state for
	// this name is already present.
	if _, _, err := b.getStack(stackName); err == nil {
		return nil, &backend.StackAlreadyExistsError{StackName: string(stackName)}
	}

	tags, err := backend.GetEnvironmentTagsForCurrentStack()
	if err != nil {
		return nil, errors.Wrap(err, "getting stack tags")
	}
	if err = validation.ValidateStackProperties(string(stackName), tags); err != nil {
		return nil, errors.Wrap(err, "validating stack properties")
	}

	// Persist an initial (nil) snapshot to claim the stack's name on disk.
	file, err := b.saveStack(stackName, nil, nil)
	if err != nil {
		return nil, err
	}

	stack := newStack(stackRef, file, nil, b)
	fmt.Printf("Created stack '%s'\n", stack.Ref())

	return stack, nil
}
|
|
|
|
|
2018-05-08 03:23:03 +02:00
|
|
|
// GetStack fetches the stack identified by stackRef, returning (nil, nil) --
// not an error -- when no such stack exists.
func (b *localBackend) GetStack(ctx context.Context, stackRef backend.StackReference) (backend.Stack, error) {
	stackName := stackRef.Name()
	snapshot, path, err := b.getStack(stackName)
	switch {
	case gcerrors.Code(errors.Cause(err)) == gcerrors.NotFound:
		// A missing checkpoint blob simply means the stack doesn't exist.
		return nil, nil
	case err != nil:
		return nil, err
	default:
		return newStack(stackRef, path, snapshot, b), nil
	}
}
|
|
|
|
|
2018-09-14 05:54:42 +02:00
|
|
|
func (b *localBackend) ListStacks(
|
2019-08-22 22:56:43 +02:00
|
|
|
ctx context.Context, _ backend.ListStacksFilter) ([]backend.StackSummary, error) {
|
Remove the need to `pulumi init` for the local backend
This change removes the need to `pulumi init` when targeting the local
backend. A fair amount of the change lays the foundation that the next
set of changes to stop having `pulumi init` be used for cloud stacks
as well.
Previously, `pulumi init` logically did two things:
1. It created the bookkeeping directory for local stacks, this was
stored in `<repository-root>/.pulumi`, where `<repository-root>` was
the path to what we believed the "root" of your project was. In the
case of git repositories, this was the directory that contained your
`.git` folder.
2. It recorded repository information in
`<repository-root>/.pulumi/repository.json`. This was used by the
cloud backend when computing what project to interact with on
Pulumi.com
The new identity model will remove the need for (2), since we only
need an owner and stack name to fully qualify a stack on
pulumi.com, so it's easy enough to stop creating a folder just for
that.
However, for the local backend, we need to continue to retain some
information about stacks (e.g. checkpoints, history, etc). In
addition, we need to store our workspace settings (which today just
contains the selected stack) somewhere.
For state stored by the local backend, we change the URL scheme from
`local://` to `local://<optional-root-path>`. When
`<optional-root-path>` is unset, it defaults to `$HOME`. We create our
`.pulumi` folder in that directory. This is important because stack
names now must be unique within the backend, but we have some tests
using local stacks which use fixed stack names, so each integration
test really wants its own "view" of the world.
For the workspace settings, we introduce a new `workspaces` directory
in `~/.pulumi`. In this folder we write the workspace settings file
for each project. The file name is the name of the project, combined
with the SHA1 of the path of the project file on disk, to ensure that
multiple pulumi programs with the same project name have different
workspace settings.
This does mean that moving a project's location on disk will cause the
CLI to "forget" what the selected stack was, which is unfortunate, but
not the end of the world. If this ends up being a big pain point, we
can certianly try to play games in the future (for example, if we saw
a .git folder in a parent folder, we could store data in there).
With respect to compatibility, we don't attempt to migrate older files
to their newer locations. For long lived stacks managed using the
local backend, we can provide information on where to move things
to. For all stacks (regardless of backend) we'll require the user to
`pulumi stack select` their stack again, but that seems like the
correct trade-off vs writing complicated upgrade code.
2018-04-17 01:15:10 +02:00
|
|
|
stacks, err := b.getLocalStacks()
|
2017-11-01 21:34:19 +01:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2019-08-22 22:56:43 +02:00
|
|
|
// Note that the provided stack filter is not honored, since fields like
|
|
|
|
// organizations and tags aren't persisted in the local backend.
|
2018-09-14 05:54:42 +02:00
|
|
|
var results []backend.StackSummary
|
Improve the overall cloud CLI experience
This improves the overall cloud CLI experience workflow.
Now whether a stack is local or cloud is inherent to the stack
itself. If you interact with a cloud stack, we transparently talk
to the cloud; if you interact with a local stack, we just do the
right thing, and perform all operations locally. Aside from sometimes
seeing a cloud emoji pop-up ☁️, the experience is quite similar.
For example, to initialize a new cloud stack, simply:
$ pulumi login
Logging into Pulumi Cloud: https://pulumi.com/
Enter Pulumi access token: <enter your token>
$ pulumi stack init my-cloud-stack
Note that you may log into a specific cloud if you'd like. For
now, this is just for our own testing purposes, but someday when we
support custom clouds (e.g., Enterprise), you can just say:
$ pulumi login --cloud-url https://corp.acme.my-ppc.net:9873
The cloud is now the default. If you instead prefer a "fire and
forget" style of stack, you can skip the login and pass `--local`:
$ pulumi stack init my-faf-stack --local
If you are logged in and run `pulumi`, we tell you as much:
$ pulumi
Usage:
pulumi [command]
// as before...
Currently logged into the Pulumi Cloud ☁️
https://pulumi.com/
And if you list your stacks, we tell you which one is local or not:
$ pulumi stack ls
NAME LAST UPDATE RESOURCE COUNT CLOUD URL
my-cloud-stack 2017-12-01 ... 3 https://pulumi.com/
my-faf-stack n/a 0 n/a
And `pulumi stack` by itself prints information like your cloud org,
PPC name, and so on, in addition to the usuals.
I shall write up more details and make sure to document these changes.
This change also fairly significantly refactors the layout of cloud
versus local logic, so that the cmd/ package is responsible for CLI
things, and the new pkg/backend/ package is responsible for the
backends. The following is the overall resulting package architecture:
* The backend.Backend interface can be implemented to substitute
a new backend. This has operations to get and list stacks,
perform updates, and so on.
* The backend.Stack struct is a wrapper around a stack that has
or is being manipulated by a Backend. It resembles our existing
Stack notions in the engine, but carries additional metadata
about its source. Notably, it offers functions that allow
operations like updating and deleting on the Backend from which
it came.
* There is very little else in the pkg/backend/ package.
* A new package, pkg/backend/local/, encapsulates all local state
management for "fire and forget" scenarios. It simply implements
the above logic and contains anything specific to the local
experience.
* A peer package, pkg/backend/cloud/, encapsulates all logic
required for the cloud experience. This includes its subpackage
apitype/ which contains JSON schema descriptions required for
REST calls against the cloud backend. It also contains handy
functions to list which clouds we have authenticated with.
* A subpackage here, pkg/backend/state/, is not a provider at all.
Instead, it contains all of the state management functions that
are currently shared between local and cloud backends. This
includes configuration logic -- including encryption -- as well
as logic pertaining to which stacks are known to the workspace.
This addresses pulumi/pulumi#629 and pulumi/pulumi#494.
2017-12-02 16:29:46 +01:00
|
|
|
for _, stackName := range stacks {
|
2018-05-08 03:23:03 +02:00
|
|
|
stack, err := b.GetStack(ctx, localBackendReference{name: stackName})
|
Improve the overall cloud CLI experience
This improves the overall cloud CLI experience workflow.
Now whether a stack is local or cloud is inherent to the stack
itself. If you interact with a cloud stack, we transparently talk
to the cloud; if you interact with a local stack, we just do the
right thing, and perform all operations locally. Aside from sometimes
seeing a cloud emoji pop-up ☁️, the experience is quite similar.
For example, to initialize a new cloud stack, simply:
$ pulumi login
Logging into Pulumi Cloud: https://pulumi.com/
Enter Pulumi access token: <enter your token>
$ pulumi stack init my-cloud-stack
Note that you may log into a specific cloud if you'd like. For
now, this is just for our own testing purposes, but someday when we
support custom clouds (e.g., Enterprise), you can just say:
$ pulumi login --cloud-url https://corp.acme.my-ppc.net:9873
The cloud is now the default. If you instead prefer a "fire and
forget" style of stack, you can skip the login and pass `--local`:
$ pulumi stack init my-faf-stack --local
If you are logged in and run `pulumi`, we tell you as much:
$ pulumi
Usage:
pulumi [command]
// as before...
Currently logged into the Pulumi Cloud ☁️
https://pulumi.com/
And if you list your stacks, we tell you which one is local or not:
$ pulumi stack ls
NAME LAST UPDATE RESOURCE COUNT CLOUD URL
my-cloud-stack 2017-12-01 ... 3 https://pulumi.com/
my-faf-stack n/a 0 n/a
And `pulumi stack` by itself prints information like your cloud org,
PPC name, and so on, in addition to the usuals.
I shall write up more details and make sure to document these changes.
This change also fairly significantly refactors the layout of cloud
versus local logic, so that the cmd/ package is responsible for CLI
things, and the new pkg/backend/ package is responsible for the
backends. The following is the overall resulting package architecture:
* The backend.Backend interface can be implemented to substitute
a new backend. This has operations to get and list stacks,
perform updates, and so on.
* The backend.Stack struct is a wrapper around a stack that has
or is being manipulated by a Backend. It resembles our existing
Stack notions in the engine, but carries additional metadata
about its source. Notably, it offers functions that allow
operations like updating and deleting on the Backend from which
it came.
* There is very little else in the pkg/backend/ package.
* A new package, pkg/backend/local/, encapsulates all local state
management for "fire and forget" scenarios. It simply implements
the above logic and contains anything specific to the local
experience.
* A peer package, pkg/backend/cloud/, encapsulates all logic
required for the cloud experience. This includes its subpackage
apitype/ which contains JSON schema descriptions required for
REST calls against the cloud backend. It also contains handy
functions to list which clouds we have authenticated with.
* A subpackage here, pkg/backend/state/, is not a provider at all.
Instead, it contains all of the state management functions that
are currently shared between local and cloud backends. This
includes configuration logic -- including encryption -- as well
as logic pertaining to which stacks are known to the workspace.
This addresses pulumi/pulumi#629 and pulumi/pulumi#494.
2017-12-02 16:29:46 +01:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
2017-11-01 21:34:19 +01:00
|
|
|
}
|
2018-09-14 05:54:42 +02:00
|
|
|
localStack, ok := stack.(*localStack)
|
|
|
|
contract.Assertf(ok, "localBackend GetStack returned non-localStack")
|
|
|
|
results = append(results, newLocalStackSummary(localStack))
|
2017-11-01 21:34:19 +01:00
|
|
|
}
|
|
|
|
|
Improve the overall cloud CLI experience
This improves the overall cloud CLI experience workflow.
Now whether a stack is local or cloud is inherent to the stack
itself. If you interact with a cloud stack, we transparently talk
to the cloud; if you interact with a local stack, we just do the
right thing, and perform all operations locally. Aside from sometimes
seeing a cloud emoji pop-up ☁️, the experience is quite similar.
For example, to initialize a new cloud stack, simply:
$ pulumi login
Logging into Pulumi Cloud: https://pulumi.com/
Enter Pulumi access token: <enter your token>
$ pulumi stack init my-cloud-stack
Note that you may log into a specific cloud if you'd like. For
now, this is just for our own testing purposes, but someday when we
support custom clouds (e.g., Enterprise), you can just say:
$ pulumi login --cloud-url https://corp.acme.my-ppc.net:9873
The cloud is now the default. If you instead prefer a "fire and
forget" style of stack, you can skip the login and pass `--local`:
$ pulumi stack init my-faf-stack --local
If you are logged in and run `pulumi`, we tell you as much:
$ pulumi
Usage:
pulumi [command]
// as before...
Currently logged into the Pulumi Cloud ☁️
https://pulumi.com/
And if you list your stacks, we tell you which one is local or not:
$ pulumi stack ls
NAME LAST UPDATE RESOURCE COUNT CLOUD URL
my-cloud-stack 2017-12-01 ... 3 https://pulumi.com/
my-faf-stack n/a 0 n/a
And `pulumi stack` by itself prints information like your cloud org,
PPC name, and so on, in addition to the usuals.
I shall write up more details and make sure to document these changes.
This change also fairly significantly refactors the layout of cloud
versus local logic, so that the cmd/ package is resonsible for CLI
things, and the new pkg/backend/ package is responsible for the
backends. The following is the overall resulting package architecture:
* The backend.Backend interface can be implemented to substitute
a new backend. This has operations to get and list stacks,
perform updates, and so on.
* The backend.Stack struct is a wrapper around a stack that has
or is being manipulated by a Backend. It resembles our existing
Stack notions in the engine, but carries additional metadata
about its source. Notably, it offers functions that allow
operations like updating and deleting on the Backend from which
it came.
* There is very little else in the pkg/backend/ package.
* A new package, pkg/backend/local/, encapsulates all local state
management for "fire and forget" scenarios. It simply implements
the above logic and contains anything specific to the local
experience.
* A peer package, pkg/backend/cloud/, encapsulates all logic
required for the cloud experience. This includes its subpackage
apitype/ which contains JSON schema descriptions required for
REST calls against the cloud backend. It also contains handy
functions to list which clouds we have authenticated with.
* A subpackage here, pkg/backend/state/, is not a provider at all.
Instead, it contains all of the state management functions that
are currently shared between local and cloud backends. This
includes configuration logic -- including encryption -- as well
as logic pertaining to which stacks are known to the workspace.
This addresses pulumi/pulumi#629 and pulumi/pulumi#494.
2017-12-02 16:29:46 +01:00
|
|
|
return results, nil
|
2017-11-01 21:34:19 +01:00
|
|
|
}
|
|
|
|
|
2019-10-14 23:30:42 +02:00
|
|
|
// RemoveStack removes the named stack's state from the backend.
//
// If force is false and the stack's snapshot still contains resources, the
// removal is refused. The boolean result reports whether the failure was due
// to remaining resources, so callers can suggest retrying with --force.
func (b *localBackend) RemoveStack(ctx context.Context, stack backend.Stack, force bool) (bool, error) {
	stackName := stack.Ref().Name()

	// Load the current snapshot so we can check whether it is safe to delete.
	snapshot, _, err := b.getStack(stackName)
	if err != nil {
		return false, err
	}

	// Don't remove stacks that still have resources.
	if !force && snapshot != nil && len(snapshot.Resources) > 0 {
		return true, errors.New("refusing to remove stack because it still contains resources")
	}

	return false, b.removeStack(stackName)
}
|
|
|
|
|
2019-10-14 23:30:42 +02:00
|
|
|
// RenameStack renames the given stack to newName. The URNs inside the stack's
// snapshot are rewritten to refer to the new name, the snapshot is saved under
// the new name, the old checkpoint file is backed up (not rewritten), and the
// stack's history folder is moved alongside.
func (b *localBackend) RenameStack(ctx context.Context, stack backend.Stack, newName tokens.QName) error {
	stackName := stack.Ref().Name()
	snap, _, err := b.getStack(stackName)
	if err != nil {
		return err
	}

	// Ensure the destination stack does not already exist.
	hasExisting, err := b.bucket.Exists(ctx, b.stackPath(newName))
	if err != nil {
		return err
	}
	if hasExisting {
		return errors.Errorf("a stack named %s already exists", newName)
	}

	// If we have a snapshot, we need to rename the URNs inside it to use the new stack name.
	if snap != nil {
		if err = edit.RenameStack(snap, newName, ""); err != nil {
			return err
		}
	}

	// Now save the snapshot with a new name (we pass nil to re-use the existing secrets manager from the snapshot).
	if _, err = b.saveStack(newName, snap, nil); err != nil {
		return err
	}

	// To remove the old stack, just make a backup of the file and don't write out anything new.
	file := b.stackPath(stackName)
	backupTarget(b.bucket, file)

	// And rename the history folder as well.
	return b.renameHistory(stackName, newName)
}
|
|
|
|
|
2018-04-27 01:13:52 +02:00
|
|
|
// GetLatestConfiguration returns the configuration recorded with the stack's
// most recent update, or backend.ErrNoPreviousDeployment if the stack has no
// update history at all.
func (b *localBackend) GetLatestConfiguration(ctx context.Context,
	stack backend.Stack) (config.Map, error) {

	hist, err := b.GetHistory(ctx, stack.Ref())
	if err != nil {
		return nil, err
	}
	if len(hist) == 0 {
		return nil, backend.ErrNoPreviousDeployment
	}

	// hist[0] is treated as the most recent entry here — presumably GetHistory
	// returns newest-first; confirm against its implementation.
	return hist[0].Config, nil
}
|
|
|
|
|
2019-06-24 06:39:22 +02:00
|
|
|
// PackPolicies is not supported by the file state backend; it always returns
// an error result. (Resource policy requires a backend that can host policy
// packs.)
func (b *localBackend) PackPolicies(
	ctx context.Context, policyPackRef backend.PolicyPackReference,
	cancellationScopes backend.CancellationScopeSource,
	callerEventsOpt chan<- engine.Event) result.Result {

	return result.Error("File state backend does not support resource policy")
}
|
|
|
|
|
2019-10-14 23:30:42 +02:00
|
|
|
func (b *localBackend) Preview(ctx context.Context, stack backend.Stack,
|
2019-03-20 00:21:50 +01:00
|
|
|
op backend.UpdateOperation) (engine.ResourceChanges, result.Result) {
|
2018-09-05 16:20:25 +02:00
|
|
|
// We can skip PreviewThenPromptThenExecute and just go straight to Execute.
|
2018-09-21 22:57:57 +02:00
|
|
|
opts := backend.ApplierOptions{
|
|
|
|
DryRun: true,
|
|
|
|
ShowLink: true,
|
|
|
|
}
|
|
|
|
return b.apply(ctx, apitype.PreviewUpdate, stack, op, opts, nil /*events*/)
|
2018-09-05 16:20:25 +02:00
|
|
|
}
|
2018-05-08 03:23:03 +02:00
|
|
|
|
2019-10-14 23:30:42 +02:00
|
|
|
// Update applies an update to the given stack: it previews the changes,
// prompts for confirmation, and then executes them.
func (b *localBackend) Update(ctx context.Context, stack backend.Stack,
	op backend.UpdateOperation) (engine.ResourceChanges, result.Result) {
	return backend.PreviewThenPromptThenExecute(ctx, apitype.UpdateUpdate, stack, op, b.apply)
}
|
|
|
|
|
2019-10-14 23:30:42 +02:00
|
|
|
// Refresh performs a refresh operation on the given stack, going through the
// same preview-then-prompt-then-execute flow as Update.
func (b *localBackend) Refresh(ctx context.Context, stack backend.Stack,
	op backend.UpdateOperation) (engine.ResourceChanges, result.Result) {
	return backend.PreviewThenPromptThenExecute(ctx, apitype.RefreshUpdate, stack, op, b.apply)
}
|
|
|
|
|
2019-10-14 23:30:42 +02:00
|
|
|
// Destroy tears down the given stack's resources, going through the same
// preview-then-prompt-then-execute flow as Update.
func (b *localBackend) Destroy(ctx context.Context, stack backend.Stack,
	op backend.UpdateOperation) (engine.ResourceChanges, result.Result) {
	return backend.PreviewThenPromptThenExecute(ctx, apitype.DestroyUpdate, stack, op, b.apply)
}
|
2018-01-08 22:01:40 +01:00
|
|
|
|
2019-08-12 09:22:42 +02:00
|
|
|
// Query runs the given query operation, delegating to the backend's internal
// query implementation with no extra event listener.
func (b *localBackend) Query(ctx context.Context, op backend.QueryOperation) result.Result {
	return b.query(ctx, op, nil /*events*/)
}
|
|
|
|
|
2019-11-06 21:56:29 +01:00
|
|
|
// Watch delegates to the shared backend.Watch loop, driving updates against
// the given stack via this backend's apply function.
func (b *localBackend) Watch(ctx context.Context, stack backend.Stack,
	op backend.UpdateOperation) result.Result {
	return backend.Watch(ctx, b, stack, op, b.apply)
}
|
|
|
|
|
2018-09-05 16:20:25 +02:00
|
|
|
// apply actually performs the provided type of update on a locally hosted stack.
|
2019-03-20 00:21:50 +01:00
|
|
|
func (b *localBackend) apply(
|
|
|
|
ctx context.Context, kind apitype.UpdateKind, stack backend.Stack,
|
|
|
|
op backend.UpdateOperation, opts backend.ApplierOptions,
|
|
|
|
events chan<- engine.Event) (engine.ResourceChanges, result.Result) {
|
|
|
|
|
2018-09-05 16:20:25 +02:00
|
|
|
stackRef := stack.Ref()
|
|
|
|
stackName := stackRef.Name()
|
2018-09-21 22:57:57 +02:00
|
|
|
actionLabel := backend.ActionLabel(kind, opts.DryRun)
|
2019-04-26 02:32:31 +02:00
|
|
|
|
2019-11-06 21:56:29 +01:00
|
|
|
if !(op.Opts.Display.JSONDisplay || op.Opts.Display.Type == display.DisplayWatch) {
|
2019-04-26 02:32:31 +02:00
|
|
|
// Print a banner so it's clear this is a local deployment.
|
|
|
|
fmt.Printf(op.Opts.Display.Color.Colorize(
|
|
|
|
colors.SpecHeadline+"%s (%s):"+colors.Reset+"\n"), actionLabel, stackRef)
|
|
|
|
}
|
2018-05-16 02:14:53 +02:00
|
|
|
|
2018-09-05 16:29:23 +02:00
|
|
|
// Start the update.
|
2019-04-19 00:57:54 +02:00
|
|
|
update, err := b.newUpdate(stackName, op)
|
2017-11-01 21:34:19 +01:00
|
|
|
if err != nil {
|
2019-03-20 00:21:50 +01:00
|
|
|
return nil, result.FromError(err)
|
2017-11-01 21:34:19 +01:00
|
|
|
}
|
|
|
|
|
2018-09-05 16:20:25 +02:00
|
|
|
// Spawn a display loop to show events on the CLI.
|
|
|
|
displayEvents := make(chan engine.Event)
|
|
|
|
displayDone := make(chan bool)
|
Make a smattering of CLI UX improvements
Since I was digging around over the weekend after the change to move
away from light black, and the impact it had on less important
information showing more prominently than it used to, I took a step
back and did a deeper tidying up of things. Another side goal of this
exercise was to be a little more respectful of terminal width; when
we could say things with fewer words, I did so.
* Stylize the preview/update summary differently, so that it stands
out as a section. Also highlight the total changes with bold -- it
turns out this has a similar effect to the bright white colorization,
just without the negative effects on e.g. white terminals.
* Eliminate some verbosity in the phrasing of change summaries.
* Make all heading sections stylized consistently. This includes
the color (bright magenta) and the vertical spacing (always a newline
separating headings). We were previously inconsistent on this (e.g.,
outputs were under "---outputs---"). Now the headings are:
Previewing (etc), Diagnostics, Outputs, Resources, Duration, and Permalink.
* Fix an issue where we'd parent things to "global" until the stack
object later showed up. Now we'll simply mock up a stack resource.
* Don't show messages like "no change" or "unchanged". Prior to the
light black removal, these faded into the background of the terminal.
Now they just clutter up the display. Similar to the elision of "*"
for OpSames in a prior commit, just leave these out. Now anything
that's written is actually a meaningful status for the user to note.
* Don't show the "3 info messages," etc. summaries in the Info column
while an update is ongoing. Instead, just show the latest line. This
is more respectful of width -- I often find that the important
messages scroll off the right of my screen before this change.
For discussion:
- I actually wonder if we should eliminate the summary
altogether and always just show the latest line. Or even
blank it out. The summary feels better suited for the
Diagnostics section, and the Status concisely tells us
how a resource's update ended up (failed, succeeded, etc).
- Similarly, I question the idea of showing only the "worst"
message. I'd vote for always showing the latest, and again
leaving it to the Status column for concisely telling the
user about the final state a resource ended up in.
* Stop prepending "info: " to every stdout/stderr message. It adds
no value, clutters up the display, and worsens horizontal usage.
* Lessen the verbosity of update headline messages, so we now instead
of e.g. "Previewing update of stack 'x':", we just say
"Previewing update (x):".
* Eliminate vertical whitespace in the Diagnostics section. Every
independent console.out previously was separated by an entire newline,
which made the section look cluttered to my eyes. These are just
streams of logs, there's no reason for the extra newlines.
* Colorize the resource headers in the Diagnostic section light blue.
Note that this will change various test baselines, which I will
update next. I didn't want those in the same commit.
2018-09-24 17:31:19 +02:00
|
|
|
go display.ShowEvents(
|
2018-10-30 23:42:33 +01:00
|
|
|
strings.ToLower(actionLabel), kind, stackName, op.Proj.Name,
|
|
|
|
displayEvents, displayDone, op.Opts.Display, opts.DryRun)
|
2018-09-05 16:20:25 +02:00
|
|
|
|
|
|
|
// Create a separate event channel for engine events that we'll pipe to both listening streams.
|
|
|
|
engineEvents := make(chan engine.Event)
|
2018-04-20 03:59:14 +02:00
|
|
|
|
2018-09-21 22:57:57 +02:00
|
|
|
scope := op.Scopes.NewScope(engineEvents, opts.DryRun)
|
2018-09-05 16:20:25 +02:00
|
|
|
eventsDone := make(chan bool)
|
|
|
|
go func() {
|
|
|
|
// Pull in all events from the engine and send them to the two listeners.
|
|
|
|
for e := range engineEvents {
|
|
|
|
displayEvents <- e
|
|
|
|
|
|
|
|
// If the caller also wants to see the events, stream them there also.
|
|
|
|
if events != nil {
|
|
|
|
events <- e
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
close(eventsDone)
|
|
|
|
}()
|
2017-11-01 21:34:19 +01:00
|
|
|
|
Revise the way previews are controlled
I found the flag --force to be a strange name for skipping a preview,
since that name is usually reserved for operations that might be harmful
and yet you're coercing a tool to do it anyway, knowing there's a chance
you're going to shoot yourself in the foot.
I also found that what I almost always want in the situation where
--force was being used is to actually just run a preview and have the
confirmation auto-accepted. Going straight to --force isn't the right
thing in a CI scenario, where you actually want to run a preview first,
just to ensure there aren't any issues, before doing the update.
In a sense, there are four options here:
1. Run a preview, ask for confirmation, then do an update (the default).
2. Run a preview, auto-accept, and then do an update (the CI scenario).
3. Just run a preview with neither a confirmation nor an update (dry run).
4. Just do an update, without performing a preview beforehand (rare).
This change enables all four workflows in our CLI.
Rather than have an explosion of flags, we have a single flag,
--preview, which can specify the mode that we're operating in. The
following are the values which correlate to the above four modes:
1. "": default (no --preview specified)
2. "auto": auto-accept preview confirmation
3. "only": only run a preview, don't confirm or update
4. "skip": skip the preview altogether
As part of this change, I redid a bit of how the preview modes
were specified. Rather than booleans, which had some illegal
combinations, this change introduces a new enum type. Furthermore,
because the engine is wholly ignorant of these flags -- and only the
backend understands them -- it was confusing to me that
engine.UpdateOptions stored this flag, especially given that all
interesting engine options _also_ accepted a dryRun boolean. As of
this change, the backend.PreviewBehavior controls the preview options.
2018-04-28 23:50:17 +02:00
|
|
|
// Create the management machinery.
|
2019-04-25 00:42:39 +02:00
|
|
|
persister := b.newSnapshotPersister(stackName, op.SecretsManager)
|
2018-05-18 20:15:35 +02:00
|
|
|
manager := backend.NewSnapshotManager(persister, update.GetTarget().Snapshot)
|
2018-11-14 22:33:35 +01:00
|
|
|
engineCtx := &engine.Context{
|
|
|
|
Cancel: scope.Context(),
|
|
|
|
Events: engineEvents,
|
|
|
|
SnapshotManager: manager,
|
|
|
|
BackendClient: backend.NewBackendClient(b),
|
|
|
|
}
|
Revise the way previews are controlled
I found the flag --force to be a strange name for skipping a preview,
since that name is usually reserved for operations that might be harmful
and yet you're coercing a tool to do it anyway, knowing there's a chance
you're going to shoot yourself in the foot.
I also found that what I almost always want in the situation where
--force was being used is to actually just run a preview and have the
confirmation auto-accepted. Going straight to --force isn't the right
thing in a CI scenario, where you actually want to run a preview first,
just to ensure there aren't any issues, before doing the update.
In a sense, there are four options here:
1. Run a preview, ask for confirmation, then do an update (the default).
2. Run a preview, auto-accept, and then do an update (the CI scenario).
3. Just run a preview with neither a confirmation nor an update (dry run).
4. Just do an update, without performing a preview beforehand (rare).
This change enables all four workflows in our CLI.
Rather than have an explosion of flags, we have a single flag,
--preview, which can specify the mode that we're operating in. The
following are the values which correlate to the above four modes:
1. "": default (no --preview specified)
2. "auto": auto-accept preview confirmation
3. "only": only run a preview, don't confirm or update
4. "skip": skip the preview altogether
As part of this change, I redid a bit of how the preview modes
were specified. Rather than booleans, which had some illegal
combinations, this change introduces a new enum type. Furthermore,
because the engine is wholly ignorant of these flags -- and only the
backend understands them -- it was confusing to me that
engine.UpdateOptions stored this flag, especially given that all
interesting engine options _also_ accepted a dryRun boolean. As of
this change, the backend.PreviewBehavior controls the preview options.
2018-04-28 23:50:17 +02:00
|
|
|
|
2018-03-08 22:56:59 +01:00
|
|
|
// Perform the update
|
2018-01-25 03:22:41 +01:00
|
|
|
start := time.Now().Unix()
|
2018-09-05 16:20:25 +02:00
|
|
|
var changes engine.ResourceChanges
|
2019-03-20 00:21:50 +01:00
|
|
|
var updateRes result.Result
|
2018-09-05 16:20:25 +02:00
|
|
|
switch kind {
|
|
|
|
case apitype.PreviewUpdate:
|
2019-03-20 00:21:50 +01:00
|
|
|
changes, updateRes = engine.Update(update, engineCtx, op.Opts.Engine, true)
|
2018-09-05 16:20:25 +02:00
|
|
|
case apitype.UpdateUpdate:
|
2019-03-20 00:21:50 +01:00
|
|
|
changes, updateRes = engine.Update(update, engineCtx, op.Opts.Engine, opts.DryRun)
|
2018-09-05 16:20:25 +02:00
|
|
|
case apitype.RefreshUpdate:
|
2019-03-20 00:21:50 +01:00
|
|
|
changes, updateRes = engine.Refresh(update, engineCtx, op.Opts.Engine, opts.DryRun)
|
2018-09-05 16:20:25 +02:00
|
|
|
case apitype.DestroyUpdate:
|
2019-03-20 00:21:50 +01:00
|
|
|
changes, updateRes = engine.Destroy(update, engineCtx, op.Opts.Engine, opts.DryRun)
|
2018-09-05 16:20:25 +02:00
|
|
|
default:
|
|
|
|
contract.Failf("Unrecognized update kind: %s", kind)
|
|
|
|
}
|
2018-01-25 03:22:41 +01:00
|
|
|
end := time.Now().Unix()
|
2017-11-01 21:34:19 +01:00
|
|
|
|
2018-09-05 16:20:25 +02:00
|
|
|
// Wait for the display to finish showing all the events.
|
|
|
|
<-displayDone
|
2018-09-11 01:42:22 +02:00
|
|
|
scope.Close() // Don't take any cancellations anymore, we're shutting down.
|
2018-09-05 16:20:25 +02:00
|
|
|
close(engineEvents)
|
2018-04-23 23:12:13 +02:00
|
|
|
contract.IgnoreClose(manager)
|
2017-11-01 21:34:19 +01:00
|
|
|
|
2018-09-05 16:20:25 +02:00
|
|
|
// Make sure the goroutine writing to displayEvents and events has exited before proceeding.
|
|
|
|
<-eventsDone
|
2018-10-12 19:29:47 +02:00
|
|
|
close(displayEvents)
|
2018-09-05 16:20:25 +02:00
|
|
|
|
2018-01-25 03:22:41 +01:00
|
|
|
// Save update results.
|
2019-03-13 22:00:01 +01:00
|
|
|
backendUpdateResult := backend.SucceededResult
|
2019-03-20 00:21:50 +01:00
|
|
|
if updateRes != nil {
|
2019-03-13 22:00:01 +01:00
|
|
|
backendUpdateResult = backend.FailedResult
|
2018-01-25 03:22:41 +01:00
|
|
|
}
|
|
|
|
info := backend.UpdateInfo{
|
2018-03-08 22:56:59 +01:00
|
|
|
Kind: kind,
|
|
|
|
StartTime: start,
|
2018-09-05 16:20:25 +02:00
|
|
|
Message: op.M.Message,
|
|
|
|
Environment: op.M.Environment,
|
2018-03-08 22:56:59 +01:00
|
|
|
Config: update.GetTarget().Config,
|
2019-03-13 22:00:01 +01:00
|
|
|
Result: backendUpdateResult,
|
2018-03-08 22:56:59 +01:00
|
|
|
EndTime: end,
|
2018-09-05 00:40:15 +02:00
|
|
|
// IDEA: it would be nice to populate the *Deployment, so that addToHistory below doesn't need to
|
2018-03-08 22:56:59 +01:00
|
|
|
// rudely assume it knows where the checkpoint file is on disk as it makes a copy of it. This isn't
|
|
|
|
// trivial to achieve today given the event driven nature of plan-walking, however.
|
2018-01-25 03:22:41 +01:00
|
|
|
ResourceChanges: changes,
|
|
|
|
}
|
2018-09-05 20:39:58 +02:00
|
|
|
|
2018-01-25 03:22:41 +01:00
|
|
|
var saveErr error
|
2018-02-21 06:05:57 +01:00
|
|
|
var backupErr error
|
2018-09-21 22:57:57 +02:00
|
|
|
if !opts.DryRun {
|
Remove the need to `pulumi init` for the local backend
This change removes the need to `pulumi init` when targeting the local
backend. A fair amount of the change lays the foundation that the next
set of changes to stop having `pulumi init` be used for cloud stacks
as well.
Previously, `pulumi init` logically did two things:
1. It created the bookkeeping directory for local stacks, this was
stored in `<repository-root>/.pulumi`, where `<repository-root>` was
the path to what we believed the "root" of your project was. In the
case of git repositories, this was the directory that contained your
`.git` folder.
2. It recorded repository information in
`<repository-root>/.pulumi/repository.json`. This was used by the
cloud backend when computing what project to interact with on
Pulumi.com
The new identity model will remove the need for (2), since we only
need an owner and stack name to fully qualify a stack on
pulumi.com, so it's easy enough to stop creating a folder just for
that.
However, for the local backend, we need to continue to retain some
information about stacks (e.g. checkpoints, history, etc). In
addition, we need to store our workspace settings (which today just
contains the selected stack) somewhere.
For state stored by the local backend, we change the URL scheme from
`local://` to `local://<optional-root-path>`. When
`<optional-root-path>` is unset, it defaults to `$HOME`. We create our
`.pulumi` folder in that directory. This is important because stack
names now must be unique within the backend, but we have some tests
using local stacks which use fixed stack names, so each integration
test really wants its own "view" of the world.
For the workspace settings, we introduce a new `workspaces` directory
in `~/.pulumi`. In this folder we write the workspace settings file
for each project. The file name is the name of the project, combined
with the SHA1 of the path of the project file on disk, to ensure that
multiple pulumi programs with the same project name have different
workspace settings.
This does mean that moving a project's location on disk will cause the
CLI to "forget" what the selected stack was, which is unfortunate, but
not the end of the world. If this ends up being a big pain point, we
can certainly try to play games in the future (for example, if we saw
a .git folder in a parent folder, we could store data in there).
With respect to compatibility, we don't attempt to migrate older files
to their newer locations. For long lived stacks managed using the
local backend, we can provide information on where to move things
to. For all stacks (regardless of backend) we'll require the user to
`pulumi stack select` their stack again, but that seems like the
correct trade-off vs writing complicated upgrade code.
2018-04-17 01:15:10 +02:00
|
|
|
saveErr = b.addToHistory(stackName, info)
|
|
|
|
backupErr = b.backupStack(stackName)
|
2018-01-25 03:22:41 +01:00
|
|
|
}
|
|
|
|
|
2019-03-20 00:21:50 +01:00
|
|
|
if updateRes != nil {
|
2018-02-21 06:05:57 +01:00
|
|
|
// We swallow saveErr and backupErr as they are less important than the updateErr.
|
2019-03-20 00:21:50 +01:00
|
|
|
return changes, updateRes
|
2018-01-25 03:22:41 +01:00
|
|
|
}
|
2018-09-05 20:39:58 +02:00
|
|
|
|
2018-02-21 06:05:57 +01:00
|
|
|
if saveErr != nil {
|
|
|
|
// We swallow backupErr as it is less important than the saveErr.
|
2019-03-20 00:21:50 +01:00
|
|
|
return changes, result.FromError(errors.Wrap(saveErr, "saving update info"))
|
2018-02-21 06:05:57 +01:00
|
|
|
}
|
2018-09-05 20:39:58 +02:00
|
|
|
|
|
|
|
if backupErr != nil {
|
2019-03-20 00:21:50 +01:00
|
|
|
return changes, result.FromError(errors.Wrap(backupErr, "saving backup"))
|
2018-09-05 20:39:58 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Make sure to print a link to the stack's checkpoint before exiting.
|
2019-04-26 02:32:31 +02:00
|
|
|
if opts.ShowLink && !op.Opts.Display.JSONDisplay {
|
2019-04-25 05:55:39 +02:00
|
|
|
// Note we get a real signed link for aws/azure/gcp links. But no such option exists for
|
|
|
|
// file:// links so we manually create the link ourselves.
|
|
|
|
var link string
|
2019-05-20 20:46:00 +02:00
|
|
|
if strings.HasPrefix(b.url, FilePathPrefix) {
|
2019-04-25 05:55:39 +02:00
|
|
|
u, _ := url.Parse(b.url)
|
2019-05-20 20:46:00 +02:00
|
|
|
u.Path = filepath.ToSlash(path.Join(u.Path, b.stackPath(stackName)))
|
2019-04-25 05:55:39 +02:00
|
|
|
link = u.String()
|
|
|
|
} else {
|
|
|
|
link, err = b.bucket.SignedURL(context.TODO(), b.stackPath(stackName), nil)
|
|
|
|
if err != nil {
|
2019-12-16 18:47:31 +01:00
|
|
|
// we log a warning here rather than returning an error to avoid exiting
|
|
|
|
// pulumi with an error code.
|
|
|
|
// printing a statefile perma link happens after all the providers have finished
|
|
|
|
// deploying the infrastructure, failing the pulumi update because there was a
|
|
|
|
// problem printing a statefile permalink can be misleading in automated CI environments.
|
|
|
|
cmdutil.Diag().Warningf(diag.Message("", "Could not get signed url for stack location: %v"), err)
|
2019-04-25 05:55:39 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-04-26 02:32:31 +02:00
|
|
|
fmt.Printf(op.Opts.Display.Color.Colorize(
|
|
|
|
colors.SpecHeadline+"Permalink: "+
|
|
|
|
colors.Underline+colors.BrightBlue+"%s"+colors.Reset+"\n"), link)
|
2018-09-05 20:39:58 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return changes, nil
|
2018-01-25 03:22:41 +01:00
|
|
|
}
|
|
|
|
|
2019-04-30 20:48:41 +02:00
|
|
|
// query executes a query program against the resource outputs of a locally hosted stack.
|
2019-08-12 09:22:42 +02:00
|
|
|
func (b *localBackend) query(ctx context.Context, op backend.QueryOperation,
|
2019-10-24 00:15:04 +02:00
|
|
|
callerEventsOpt chan<- engine.Event) result.Result {
|
|
|
|
|
2019-10-29 23:19:44 +01:00
|
|
|
return backend.RunQuery(ctx, b, op, callerEventsOpt, b.newQuery)
|
2019-04-30 20:48:41 +02:00
|
|
|
}
|
|
|
|
|
2018-05-08 03:23:03 +02:00
|
|
|
func (b *localBackend) GetHistory(ctx context.Context, stackRef backend.StackReference) ([]backend.UpdateInfo, error) {
|
2018-09-05 16:20:25 +02:00
|
|
|
stackName := stackRef.Name()
|
Remove the need to `pulumi init` for the local backend
This change removes the need to `pulumi init` when targeting the local
backend. A fair amount of the change lays the foundation that the next
set of changes to stop having `pulumi init` be used for cloud stacks
as well.
Previously, `pulumi init` logically did two things:
1. It created the bookkeeping directory for local stacks, this was
stored in `<repository-root>/.pulumi`, where `<repository-root>` was
the path to what we belived the "root" of your project was. In the
case of git repositories, this was the directory that contained your
`.git` folder.
2. It recorded repository information in
`<repository-root>/.pulumi/repository.json`. This was used by the
cloud backend when computing what project to interact with on
Pulumi.com
The new identity model will remove the need for (2), since we only
need an owner and stack name to fully qualify a stack on
pulumi.com, so it's easy enough to stop creating a folder just for
that.
However, for the local backend, we need to continue to retain some
information about stacks (e.g. checkpoints, history, etc). In
addition, we need to store our workspace settings (which today just
contains the selected stack) somehere.
For state stored by the local backend, we change the URL scheme from
`local://` to `local://<optional-root-path>`. When
`<optional-root-path>` is unset, it defaults to `$HOME`. We create our
`.pulumi` folder in that directory. This is important because stack
names now must be unique within the backend, but we have some tests
using local stacks which use fixed stack names, so each integration
test really wants its own "view" of the world.
For the workspace settings, we introduce a new `workspaces` directory
in `~/.pulumi`. In this folder we write the workspace settings file
for each project. The file name is the name of the project, combined
with the SHA1 of the path of the project file on disk, to ensure that
multiple pulumi programs with the same project name have different
workspace settings.
This does mean that moving a project's location on disk will cause the
CLI to "forget" what the selected stack was, which is unfortunate, but
not the end of the world. If this ends up being a big pain point, we
can certianly try to play games in the future (for example, if we saw
a .git folder in a parent folder, we could store data in there).
With respect to compatibility, we don't attempt to migrate older files
to their newer locations. For long lived stacks managed using the
local backend, we can provide information on where to move things
to. For all stacks (regardless of backend) we'll require the user to
`pulumi stack select` their stack again, but that seems like the
correct trade-off vs writing complicated upgrade code.
2018-04-17 01:15:10 +02:00
|
|
|
updates, err := b.getHistory(stackName)
|
2018-01-25 03:22:41 +01:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return updates, nil
|
2017-11-01 21:34:19 +01:00
|
|
|
}
|
2017-11-01 22:55:16 +01:00
|
|
|
|
2019-10-14 23:30:42 +02:00
|
|
|
func (b *localBackend) GetLogs(ctx context.Context, stack backend.Stack, cfg backend.StackConfiguration,
|
2018-04-18 01:37:52 +02:00
|
|
|
query operations.LogQuery) ([]operations.LogEntry, error) {
|
|
|
|
|
2019-10-14 23:30:42 +02:00
|
|
|
stackName := stack.Ref().Name()
|
2019-04-19 00:57:54 +02:00
|
|
|
target, err := b.getTarget(stackName, cfg.Config, cfg.Decrypter)
|
2017-11-09 21:38:03 +01:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2018-03-27 23:28:35 +02:00
|
|
|
return GetLogsForTarget(target, query)
|
|
|
|
}
|
|
|
|
|
|
|
|
// GetLogsForTarget fetches stack logs using the config, decrypter, and checkpoint in the given target.
|
|
|
|
func GetLogsForTarget(target *deploy.Target, query operations.LogQuery) ([]operations.LogEntry, error) {
|
2017-11-09 21:38:03 +01:00
|
|
|
contract.Assert(target != nil)
|
2018-01-08 22:01:40 +01:00
|
|
|
contract.Assert(target.Snapshot != nil)
|
2017-11-09 21:38:03 +01:00
|
|
|
|
2017-12-05 02:10:40 +01:00
|
|
|
config, err := target.Config.Decrypt(target.Decrypter)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2018-01-08 22:01:40 +01:00
|
|
|
components := operations.NewResourceTree(target.Snapshot.Resources)
|
2017-12-05 02:10:40 +01:00
|
|
|
ops := components.OperationsProvider(config)
|
2017-11-20 07:28:49 +01:00
|
|
|
logs, err := ops.GetLogs(query)
|
|
|
|
if logs == nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return *logs, err
|
2017-11-09 21:38:03 +01:00
|
|
|
}
|
|
|
|
|
2018-05-08 03:23:03 +02:00
|
|
|
func (b *localBackend) ExportDeployment(ctx context.Context,
|
2019-10-14 23:30:42 +02:00
|
|
|
stk backend.Stack) (*apitype.UntypedDeployment, error) {
|
2018-05-08 03:23:03 +02:00
|
|
|
|
2019-10-14 23:30:42 +02:00
|
|
|
stackName := stk.Ref().Name()
|
2019-04-18 23:01:27 +02:00
|
|
|
snap, _, err := b.getStack(stackName)
|
2018-01-05 21:46:13 +01:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2018-03-03 21:12:54 +01:00
|
|
|
|
2018-05-25 22:29:59 +02:00
|
|
|
if snap == nil {
|
2019-04-24 21:13:00 +02:00
|
|
|
snap = deploy.NewSnapshot(deploy.Manifest{}, nil, nil, nil)
|
2018-05-25 22:29:59 +02:00
|
|
|
}
|
|
|
|
|
Use existing secrets manager when roundtripping
There are a few operations we do (stack rename, importing and edits)
where we will materialize a `deploy.Snapshot` from an existing
deployment, mutate it in somewhay, and then store it.
In these cases, we will just re-use the secrets manager that was used
to build the snapshot when we re-serialize it. This is less than ideal
in some cases, because many of these operations could run on an
"encrypted" copy of the Snapshot, where Inputs and Outputs have not
been decrypted.
Unfortunately, our system now is not set up in a great way to support
this and adding something like a `deploy.EncryptedSnapshot` would
require large scale code duplications.
So, for now, we'll take the hit of decrypting and re-encrypting, but
long term introducing a `deploy.EncryptedSnapshot` may be nice as it
would let us elide the encryption/decryption steps in some places and
would also make it clear what parts of our system have access to the
plaintext values of secrets.
2019-04-24 21:21:30 +02:00
|
|
|
sdep, err := stack.SerializeDeployment(snap, snap.SecretsManager)
|
2019-04-17 22:48:38 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Wrap(err, "serializing deployment")
|
|
|
|
}
|
|
|
|
|
|
|
|
data, err := json.Marshal(sdep)
|
2018-03-03 21:12:54 +01:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2018-04-18 01:01:52 +02:00
|
|
|
return &apitype.UntypedDeployment{
|
Implement more precise delete-before-replace semantics. (#2369)
This implements the new algorithm for deciding which resources must be
deleted due to a delete-before-replace operation.
We need to compute the set of resources that may be replaced by a
change to the resource under consideration. We do this by taking the
complete set of transitive dependents on the resource under
consideration and removing any resources that would not be replaced by
changes to their dependencies. We determine whether or not a resource
may be replaced by substituting unknowns for input properties that may
change due to deletion of the resources their value depends on and
calling the resource provider's Diff method.
This is perhaps clearer when described by example. Consider the
following dependency graph:
A
__|__
B C
| _|_
D E F
In this graph, all of B, C, D, E, and F transitively depend on A. It may
be the case, however, that changes to the specific properties of any of
those resources R that would occur if a resource on the path to A were
deleted and recreated may not cause R to be replaced. For example, the
edge from B to A may be a simple dependsOn edge such that a change to
B does not actually influence any of B's input properties. In that case,
neither B nor D would need to be deleted before A could be deleted.
In order to make the above algorithm a reality, the resource monitor
interface has been updated to include a map that associates an input
property key with the list of resources that input property depends on.
Older clients of the resource monitor will leave this map empty, in
which case all input properties will be treated as depending on all
dependencies of the resource. This is probably overly conservative, but
it is less conservative than what we currently implement, and is
certainly correct.
2019-01-28 18:46:30 +01:00
|
|
|
Version: 3,
|
2018-04-18 01:01:52 +02:00
|
|
|
Deployment: json.RawMessage(data),
|
|
|
|
}, nil
|
2018-01-05 21:46:13 +01:00
|
|
|
}
|
|
|
|
|
2019-10-14 23:30:42 +02:00
|
|
|
func (b *localBackend) ImportDeployment(ctx context.Context, stk backend.Stack,
|
2018-05-08 03:23:03 +02:00
|
|
|
deployment *apitype.UntypedDeployment) error {
|
|
|
|
|
2019-10-14 23:30:42 +02:00
|
|
|
stackName := stk.Ref().Name()
|
2019-04-18 23:01:27 +02:00
|
|
|
_, _, err := b.getStack(stackName)
|
2018-01-05 21:46:13 +01:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2019-08-01 19:33:52 +02:00
|
|
|
snap, err := stack.DeserializeUntypedDeployment(deployment, stack.DefaultSecretsProvider)
|
2018-01-05 21:46:13 +01:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
Use existing secrets manager when roundtripping
There are a few operations we do (stack rename, importing and edits)
where we will materialize a `deploy.Snapshot` from an existing
deployment, mutate it in somewhay, and then store it.
In these cases, we will just re-use the secrets manager that was used
to build the snapshot when we re-serialize it. This is less than ideal
in some cases, because many of these operations could run on an
"encrypted" copy of the Snapshot, where Inputs and Outputs have not
been decrypted.
Unfortunately, our system now is not set up in a great way to support
this and adding something like a `deploy.EncryptedSnapshot` would
require large scale code duplications.
So, for now, we'll take the hit of decrypting and re-encrypting, but
long term introducing a `deploy.EncryptedSnapshot` may be nice as it
would let us elide the encryption/decryption steps in some places and
would also make it clear what parts of our system have access to the
plaintext values of secrets.
2019-04-24 21:21:30 +02:00
|
|
|
_, err = b.saveStack(stackName, snap, snap.SecretsManager)
|
Make some stack-related CLI improvements (#947)
This change includes a handful of stack-related CLI formatting
improvements that I've been noodling on in the background for a while,
based on things that tend to trip up demos and the inner loop workflow.
This includes:
* If `pulumi stack select` is run by itself, use an interactive
CLI menu to let the user select an existing stack, or choose to
create a new one. This looks as follows
$ pulumi stack select
Please choose a stack, or choose to create a new one:
abcdef
babblabblabble
> currentlyselected
defcon
<create a new stack>
and is navigated in the usual way (key up, down, enter).
* If a stack name is passed that does not exist, prompt the user
to ask whether s/he wants to create one on-demand. This hooks
interesting moments in time, like `pulumi stack select foo`,
and cuts down on the need to run additional commands.
* If a current stack is required, but none is currently selected,
then pop the same interactive menu shown above to select one.
Depending on the command being run, we may or may not show the
option to create a new stack (e.g., that doesn't make much sense
when you're running `pulumi destroy`, but might when you're
running `pulumi stack`). This again lets you do with a single
command what would have otherwise entailed an error with multiple
commands to recover from it.
* If you run `pulumi stack init` without any additional arguments,
we interactively prompt for the stack name. Before, we would
error and you'd then need to run `pulumi stack init <name>`.
* Colorize some things nicely; for example, now all prompts will
by default become bright white.
2018-02-17 00:03:54 +01:00
|
|
|
return err
|
2017-11-09 21:38:03 +01:00
|
|
|
}
|
|
|
|
|
2018-04-05 00:31:01 +02:00
|
|
|
// Logout deletes the locally stored account entry associated with this
// backend's original URL.
func (b *localBackend) Logout() error {
	return workspace.DeleteAccount(b.originalURL)
}
|
|
|
|
|
2018-06-15 17:44:33 +02:00
|
|
|
func (b *localBackend) CurrentUser() (string, error) {
|
2018-09-04 19:44:25 +02:00
|
|
|
user, err := user.Current()
|
|
|
|
if err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
return user.Username, nil
|
2018-06-15 17:44:33 +02:00
|
|
|
}
|
|
|
|
|
Remove the need to `pulumi init` for the local backend
This change removes the need to `pulumi init` when targeting the local
backend. A fair amount of the change lays the foundation that the next
set of changes to stop having `pulumi init` be used for cloud stacks
as well.
Previously, `pulumi init` logically did two things:
1. It created the bookkeeping directory for local stacks, this was
stored in `<repository-root>/.pulumi`, where `<repository-root>` was
the path to what we belived the "root" of your project was. In the
case of git repositories, this was the directory that contained your
`.git` folder.
2. It recorded repository information in
`<repository-root>/.pulumi/repository.json`. This was used by the
cloud backend when computing what project to interact with on
Pulumi.com
The new identity model will remove the need for (2), since we only
need an owner and stack name to fully qualify a stack on
pulumi.com, so it's easy enough to stop creating a folder just for
that.
However, for the local backend, we need to continue to retain some
information about stacks (e.g. checkpoints, history, etc). In
addition, we need to store our workspace settings (which today just
contains the selected stack) somehere.
For state stored by the local backend, we change the URL scheme from
`local://` to `local://<optional-root-path>`. When
`<optional-root-path>` is unset, it defaults to `$HOME`. We create our
`.pulumi` folder in that directory. This is important because stack
names now must be unique within the backend, but we have some tests
using local stacks which use fixed stack names, so each integration
test really wants its own "view" of the world.
For the workspace settings, we introduce a new `workspaces` directory
in `~/.pulumi`. In this folder we write the workspace settings file
for each project. The file name is the name of the project, combined
with the SHA1 of the path of the project file on disk, to ensure that
multiple pulumi programs with the same project name have different
workspace settings.
This does mean that moving a project's location on disk will cause the
CLI to "forget" what the selected stack was, which is unfortunate, but
not the end of the world. If this ends up being a big pain point, we
can certianly try to play games in the future (for example, if we saw
a .git folder in a parent folder, we could store data in there).
With respect to compatibility, we don't attempt to migrate older files
to their newer locations. For long lived stacks managed using the
local backend, we can provide information on where to move things
to. For all stacks (regardless of backend) we'll require the user to
`pulumi stack select` their stack again, but that seems like the
correct trade-off vs writing complicated upgrade code.
2018-04-17 01:15:10 +02:00
|
|
|
func (b *localBackend) getLocalStacks() ([]tokens.QName, error) {
|
2017-11-01 22:55:16 +01:00
|
|
|
var stacks []tokens.QName
|
|
|
|
|
|
|
|
// Read the stack directory.
|
Remove the need to `pulumi init` for the local backend
This change removes the need to `pulumi init` when targeting the local
backend. A fair amount of the change lays the foundation that the next
set of changes to stop having `pulumi init` be used for cloud stacks
as well.
Previously, `pulumi init` logically did two things:
1. It created the bookkeeping directory for local stacks, this was
stored in `<repository-root>/.pulumi`, where `<repository-root>` was
the path to what we belived the "root" of your project was. In the
case of git repositories, this was the directory that contained your
`.git` folder.
2. It recorded repository information in
`<repository-root>/.pulumi/repository.json`. This was used by the
cloud backend when computing what project to interact with on
Pulumi.com
The new identity model will remove the need for (2), since we only
need an owner and stack name to fully qualify a stack on
pulumi.com, so it's easy enough to stop creating a folder just for
that.
However, for the local backend, we need to continue to retain some
information about stacks (e.g. checkpoints, history, etc). In
addition, we need to store our workspace settings (which today just
contains the selected stack) somehere.
For state stored by the local backend, we change the URL scheme from
`local://` to `local://<optional-root-path>`. When
`<optional-root-path>` is unset, it defaults to `$HOME`. We create our
`.pulumi` folder in that directory. This is important because stack
names now must be unique within the backend, but we have some tests
using local stacks which use fixed stack names, so each integration
test really wants its own "view" of the world.
For the workspace settings, we introduce a new `workspaces` directory
in `~/.pulumi`. In this folder we write the workspace settings file
for each project. The file name is the name of the project, combined
with the SHA1 of the path of the project file on disk, to ensure that
multiple pulumi programs with the same project name have different
workspace settings.
This does mean that moving a project's location on disk will cause the
CLI to "forget" what the selected stack was, which is unfortunate, but
not the end of the world. If this ends up being a big pain point, we
can certianly try to play games in the future (for example, if we saw
a .git folder in a parent folder, we could store data in there).
With respect to compatibility, we don't attempt to migrate older files
to their newer locations. For long lived stacks managed using the
local backend, we can provide information on where to move things
to. For all stacks (regardless of backend) we'll require the user to
`pulumi stack select` their stack again, but that seems like the
correct trade-off vs writing complicated upgrade code.
2018-04-17 01:15:10 +02:00
|
|
|
path := b.stackPath("")
|
2017-11-01 22:55:16 +01:00
|
|
|
|
2019-04-25 05:55:39 +02:00
|
|
|
files, err := listBucket(b.bucket, path)
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Wrap(err, "error listing stacks")
|
2017-11-01 22:55:16 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
for _, file := range files {
|
|
|
|
// Ignore directories.
|
2019-04-25 05:55:39 +02:00
|
|
|
if file.IsDir {
|
2017-11-01 22:55:16 +01:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// Skip files without valid extensions (e.g., *.bak files).
|
2019-04-25 05:55:39 +02:00
|
|
|
stackfn := objectName(file)
|
2017-11-01 22:55:16 +01:00
|
|
|
ext := filepath.Ext(stackfn)
|
|
|
|
if _, has := encoding.Marshalers[ext]; !has {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// Read in this stack's information.
|
|
|
|
name := tokens.QName(stackfn[:len(stackfn)-len(ext)])
|
2019-04-18 23:01:27 +02:00
|
|
|
_, _, err := b.getStack(name)
|
2017-11-01 22:55:16 +01:00
|
|
|
if err != nil {
|
2018-05-16 00:28:00 +02:00
|
|
|
logging.V(5).Infof("error reading stack: %v (%v) skipping", name, err)
|
2017-11-01 22:55:16 +01:00
|
|
|
continue // failure reading the stack information.
|
|
|
|
}
|
|
|
|
|
|
|
|
stacks = append(stacks, name)
|
|
|
|
}
|
|
|
|
|
|
|
|
return stacks, nil
|
|
|
|
}
|
2019-01-04 22:23:47 +01:00
|
|
|
|
|
|
|
// GetStackTags fetches the stack's existing tags.
|
|
|
|
func (b *localBackend) GetStackTags(ctx context.Context,
|
2019-10-14 23:30:42 +02:00
|
|
|
stack backend.Stack) (map[apitype.StackTagName]string, error) {
|
2019-01-04 22:23:47 +01:00
|
|
|
|
|
|
|
// The local backend does not currently persist tags.
|
2019-02-20 01:07:57 +01:00
|
|
|
return nil, errors.New("stack tags not supported in --local mode")
|
2019-01-04 22:23:47 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// UpdateStackTags updates the stacks's tags, replacing all existing tags.
|
|
|
|
func (b *localBackend) UpdateStackTags(ctx context.Context,
|
2019-10-14 23:30:42 +02:00
|
|
|
stack backend.Stack, tags map[apitype.StackTagName]string) error {
|
2019-01-04 22:23:47 +01:00
|
|
|
|
|
|
|
// The local backend does not currently persist tags.
|
2019-02-20 01:07:57 +01:00
|
|
|
return errors.New("stack tags not supported in --local mode")
|
2019-01-04 22:23:47 +01:00
|
|
|
}
|