2018-05-22 21:43:36 +02:00
|
|
|
// Copyright 2016-2018, Pulumi Corporation.
|
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
2017-08-30 03:24:12 +02:00
|
|
|
|
2017-08-23 01:56:15 +02:00
|
|
|
package engine
|
|
|
|
|
|
|
|
import (
|
2019-07-01 01:34:39 +02:00
|
|
|
"context"
|
2020-03-08 22:11:55 +01:00
|
|
|
"encoding/json"
|
2020-01-30 22:31:41 +01:00
|
|
|
"fmt"
|
2019-12-16 23:51:02 +01:00
|
|
|
"path/filepath"
|
2020-03-08 22:11:55 +01:00
|
|
|
"sort"
|
2020-01-30 22:31:41 +01:00
|
|
|
"strings"
|
2018-08-07 01:46:17 +02:00
|
|
|
"sync"
|
2017-08-23 01:56:15 +02:00
|
|
|
|
Implement query primitives in the engine
`pulumi query` is designed, essentially, as a souped-up `exec`. We
execute a query program, and add a few convenience constructs (e.g., the
default providers that give you access to things like `getStack`).
Early in the design process, we decided to not re-use the `up`/update
path, both to minimize risk to update operations, and to simplify the
implementation.
This commit will add this "parallel query universe" into the engine
package. In particular, this includes:
* `QuerySource`, which executes the language provider running the query
program, and providing it with some simple constructs, such as the
default provider, which provides access to `getStack`. This is much
like a very simplified `EvalSource`, though notably without any of the
planning/step execution machinery.
* `queryResmon`, which disallows all resource operations, except the
`Invoke` that retrieves the resource outputs of some stack's last
snapshot. This is much like a simplified `resmon`, but without any of
the provider resolution, and without and support for resource
operations generally.
* Various static functions that pull together miscellaneous things
needed to execute a query program. Notably, this includes gathering
language plugins.
2019-04-30 20:07:56 +02:00
|
|
|
"github.com/blang/semver"
|
2019-12-16 23:51:02 +01:00
|
|
|
"github.com/pkg/errors"
|
2020-04-14 10:30:25 +02:00
|
|
|
resourceanalyzer "github.com/pulumi/pulumi/pkg/v2/resource/analyzer"
|
|
|
|
"github.com/pulumi/pulumi/pkg/v2/resource/deploy"
|
|
|
|
"github.com/pulumi/pulumi/sdk/v2/go/common/diag"
|
|
|
|
"github.com/pulumi/pulumi/sdk/v2/go/common/resource"
|
|
|
|
"github.com/pulumi/pulumi/sdk/v2/go/common/resource/plugin"
|
|
|
|
"github.com/pulumi/pulumi/sdk/v2/go/common/tokens"
|
|
|
|
"github.com/pulumi/pulumi/sdk/v2/go/common/util/contract"
|
|
|
|
"github.com/pulumi/pulumi/sdk/v2/go/common/util/logging"
|
|
|
|
"github.com/pulumi/pulumi/sdk/v2/go/common/util/result"
|
|
|
|
"github.com/pulumi/pulumi/sdk/v2/go/common/workspace"
|
2017-08-23 01:56:15 +02:00
|
|
|
)
|
|
|
|
|
2019-07-01 01:34:39 +02:00
|
|
|
// RequiredPolicy represents a set of policies to apply during an update.
// Implementations are expected to be able to install the policy pack on demand
// and to expose the configuration it should run with.
type RequiredPolicy interface {
	// Name provides the user-specified name of the PolicyPack.
	Name() string
	// Version of the PolicyPack.
	Version() string
	// Install will install the PolicyPack locally, returning the path it was installed to.
	Install(ctx context.Context) (string, error)
	// Config returns the PolicyPack's configuration, keyed by policy name; values are
	// raw JSON so that decoding can be deferred to the consumer.
	Config() map[string]*json.RawMessage
}
|
|
|
|
|
2020-01-30 22:31:41 +01:00
|
|
|
// LocalPolicyPack represents a set of local Policy Packs to apply during an update.
type LocalPolicyPack struct {
	// Name provides the user-specified name of the Policy Pack.
	// NOTE: MakeLocalPolicyPacks leaves this empty; it is only known once the Policy
	// Pack plugin has been loaded.
	Name string
	// Path of the local Policy Pack.
	Path string
	// Path of the local Policy Pack's JSON config file (empty if none was supplied).
	Config string
}
|
|
|
|
|
|
|
|
// MakeLocalPolicyPacks is a helper function for converting the list of local Policy
|
|
|
|
// Pack paths to list of LocalPolicyPack. The name of the Local Policy Pack is not set
|
|
|
|
// since we must load up the Policy Pack plugin to determine its name.
|
2020-03-08 22:11:55 +01:00
|
|
|
func MakeLocalPolicyPacks(localPaths []string, configPaths []string) []LocalPolicyPack {
|
|
|
|
// If we have any configPaths, we should have already validated that the length of
|
|
|
|
// the localPaths and configPaths are the same.
|
|
|
|
contract.Assert(len(configPaths) == 0 || len(configPaths) == len(localPaths))
|
|
|
|
|
2020-01-30 22:31:41 +01:00
|
|
|
r := make([]LocalPolicyPack, len(localPaths))
|
|
|
|
for i, p := range localPaths {
|
2020-03-08 22:11:55 +01:00
|
|
|
var config string
|
|
|
|
if len(configPaths) > 0 {
|
|
|
|
config = configPaths[i]
|
|
|
|
}
|
2020-01-30 22:31:41 +01:00
|
|
|
r[i] = LocalPolicyPack{
|
2020-03-08 22:11:55 +01:00
|
|
|
Path: p,
|
|
|
|
Config: config,
|
2020-01-30 22:31:41 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return r
|
|
|
|
}
|
|
|
|
|
|
|
|
// ConvertLocalPolicyPacksToPaths is a helper function for converting the list of LocalPolicyPacks
|
|
|
|
// to a list of paths.
|
|
|
|
func ConvertLocalPolicyPacksToPaths(localPolicyPack []LocalPolicyPack) []string {
|
|
|
|
r := make([]string, len(localPolicyPack))
|
|
|
|
for i, p := range localPolicyPack {
|
|
|
|
r[i] = p.Name
|
|
|
|
}
|
|
|
|
return r
|
|
|
|
}
|
|
|
|
|
2018-01-18 20:10:15 +01:00
|
|
|
// UpdateOptions contains all the settings for customizing how an update (deploy, preview, or destroy) is performed.
//
// This structure is embedded in another which uses some of the unexported fields, which trips up the `structcheck`
// linter.
// nolint: structcheck
type UpdateOptions struct {
	// LocalPolicyPacks contains an optional set of policy packs to run as part of this deployment.
	LocalPolicyPacks []LocalPolicyPack

	// RequiredPolicies is the set of policies that are required to run as part of the update.
	RequiredPolicies []RequiredPolicy

	// the degree of parallelism for resource operations (<=1 for serial).
	Parallel int

	// true if debugging output is enabled
	Debug bool

	// true if the plan should refresh before executing.
	Refresh bool

	// Specific resources to refresh during a refresh operation.
	RefreshTargets []resource.URN

	// Specific resources to replace during an update operation.
	ReplaceTargets []resource.URN

	// Specific resources to destroy during a destroy operation.
	DestroyTargets []resource.URN

	// Specific resources to update during an update operation.
	UpdateTargets []resource.URN

	// true if we're allowing dependent targets to change, even if not specified in one of the above
	// XXXTargets lists.
	TargetDependents bool

	// true if the engine should use legacy diffing behavior during an update.
	UseLegacyDiff bool

	// true if the engine should disable provider previews.
	DisableProviderPreview bool

	// true if we should report events for steps that involve default providers.
	reportDefaultProviderSteps bool

	// the plugin host to use for this update
	Host plugin.Host
}
|
|
|
|
|
2018-01-20 21:07:03 +01:00
|
|
|
// ResourceChanges contains the aggregate resource changes by operation type.
// Keys are step operations (create, update, delete, same, ...); values are the
// number of resources the operation applied to.
type ResourceChanges map[deploy.StepOp]int
|
|
|
|
|
2018-05-05 20:57:09 +02:00
|
|
|
// HasChanges returns true if there are any non-same changes in the resulting summary.
|
|
|
|
func (changes ResourceChanges) HasChanges() bool {
|
|
|
|
var c int
|
|
|
|
for op, count := range changes {
|
2019-09-07 07:10:34 +02:00
|
|
|
if op != deploy.OpSame &&
|
|
|
|
op != deploy.OpRead &&
|
|
|
|
op != deploy.OpReadDiscard &&
|
|
|
|
op != deploy.OpReadReplacement {
|
2018-05-05 20:57:09 +02:00
|
|
|
c += count
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return c > 0
|
|
|
|
}
|
|
|
|
|
2019-03-20 00:21:50 +01:00
|
|
|
func Update(u UpdateInfo, ctx *Context, opts UpdateOptions, dryRun bool) (ResourceChanges, result.Result) {
|
General prep work for refresh
This change includes a bunch of refactorings I made in prep for
doing refresh (first, the command, see pulumi/pulumi#1081):
* The primary change is to change the way the engine's core update
functionality works with respect to deploy.Source. This is the
way we can plug in new sources of resource information during
planning (and, soon, diffing). The way I intend to model refresh
is by having a new kind of source, deploy.RefreshSource, which
will let us do virtually everything about an update/diff the same
way with refreshes, which avoid otherwise duplicative effort.
This includes changing the planOptions (nee deployOptions) to
take a new SourceFunc callback, which is responsible for creating
a source specific to the kind of plan being requested.
Preview, Update, and Destroy now are primarily differentiated by
the kind of deploy.Source that they return, rather than sprinkling
things like `if Destroying` throughout. This tidies up some logic
and, more importantly, gives us precisely the refresh hook we need.
* Originally, we used the deploy.NullSource for Destroy operations.
This simply returns nothing, which is how Destroy works. For some
reason, we were no longer doing this, and instead had some
`if Destroying` cases sprinkled throughout the deploy.EvalSource.
I think this is a vestige of some old way we did configuration, at
least judging by a comment, which is apparently no longer relevant.
* Move diff and diff-printing logic within the engine into its own
pkg/engine/diff.go file, to prepare for upcoming work.
* I keep noticing benign diffs anytime I regenerate protobufs. I
suspect this is because we're also on different versions. I changed
generate.sh to also dump the version into grpc_version.txt. At
least we can understand where the diffs are coming from, decide
whether to take them (i.e., a newer version), and ensure that as
a team we are monotonically increasing, and not going backwards.
* I also tidied up some tiny things I noticed while in there, like
comments, incorrect types, lint suppressions, and so on.
2018-03-28 16:45:23 +02:00
|
|
|
contract.Require(u != nil, "update")
|
2018-04-20 03:59:14 +02:00
|
|
|
contract.Require(ctx != nil, "ctx")
|
2017-09-09 22:43:51 +02:00
|
|
|
|
2018-04-20 03:59:14 +02:00
|
|
|
defer func() { ctx.Events <- cancelEvent() }()
|
2017-10-23 00:52:00 +02:00
|
|
|
|
2020-11-18 20:16:30 +01:00
|
|
|
info, err := newDeploymentContext(u, "update", ctx.ParentSpan)
|
2017-08-23 01:56:15 +02:00
|
|
|
if err != nil {
|
2019-03-20 00:21:50 +01:00
|
|
|
return nil, result.FromError(err)
|
2017-08-23 01:56:15 +02:00
|
|
|
}
|
2018-04-20 03:59:14 +02:00
|
|
|
defer info.Close()
|
2017-10-05 23:08:46 +02:00
|
|
|
|
2018-08-23 00:32:54 +02:00
|
|
|
emitter, err := makeEventEmitter(ctx.Events, u)
|
|
|
|
if err != nil {
|
2019-03-20 00:21:50 +01:00
|
|
|
return nil, result.FromError(err)
|
2018-08-23 00:32:54 +02:00
|
|
|
}
|
2019-10-16 00:47:40 +02:00
|
|
|
defer emitter.Close()
|
|
|
|
|
2020-11-18 20:16:30 +01:00
|
|
|
return update(ctx, info, deploymentOptions{
|
2018-01-18 20:10:15 +01:00
|
|
|
UpdateOptions: opts,
|
2018-03-29 17:57:25 +02:00
|
|
|
SourceFunc: newUpdateSource,
|
General prep work for refresh
This change includes a bunch of refactorings I made in prep for
doing refresh (first, the command, see pulumi/pulumi#1081):
* The primary change is to change the way the engine's core update
functionality works with respect to deploy.Source. This is the
way we can plug in new sources of resource information during
planning (and, soon, diffing). The way I intend to model refresh
is by having a new kind of source, deploy.RefreshSource, which
will let us do virtually everything about an update/diff the same
way with refreshes, which avoid otherwise duplicative effort.
This includes changing the planOptions (nee deployOptions) to
take a new SourceFunc callback, which is responsible for creating
a source specific to the kind of plan being requested.
Preview, Update, and Destroy now are primarily differentiated by
the kind of deploy.Source that they return, rather than sprinkling
things like `if Destroying` throughout. This tidies up some logic
and, more importantly, gives us precisely the refresh hook we need.
* Originally, we used the deploy.NullSource for Destroy operations.
This simply returns nothing, which is how Destroy works. For some
reason, we were no longer doing this, and instead had some
`if Destroying` cases sprinkled throughout the deploy.EvalSource.
I think this is a vestige of some old way we did configuration, at
least judging by a comment, which is apparently no longer relevant.
* Move diff and diff-printing logic within the engine into its own
pkg/engine/diff.go file, to prepare for upcoming work.
* I keep noticing benign diffs anytime I regenerate protobufs. I
suspect this is because we're also on different versions. I changed
generate.sh to also dump the version into grpc_version.txt. At
least we can understand where the diffs are coming from, decide
whether to take them (i.e., a newer version), and ensure that as
a team we are monotonically increasing, and not going backwards.
* I also tidied up some tiny things I noticed while in there, like
comments, incorrect types, lint suppressions, and so on.
2018-03-28 16:45:23 +02:00
|
|
|
Events: emitter,
|
Implement status sinks
This commit reverts most of #1853 and replaces it with functionally
identical logic, using the notion of status message-specific sinks.
In other words, where the original commit implemented ephemeral status
messages by adding an `isStatus` parameter to most of the logging
methdos in pulumi/pulumi, this implements ephemeral status messages as a
parallel logging sink, which emits _only_ ephemeral status messages.
The original commit message in that PR was:
> Allow log events to be marked "status" events
>
> This commit will introduce a field, IsStatus to LogRequest. A "status"
> logging event will be displayed in the Info column of the main
> display, but will not be printed out at the end, when resource
> operations complete.
>
> For example, for complex resource initialization, we'd like to display
> a series of intermediate results: [1/4] Service object created, for
> example. We'd like these to appear in the Info column, but not at the
> end, where they are not helpful to the user.
2018-08-31 22:12:40 +02:00
|
|
|
Diag: newEventSink(emitter, false),
|
|
|
|
StatusDiag: newEventSink(emitter, true),
|
2018-04-14 07:26:01 +02:00
|
|
|
}, dryRun)
|
2017-08-23 01:56:15 +02:00
|
|
|
}
|
|
|
|
|
2019-11-08 23:37:40 +01:00
|
|
|
// RunInstallPlugins calls installPlugins and just returns the error (avoids having to export pluginSet).
|
|
|
|
func RunInstallPlugins(
|
|
|
|
proj *workspace.Project, pwd, main string, target *deploy.Target, plugctx *plugin.Context) error {
|
.NET: Report plugin install errors during `pulumi new` (#5760)
The way `pulumi new` installs dependencies for .NET projects is slightly different from other languages. For Node.js, Python, and Go, `pulumi new` runs the appropriate command to install project dependencies (e.g. `npm install`, `pip install`, or `go mod download`). For .NET, it calls the same routine used during `preview|up` to ensure required plugins are installed. For .NET, this ends up running `dotnet build` which implicitly installs Nuget packages, builds the project, and also attempts to determine and install the needed Pulumi plugins. When this operation runs during `preview|up`, and there are failures installing a plugin, the error is logged, but deliberately not returned, because an error will be shown for missing plugins later on during the `preview|up` operation. However, during `pulumi new`, we should show any plugin install errors.
2020-11-17 06:56:13 +01:00
|
|
|
_, _, err := installPlugins(proj, pwd, main, target, plugctx, true /*returnInstallErrors*/)
|
2019-11-08 23:37:40 +01:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
Implement query primitives in the engine
`pulumi query` is designed, essentially, as a souped-up `exec`. We
execute a query program, and add a few convenience constructs (e.g., the
default providers that give you access to things like `getStack`).
Early in the design process, we decided to not re-use the `up`/update
path, both to minimize risk to update operations, and to simplify the
implementation.
This commit will add this "parallel query universe" into the engine
package. In particular, this includes:
* `QuerySource`, which executes the language provider running the query
program, and providing it with some simple constructs, such as the
default provider, which provides access to `getStack`. This is much
like a very simplified `EvalSource`, though notably without any of the
planning/step execution machinery.
* `queryResmon`, which disallows all resource operations, except the
`Invoke` that retrieves the resource outputs of some stack's last
snapshot. This is much like a simplified `resmon`, but without any of
the provider resolution, and without and support for resource
operations generally.
* Various static functions that pull together miscellaneous things
needed to execute a query program. Notably, this includes gathering
language plugins.
2019-04-30 20:07:56 +02:00
|
|
|
func installPlugins(
|
Add `--server` to `pulumi plugin install`
Previously, when the CLI wanted to install a plugin, it used a special
method, `DownloadPlugin` on the `httpstate` backend to actually fetch
the tarball that had the plugin. The reason for this is largely tied
to history, at one point during a closed beta, we required presenting
an API key to download plugins (as a way to enforce folks outside the
beta could not download them) and because of that it was natural to
bake that functionality into the part of the code that interfaced with
the rest of the API from the Pulumi Service.
The downside here is that it means we need to host all the plugins on
`api.pulumi.com` which prevents community folks from being able to
easily write resource providers, since they have to manually manage
the process of downloading a provider to a machine and getting it on
the `$PATH` or putting it in the plugin cache.
To make this easier, we add a `--server` argument you can pass to
`pulumi plugin install` to control the URL that it attempts to fetch
the tarball from. We still have perscriptive guidence on how the
tarball must be
named (`pulumi-[<type>]-[<provider-name>]-vX.Y.Z.tar.gz`) but the base
URL can now be configured.
Folks publishing packages can use install scripts to run `pulumi
plugin install` passing a custom `--server` argument, if needed.
There are two improvements we can make to provide a nicer end to end
story here:
- We can augment the GetRequiredPlugins method on the language
provider to also return information about an optional server to use
when downloading the provider.
- We can pass information about a server to download plugins from as
part of a resource registration or creation of a first class
provider.
These help out in cases where for one reason or another where `pulumi
plugin install` doesn't get run before an update takes place and would
allow us to either do the right thing ahead of time or provide better
error messages with the correct `--server` argument. But, for now,
this unblocks a majority of the cases we care about and provides a
path forward for folks that want to develop and host their own
resource providers.
2019-05-30 22:56:55 +02:00
|
|
|
proj *workspace.Project, pwd, main string, target *deploy.Target,
|
.NET: Report plugin install errors during `pulumi new` (#5760)
The way `pulumi new` installs dependencies for .NET projects is slightly different from other languages. For Node.js, Python, and Go, `pulumi new` runs the appropriate command to install project dependencies (e.g. `npm install`, `pip install`, or `go mod download`). For .NET, it calls the same routine used during `preview|up` to ensure required plugins are installed. For .NET, this ends up running `dotnet build` which implicitly installs Nuget packages, builds the project, and also attempts to determine and install the needed Pulumi plugins. When this operation runs during `preview|up`, and there are failures installing a plugin, the error is logged, but deliberately not returned, because an error will be shown for missing plugins later on during the `preview|up` operation. However, during `pulumi new`, we should show any plugin install errors.
2020-11-17 06:56:13 +01:00
|
|
|
plugctx *plugin.Context, returnInstallErrors bool) (pluginSet, map[tokens.Package]*semver.Version, error) {
|
2018-04-14 07:26:01 +02:00
|
|
|
|
2019-03-15 23:01:37 +01:00
|
|
|
// Before launching the source, ensure that we have all of the plugins that we need in order to proceed.
|
|
|
|
//
|
|
|
|
// There are two places that we need to look for plugins:
|
|
|
|
// 1. The language host, which reports to us the set of plugins that the program that's about to execute
|
|
|
|
// needs in order to create new resources. This is purely advisory by the language host and not all
|
|
|
|
// languages implement this (notably Python).
|
|
|
|
// 2. The snapshot. The snapshot contains plugins in two locations: first, in the manifest, all plugins
|
|
|
|
// that were loaded are recorded. Second, all first class providers record the version of the plugin
|
|
|
|
// to which they are bound.
|
|
|
|
//
|
Implement query primitives in the engine
`pulumi query` is designed, essentially, as a souped-up `exec`. We
execute a query program, and add a few convenience constructs (e.g., the
default providers that give you access to things like `getStack`).
Early in the design process, we decided to not re-use the `up`/update
path, both to minimize risk to update operations, and to simplify the
implementation.
This commit will add this "parallel query universe" into the engine
package. In particular, this includes:
* `QuerySource`, which executes the language provider running the query
program, and providing it with some simple constructs, such as the
default provider, which provides access to `getStack`. This is much
like a very simplified `EvalSource`, though notably without any of the
planning/step execution machinery.
* `queryResmon`, which disallows all resource operations, except the
`Invoke` that retrieves the resource outputs of some stack's last
snapshot. This is much like a simplified `resmon`, but without any of
the provider resolution, and without any support for resource
operations generally.
* Various static functions that pull together miscellaneous things
needed to execute a query program. Notably, this includes gathering
language plugins.
2019-04-30 20:07:56 +02:00
|
|
|
// In order to get a complete view of the set of plugins that we need for an update or query, we must
|
|
|
|
// consult both sources and merge their results into a list of plugins.
|
2019-03-15 23:01:37 +01:00
|
|
|
languagePlugins, err := gatherPluginsFromProgram(plugctx, plugin.ProgInfo{
|
2018-03-29 17:57:25 +02:00
|
|
|
Proj: proj,
|
|
|
|
Pwd: pwd,
|
|
|
|
Program: main,
|
2019-03-15 23:01:37 +01:00
|
|
|
})
|
2018-03-29 17:57:25 +02:00
|
|
|
if err != nil {
|
Implement query primitives in the engine
`pulumi query` is designed, essentially, as a souped-up `exec`. We
execute a query program, and add a few convenience constructs (e.g., the
default providers that give you access to things like `getStack`).
Early in the design process, we decided to not re-use the `up`/update
path, both to minimize risk to update operations, and to simplify the
implementation.
This commit will add this "parallel query universe" into the engine
package. In particular, this includes:
* `QuerySource`, which executes the language provider running the query
program, and providing it with some simple constructs, such as the
default provider, which provides access to `getStack`. This is much
like a very simplified `EvalSource`, though notably without any of the
planning/step execution machinery.
* `queryResmon`, which disallows all resource operations, except the
`Invoke` that retrieves the resource outputs of some stack's last
snapshot. This is much like a simplified `resmon`, but without any of
the provider resolution, and without any support for resource
operations generally.
* Various static functions that pull together miscellaneous things
needed to execute a query program. Notably, this includes gathering
language plugins.
2019-04-30 20:07:56 +02:00
|
|
|
return nil, nil, err
|
General prep work for refresh
This change includes a bunch of refactorings I made in prep for
doing refresh (first, the command, see pulumi/pulumi#1081):
* The primary change is to change the way the engine's core update
functionality works with respect to deploy.Source. This is the
way we can plug in new sources of resource information during
planning (and, soon, diffing). The way I intend to model refresh
is by having a new kind of source, deploy.RefreshSource, which
will let us do virtually everything about an update/diff the same
way with refreshes, which avoid otherwise duplicative effort.
This includes changing the planOptions (nee deployOptions) to
take a new SourceFunc callback, which is responsible for creating
a source specific to the kind of plan being requested.
Preview, Update, and Destroy now are primarily differentiated by
the kind of deploy.Source that they return, rather than sprinkling
things like `if Destroying` throughout. This tidies up some logic
and, more importantly, gives us precisely the refresh hook we need.
* Originally, we used the deploy.NullSource for Destroy operations.
This simply returns nothing, which is how Destroy works. For some
reason, we were no longer doing this, and instead had some
`if Destroying` cases sprinkled throughout the deploy.EvalSource.
I think this is a vestige of some old way we did configuration, at
least judging by a comment, which is apparently no longer relevant.
* Move diff and diff-printing logic within the engine into its own
pkg/engine/diff.go file, to prepare for upcoming work.
* I keep noticing benign diffs anytime I regenerate protobufs. I
suspect this is because we're also on different versions. I changed
generate.sh to also dump the version into grpc_version.txt. At
least we can understand where the diffs are coming from, decide
whether to take them (i.e., a newer version), and ensure that as
a team we are monotonically increasing, and not going backwards.
* I also tidied up some tiny things I noticed while in there, like
comments, incorrect types, lint suppressions, and so on.
2018-03-28 16:45:23 +02:00
|
|
|
}
|
2019-03-15 23:01:37 +01:00
|
|
|
snapshotPlugins, err := gatherPluginsFromSnapshot(plugctx, target)
|
|
|
|
if err != nil {
|
Implement query primitives in the engine
`pulumi query` is designed, essentially, as a souped-up `exec`. We
execute a query program, and add a few convenience constructs (e.g., the
default providers that give you access to things like `getStack`).
Early in the design process, we decided to not re-use the `up`/update
path, both to minimize risk to update operations, and to simplify the
implementation.
This commit will add this "parallel query universe" into the engine
package. In particular, this includes:
* `QuerySource`, which executes the language provider running the query
program, and providing it with some simple constructs, such as the
default provider, which provides access to `getStack`. This is much
like a very simplified `EvalSource`, though notably without any of the
planning/step execution machinery.
* `queryResmon`, which disallows all resource operations, except the
`Invoke` that retrieves the resource outputs of some stack's last
snapshot. This is much like a simplified `resmon`, but without any of
the provider resolution, and without any support for resource
operations generally.
* Various static functions that pull together miscellaneous things
needed to execute a query program. Notably, this includes gathering
language plugins.
2019-04-30 20:07:56 +02:00
|
|
|
return nil, nil, err
|
2019-03-15 23:01:37 +01:00
|
|
|
}
|
Implement query primitives in the engine
`pulumi query` is designed, essentially, as a souped-up `exec`. We
execute a query program, and add a few convenience constructs (e.g., the
default providers that give you access to things like `getStack`).
Early in the design process, we decided to not re-use the `up`/update
path, both to minimize risk to update operations, and to simplify the
implementation.
This commit will add this "parallel query universe" into the engine
package. In particular, this includes:
* `QuerySource`, which executes the language provider running the query
program, and providing it with some simple constructs, such as the
default provider, which provides access to `getStack`. This is much
like a very simplified `EvalSource`, though notably without any of the
planning/step execution machinery.
* `queryResmon`, which disallows all resource operations, except the
`Invoke` that retrieves the resource outputs of some stack's last
snapshot. This is much like a simplified `resmon`, but without any of
the provider resolution, and without any support for resource
operations generally.
* Various static functions that pull together miscellaneous things
needed to execute a query program. Notably, this includes gathering
language plugins.
2019-04-30 20:07:56 +02:00
|
|
|
|
2019-03-15 23:01:37 +01:00
|
|
|
allPlugins := languagePlugins.Union(snapshotPlugins)
|
|
|
|
|
Add `--server` to `pulumi plugin install`
Previously, when the CLI wanted to install a plugin, it used a special
method, `DownloadPlugin` on the `httpstate` backend to actually fetch
the tarball that had the plugin. The reason for this is largely tied
to history, at one point during a closed beta, we required presenting
an API key to download plugins (as a way to enforce folks outside the
beta could not download them) and because of that it was natural to
bake that functionality into the part of the code that interfaced with
the rest of the API from the Pulumi Service.
The downside here is that it means we need to host all the plugins on
`api.pulumi.com` which prevents community folks from being able to
easily write resource providers, since they have to manually manage
the process of downloading a provider to a machine and getting it on
the `$PATH` or putting it in the plugin cache.
To make this easier, we add a `--server` argument you can pass to
`pulumi plugin install` to control the URL that it attempts to fetch
the tarball from. We still have prescriptive guidance on how the
tarball must be
named (`pulumi-[<type>]-[<provider-name>]-vX.Y.Z.tar.gz`) but the base
URL can now be configured.
Folks publishing packages can use install scripts to run `pulumi
plugin install` passing a custom `--server` argument, if needed.
There are two improvements we can make to provide a nicer end to end
story here:
- We can augment the GetRequiredPlugins method on the language
provider to also return information about an optional server to use
when downloading the provider.
- We can pass information about a server to download plugins from as
part of a resource registration or creation of a first class
provider.
These help out in cases where, for one reason or another, `pulumi
plugin install` doesn't get run before an update takes place and would
allow us to either do the right thing ahead of time or provide better
error messages with the correct `--server` argument. But, for now,
this unblocks a majority of the cases we care about and provides a
path forward for folks that want to develop and host their own
resource providers.
2019-05-30 22:56:55 +02:00
|
|
|
// If there are any plugins that are not available, we can attempt to install them here.
|
2019-03-15 23:01:37 +01:00
|
|
|
//
|
|
|
|
// Note that this is purely a best-effort thing. If we can't install missing plugins, just proceed; we'll fail later
|
.NET: Report plugin install errors during `pulumi new` (#5760)
The way `pulumi new` installs dependencies for .NET projects is slightly different from other languages. For Node.js, Python, and Go, `pulumi new` runs the appropriate command to install project dependencies (e.g. `npm install`, `pip install`, or `go mod download`). For .NET, it calls the same routine used during `preview|up` to ensure required plugins are installed. For .NET, this ends up running `dotnet build` which implicitly installs Nuget packages, builds the project, and also attempts to determine and install the needed Pulumi plugins. When this operation runs during `preview|up`, and there are failures installing a plugin, the error is logged, but deliberately not returned, because an error will be shown for missing plugins later on during the `preview|up` operation. However, during `pulumi new`, we should show any plugin install errors.
2020-11-17 06:56:13 +01:00
|
|
|
// with an error message indicating exactly what plugins are missing. If `returnInstallErrors` is set, then return
|
|
|
|
// the error.
|
Add `--server` to `pulumi plugin install`
Previously, when the CLI wanted to install a plugin, it used a special
method, `DownloadPlugin` on the `httpstate` backend to actually fetch
the tarball that had the plugin. The reason for this is largely tied
to history, at one point during a closed beta, we required presenting
an API key to download plugins (as a way to enforce folks outside the
beta could not download them) and because of that it was natural to
bake that functionality into the part of the code that interfaced with
the rest of the API from the Pulumi Service.
The downside here is that it means we need to host all the plugins on
`api.pulumi.com` which prevents community folks from being able to
easily write resource providers, since they have to manually manage
the process of downloading a provider to a machine and getting it on
the `$PATH` or putting it in the plugin cache.
To make this easier, we add a `--server` argument you can pass to
`pulumi plugin install` to control the URL that it attempts to fetch
the tarball from. We still have perscriptive guidence on how the
tarball must be
named (`pulumi-[<type>]-[<provider-name>]-vX.Y.Z.tar.gz`) but the base
URL can now be configured.
Folks publishing packages can use install scripts to run `pulumi
plugin install` passing a custom `--server` argument, if needed.
There are two improvements we can make to provide a nicer end to end
story here:
- We can augment the GetRequiredPlugins method on the language
provider to also return information about an optional server to use
when downloading the provider.
- We can pass information about a server to download plugins from as
part of a resource registration or creation of a first class
provider.
These help out in cases where, for one reason or another, `pulumi
plugin install` doesn't get run before an update takes place and would
allow us to either do the right thing ahead of time or provide better
error messages with the correct `--server` argument. But, for now,
this unblocks a majority of the cases we care about and provides a
path forward for folks that want to develop and host their own
resource providers.
2019-05-30 22:56:55 +02:00
|
|
|
if err := ensurePluginsAreInstalled(allPlugins); err != nil {
|
.NET: Report plugin install errors during `pulumi new` (#5760)
The way `pulumi new` installs dependencies for .NET projects is slightly different from other languages. For Node.js, Python, and Go, `pulumi new` runs the appropriate command to install project dependencies (e.g. `npm install`, `pip install`, or `go mod download`). For .NET, it calls the same routine used during `preview|up` to ensure required plugins are installed. For .NET, this ends up running `dotnet build` which implicitly installs Nuget packages, builds the project, and also attempts to determine and install the needed Pulumi plugins. When this operation runs during `preview|up`, and there are failures installing a plugin, the error is logged, but deliberately not returned, because an error will be shown for missing plugins later on during the `preview|up` operation. However, during `pulumi new`, we should show any plugin install errors.
2020-11-17 06:56:13 +01:00
|
|
|
if returnInstallErrors {
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
2019-03-15 23:01:37 +01:00
|
|
|
logging.V(7).Infof("newUpdateSource(): failed to install missing plugins: %v", err)
|
|
|
|
}
|
2018-03-29 17:57:25 +02:00
|
|
|
|
Implement query primitives in the engine
`pulumi query` is designed, essentially, as a souped-up `exec`. We
execute a query program, and add a few convenience constructs (e.g., the
default providers that give you access to things like `getStack`).
Early in the design process, we decided to not re-use the `up`/update
path, both to minimize risk to update operations, and to simplify the
implementation.
This commit will add this "parallel query universe" into the engine
package. In particular, this includes:
* `QuerySource`, which executes the language provider running the query
program, and providing it with some simple constructs, such as the
default provider, which provides access to `getStack`. This is much
like a very simplified `EvalSource`, though notably without any of the
planning/step execution machinery.
* `queryResmon`, which disallows all resource operations, except the
`Invoke` that retrieves the resource outputs of some stack's last
snapshot. This is much like a simplified `resmon`, but without any of
the provider resolution, and without any support for resource
operations generally.
* Various static functions that pull together miscellaneous things
needed to execute a query program. Notably, this includes gathering
language plugins.
2019-04-30 20:07:56 +02:00
|
|
|
// Collect the version information for default providers.
|
|
|
|
defaultProviderVersions := computeDefaultProviderPlugins(languagePlugins, allPlugins)
|
|
|
|
|
|
|
|
return allPlugins, defaultProviderVersions, nil
|
|
|
|
}
|
|
|
|
|
2020-03-08 22:11:55 +01:00
|
|
|
func installAndLoadPolicyPlugins(plugctx *plugin.Context, d diag.Sink, policies []RequiredPolicy,
|
|
|
|
localPolicyPacks []LocalPolicyPack, opts *plugin.PolicyAnalyzerOptions) error {
|
|
|
|
|
|
|
|
var allValidationErrors []string
|
|
|
|
appendValidationErrors := func(policyPackName, policyPackVersion string, validationErrors []string) {
|
|
|
|
for _, validationError := range validationErrors {
|
|
|
|
allValidationErrors = append(allValidationErrors,
|
|
|
|
fmt.Sprintf("validating policy config: %s %s %s",
|
|
|
|
policyPackName, policyPackVersion, validationError))
|
|
|
|
}
|
|
|
|
}
|
2019-12-16 23:51:02 +01:00
|
|
|
|
|
|
|
// Install and load required policy packs.
|
2019-07-01 01:34:39 +02:00
|
|
|
for _, policy := range policies {
|
|
|
|
policyPath, err := policy.Install(context.Background())
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2020-03-08 22:11:55 +01:00
|
|
|
analyzer, err := plugctx.Host.PolicyAnalyzer(tokens.QName(policy.Name()), policyPath, opts)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
analyzerInfo, err := analyzer.GetAnalyzerInfo()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Parse the config, reconcile & validate it, and pass it to the policy pack.
|
|
|
|
if !analyzerInfo.SupportsConfig {
|
|
|
|
if len(policy.Config()) > 0 {
|
|
|
|
logging.V(7).Infof("policy pack %q does not support config; skipping configure", analyzerInfo.Name)
|
|
|
|
}
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
configFromAPI, err := resourceanalyzer.ParsePolicyPackConfigFromAPI(policy.Config())
|
2019-12-16 23:51:02 +01:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2020-03-30 21:52:05 +02:00
|
|
|
config, validationErrors, err := resourceanalyzer.ReconcilePolicyPackConfig(
|
|
|
|
analyzerInfo.Policies, analyzerInfo.InitialConfig, configFromAPI)
|
2020-03-08 22:11:55 +01:00
|
|
|
if err != nil {
|
|
|
|
return errors.Wrapf(err, "reconciling config for %q", analyzerInfo.Name)
|
|
|
|
}
|
|
|
|
appendValidationErrors(analyzerInfo.Name, analyzerInfo.Version, validationErrors)
|
|
|
|
if err = analyzer.Configure(config); err != nil {
|
|
|
|
return errors.Wrapf(err, "configuring policy pack %q", analyzerInfo.Name)
|
|
|
|
}
|
2019-12-16 23:51:02 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Load local policy packs.
|
2020-01-30 22:31:41 +01:00
|
|
|
for i, pack := range localPolicyPacks {
|
|
|
|
abs, err := filepath.Abs(pack.Path)
|
2019-12-16 23:51:02 +01:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2020-01-30 22:31:41 +01:00
|
|
|
analyzer, err := plugctx.Host.PolicyAnalyzer(tokens.QName(abs), pack.Path, opts)
|
2019-07-01 01:34:39 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
2019-12-16 23:51:02 +01:00
|
|
|
} else if analyzer == nil {
|
2020-03-08 22:11:55 +01:00
|
|
|
return errors.Errorf("policy analyzer could not be loaded from path %q", pack.Path)
|
2019-07-01 01:34:39 +02:00
|
|
|
}
|
|
|
|
|
2020-01-30 22:31:41 +01:00
|
|
|
// Update the Policy Pack names now that we have loaded the plugins and can access the name.
|
|
|
|
analyzerInfo, err := analyzer.GetAnalyzerInfo()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
localPolicyPacks[i].Name = analyzerInfo.Name
|
2020-03-08 22:11:55 +01:00
|
|
|
|
|
|
|
// Load config, reconcile & validate it, and pass it to the policy pack.
|
|
|
|
if !analyzerInfo.SupportsConfig {
|
|
|
|
if pack.Config != "" {
|
|
|
|
return errors.Errorf("policy pack %q at %q does not support config", analyzerInfo.Name, pack.Path)
|
|
|
|
}
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
var configFromFile map[string]plugin.AnalyzerPolicyConfig
|
|
|
|
if pack.Config != "" {
|
|
|
|
configFromFile, err = resourceanalyzer.LoadPolicyPackConfigFromFile(pack.Config)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
2020-03-30 21:52:05 +02:00
|
|
|
config, validationErrors, err := resourceanalyzer.ReconcilePolicyPackConfig(
|
|
|
|
analyzerInfo.Policies, analyzerInfo.InitialConfig, configFromFile)
|
2020-03-08 22:11:55 +01:00
|
|
|
if err != nil {
|
|
|
|
return errors.Wrapf(err, "reconciling policy config for %q at %q", analyzerInfo.Name, pack.Path)
|
|
|
|
}
|
|
|
|
appendValidationErrors(analyzerInfo.Name, analyzerInfo.Version, validationErrors)
|
|
|
|
if err = analyzer.Configure(config); err != nil {
|
|
|
|
return errors.Wrapf(err, "configuring policy pack %q at %q", analyzerInfo.Name, pack.Path)
|
|
|
|
}
|
2020-01-30 22:31:41 +01:00
|
|
|
}
|
2020-03-08 22:11:55 +01:00
|
|
|
|
|
|
|
// Report any policy config validation errors and return an error.
|
|
|
|
if len(allValidationErrors) > 0 {
|
|
|
|
sort.Strings(allValidationErrors)
|
|
|
|
for _, validationError := range allValidationErrors {
|
|
|
|
plugctx.Diag.Errorf(diag.Message("", validationError))
|
|
|
|
}
|
|
|
|
return errors.New("validating policy config")
|
|
|
|
}
|
|
|
|
|
2019-07-01 01:34:39 +02:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
Implement query primitives in the engine
`pulumi query` is designed, essentially, as a souped-up `exec`. We
execute a query program, and add a few convenience constructs (e.g., the
default providers that give you access to things like `getStack`).
Early in the design process, we decided to not re-use the `up`/update
path, both to minimize risk to update operations, and to simplify the
implementation.
This commit will add this "parallel query universe" into the engine
package. In particular, this includes:
* `QuerySource`, which executes the language provider running the query
program, and providing it with some simple constructs, such as the
default provider, which provides access to `getStack`. This is much
like a very simplified `EvalSource`, though notably without any of the
planning/step execution machinery.
* `queryResmon`, which disallows all resource operations, except the
`Invoke` that retrieves the resource outputs of some stack's last
snapshot. This is much like a simplified `resmon`, but without any of
the provider resolution, and without any support for resource
operations generally.
* Various static functions that pull together miscellaneous things
needed to execute a query program. Notably, this includes gathering
language plugins.
2019-04-30 20:07:56 +02:00
|
|
|
func newUpdateSource(
|
2020-11-18 20:16:30 +01:00
|
|
|
client deploy.BackendClient, opts deploymentOptions, proj *workspace.Project, pwd, main string,
|
Implement query primitives in the engine
`pulumi query` is designed, essentially, as a souped-up `exec`. We
execute a query program, and add a few convenience constructs (e.g., the
default providers that give you access to things like `getStack`).
Early in the design process, we decided to not re-use the `up`/update
path, both to minimize risk to update operations, and to simplify the
implementation.
This commit will add this "parallel query universe" into the engine
package. In particular, this includes:
* `QuerySource`, which executes the language provider running the query
program, and providing it with some simple constructs, such as the
default provider, which provides access to `getStack`. This is much
like a very simplified `EvalSource`, though notably without any of the
planning/step execution machinery.
* `queryResmon`, which disallows all resource operations, except the
`Invoke` that retrieves the resource outputs of some stack's last
snapshot. This is much like a simplified `resmon`, but without any of
the provider resolution, and without any support for resource
operations generally.
* Various static functions that pull together miscellaneous things
needed to execute a query program. Notably, this includes gathering
language plugins.
2019-04-30 20:07:56 +02:00
|
|
|
target *deploy.Target, plugctx *plugin.Context, dryRun bool) (deploy.Source, error) {
|
|
|
|
|
2019-07-01 01:34:39 +02:00
|
|
|
//
|
|
|
|
// Step 1: Install and load plugins.
|
|
|
|
//
|
|
|
|
|
Add `--server` to `pulumi plugin install`
Previously, when the CLI wanted to install a plugin, it used a special
method, `DownloadPlugin` on the `httpstate` backend to actually fetch
the tarball that had the plugin. The reason for this is largely tied
to history, at one point during a closed beta, we required presenting
an API key to download plugins (as a way to enforce folks outside the
beta could not download them) and because of that it was natural to
bake that functionality into the part of the code that interfaced with
the rest of the API from the Pulumi Service.
The downside here is that it means we need to host all the plugins on
`api.pulumi.com` which prevents community folks from being able to
easily write resource providers, since they have to manually manage
the process of downloading a provider to a machine and getting it on
the `$PATH` or putting it in the plugin cache.
To make this easier, we add a `--server` argument you can pass to
`pulumi plugin install` to control the URL that it attempts to fetch
the tarball from. We still have prescriptive guidance on how the
tarball must be
named (`pulumi-[<type>]-[<provider-name>]-vX.Y.Z.tar.gz`) but the base
URL can now be configured.
Folks publishing packages can use install scripts to run `pulumi
plugin install` passing a custom `--server` argument, if needed.
There are two improvements we can make to provide a nicer end to end
story here:
- We can augment the GetRequiredPlugins method on the language
provider to also return information about an optional server to use
when downloading the provider.
- We can pass information about a server to download plugins from as
part of a resource registration or creation of a first class
provider.
These help out in cases where, for one reason or another, `pulumi
plugin install` doesn't get run before an update takes place and would
allow us to either do the right thing ahead of time or provide better
error messages with the correct `--server` argument. But, for now,
this unblocks a majority of the cases we care about and provides a
path forward for folks that want to develop and host their own
resource providers.
2019-05-30 22:56:55 +02:00
|
|
|
allPlugins, defaultProviderVersions, err := installPlugins(proj, pwd, main, target,
|
.NET: Report plugin install errors during `pulumi new` (#5760)
The way `pulumi new` installs dependencies for .NET projects is slightly different from other languages. For Node.js, Python, and Go, `pulumi new` runs the appropriate command to install project dependencies (e.g. `npm install`, `pip install`, or `go mod download`). For .NET, it calls the same routine used during `preview|up` to ensure required plugins are installed. For .NET, this ends up running `dotnet build` which implicitly installs Nuget packages, builds the project, and also attempts to determine and install the needed Pulumi plugins. When this operation runs during `preview|up`, and there are failures installing a plugin, the error is logged, but deliberately not returned, because an error will be shown for missing plugins later on during the `preview|up` operation. However, during `pulumi new`, we should show any plugin install errors.
2020-11-17 06:56:13 +01:00
|
|
|
plugctx, false /*returnInstallErrors*/)
|
Implement query primitives in the engine
`pulumi query` is designed, essentially, as a souped-up `exec`. We
execute a query program, and add a few convenience constructs (e.g., the
default providers that give you access to things like `getStack`).
Early in the design process, we decided to not re-use the `up`/update
path, both to minimize risk to update operations, and to simplify the
implementation.
This commit will add this "parallel query universe" into the engine
package. In particular, this includes:
* `QuerySource`, which executes the language provider running the query
program, and providing it with some simple constructs, such as the
default provider, which provides access to `getStack`. This is much
like a very simplified `EvalSource`, though notably without any of the
planning/step execution machinery.
* `queryResmon`, which disallows all resource operations, except the
`Invoke` that retrieves the resource outputs of some stack's last
snapshot. This is much like a simplified `resmon`, but without any of
the provider resolution, and without any support for resource
operations generally.
* Various static functions that pull together miscellaneous things
needed to execute a query program. Notably, this includes gathering
language plugins.
2019-04-30 20:07:56 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2019-03-15 23:01:37 +01:00
|
|
|
// Once we've installed all of the plugins we need, make sure that all analyzers and language plugins are
|
|
|
|
// loaded up and ready to go. Provider plugins are loaded lazily by the provider registry and thus don't
|
|
|
|
// need to be loaded here.
|
Implement first-class providers. (#1695)
### First-Class Providers
These changes implement support for first-class providers. First-class
providers are provider plugins that are exposed as resources via the
Pulumi programming model so that they may be explicitly and multiply
instantiated. Each instance of a provider resource may be configured
differently, and configuration parameters may be sourced from the
outputs of other resources.
### Provider Plugin Changes
In order to accommodate the need to verify and diff provider
configuration and configure providers without complete configuration
information, these changes adjust the high-level provider plugin
interface. Two new methods for validating a provider's configuration
and diffing changes to the same have been added (`CheckConfig` and
`DiffConfig`, respectively), and the type of the configuration bag
accepted by `Configure` has been changed to a `PropertyMap`.
These changes have not yet been reflected in the provider plugin gRPC
interface. We will do this in a set of follow-up changes. Until then,
these methods are implemented by adapters:
- `CheckConfig` validates that all configuration parameters are string
or unknown properties. This is necessary because existing plugins
only accept string-typed configuration values.
- `DiffConfig` either returns "never replace" if all configuration
values are known or "must replace" if any configuration value is
unknown. The justification for this behavior is given
[here](https://github.com/pulumi/pulumi/pull/1695/files#diff-a6cd5c7f337665f5bb22e92ca5f07537R106)
- `Configure` converts the config bag to a legacy config map and
configures the provider plugin if all config values are known. If any
config value is unknown, the underlying plugin is not configured and
the provider may only perform `Check`, `Read`, and `Invoke`, all of
which return empty results. We justify this behavior because it is
only possible during a preview and provides the best experience we
can manage with the existing gRPC interface.
### Resource Model Changes
Providers are now exposed as resources that participate in a stack's
dependency graph. Like other resources, they are explicitly created,
may have multiple instances, and may have dependencies on other
resources. Providers are referred to using provider references, which
are a combination of the provider's URN and its ID. This design
addresses the need during a preview to refer to providers that have not
yet been physically created and therefore have no ID.
All custom resources that are not themselves providers must specify a
single provider via a provider reference. The named provider will be
used to manage that resource's CRUD operations. If a resource's
provider reference changes, the resource must be replaced. Though its
URN is not present in the resource's dependency list, the provider
should be treated as a dependency of the resource when topologically
sorting the dependency graph.
Finally, `Invoke` operations must now specify a provider to use for the
invocation via a provider reference.
### Engine Changes
First-class providers support requires a few changes to the engine:
- The engine must have some way to map from provider references to
provider plugins. It must be possible to add providers from a stack's
checkpoint to this map and to register new/updated providers during
the execution of a plan in response to CRUD operations on provider
resources.
- In order to support updating existing stacks using existing Pulumi
programs that may not explicitly instantiate providers, the engine
must be able to manage the "default" providers for each package
referenced by a checkpoint or Pulumi program. The configuration for
a "default" provider is taken from the stack's configuration data.
The former need is addressed by adding a provider registry type that is
responsible for managing all of the plugins required by a plan. In
addition to loading plugins from a checkpoint and providing the ability
to map from a provider reference to a provider plugin, this type serves
as the provider plugin for providers themselves (i.e. it is the
"provider provider").
The latter need is solved via two relatively self-contained changes to
plan setup and the eval source.
During plan setup, the old checkpoint is scanned for custom resources
that do not have a provider reference in order to compute the set of
packages that require a default provider. Once this set has been
computed, the required default provider definitions are conjured and
prepended to the checkpoint's resource list. Each resource that
requires a default provider is then updated to refer to the default
provider for its package.
While an eval source is running, each custom resource registration,
resource read, and invoke that does not name a provider is trapped
before being returned by the source iterator. If no default provider
for the appropriate package has been registered, the eval source
synthesizes an appropriate registration, waits for it to complete, and
records the registered provider's reference. This reference is injected
into the original request, which is then processed as usual. If a
default provider was already registered, the recorded reference is
used and no new registration occurs.
### SDK Changes
These changes only expose first-class providers from the Node.JS SDK.
- A new abstract class, `ProviderResource`, can be subclassed and used
to instantiate first-class providers.
- A new field in `ResourceOptions`, `provider`, can be used to supply
a particular provider instance to manage a `CustomResource`'s CRUD
operations.
- A new type, `InvokeOptions`, can be used to specify options that
control the behavior of a call to `pulumi.runtime.invoke`. This type
includes a `provider` field that is analogous to
`ResourceOptions.provider`.
2018-08-07 02:50:29 +02:00
|
|
|
const kinds = plugin.AnalyzerPlugins | plugin.LanguagePlugins
|
2019-03-15 23:01:37 +01:00
|
|
|
if err := ensurePluginsAreLoaded(plugctx, allPlugins, kinds); err != nil {
|
2018-03-29 17:57:25 +02:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2019-07-01 01:34:39 +02:00
|
|
|
//
|
|
|
|
// Step 2: Install and load policy plugins.
|
|
|
|
//
|
|
|
|
|
2019-12-16 23:51:02 +01:00
|
|
|
// Decrypt the configuration.
|
|
|
|
config, err := target.Config.Decrypt(target.Decrypter)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
analyzerOpts := plugin.PolicyAnalyzerOptions{
|
|
|
|
Project: proj.Name.String(),
|
|
|
|
Stack: target.Name.String(),
|
|
|
|
Config: config,
|
|
|
|
DryRun: dryRun,
|
|
|
|
}
|
2020-03-08 22:11:55 +01:00
|
|
|
if err := installAndLoadPolicyPlugins(plugctx, opts.Diag, opts.RequiredPolicies, opts.LocalPolicyPacks,
|
2019-12-16 23:51:02 +01:00
|
|
|
&analyzerOpts); err != nil {
|
2019-07-01 01:34:39 +02:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2020-09-15 02:40:17 +02:00
|
|
|
// If we are connecting to an existing client, stash the address of the engine in its arguments.
|
|
|
|
var args []string
|
|
|
|
if proj.Runtime.Name() == clientRuntimeName {
|
|
|
|
args = []string{plugctx.Host.ServerAddr()}
|
|
|
|
}
|
|
|
|
|
2018-03-29 17:57:25 +02:00
|
|
|
// If that succeeded, create a new source that will perform interpretation of the compiled program.
|
|
|
|
// TODO[pulumi/pulumi#88]: we are passing `nil` as the arguments map; we need to allow a way to pass these.
|
|
|
|
return deploy.NewEvalSource(plugctx, &deploy.EvalRunInfo{
|
|
|
|
Proj: proj,
|
|
|
|
Pwd: pwd,
|
|
|
|
Program: main,
|
2020-09-15 02:40:17 +02:00
|
|
|
Args: args,
|
2018-03-29 17:57:25 +02:00
|
|
|
Target: target,
|
Implement first-class providers. (#1695)
### First-Class Providers
These changes implement support for first-class providers. First-class
providers are provider plugins that are exposed as resources via the
Pulumi programming model so that they may be explicitly and multiply
instantiated. Each instance of a provider resource may be configured
differently, and configuration parameters may be source from the
outputs of other resources.
### Provider Plugin Changes
In order to accommodate the need to verify and diff provider
configuration and configure providers without complete configuration
information, these changes adjust the high-level provider plugin
interface. Two new methods for validating a provider's configuration
and diffing changes to the same have been added (`CheckConfig` and
`DiffConfig`, respectively), and the type of the configuration bag
accepted by `Configure` has been changed to a `PropertyMap`.
These changes have not yet been reflected in the provider plugin gRPC
interface. We will do this in a set of follow-up changes. Until then,
these methods are implemented by adapters:
- `CheckConfig` validates that all configuration parameters are string
or unknown properties. This is necessary because existing plugins
only accept string-typed configuration values.
- `DiffConfig` either returns "never replace" if all configuration
values are known or "must replace" if any configuration value is
unknown. The justification for this behavior is given
[here](https://github.com/pulumi/pulumi/pull/1695/files#diff-a6cd5c7f337665f5bb22e92ca5f07537R106)
- `Configure` converts the config bag to a legacy config map and
configures the provider plugin if all config values are known. If any
config value is unknown, the underlying plugin is not configured and
the provider may only perform `Check`, `Read`, and `Invoke`, all of
which return empty results. We justify this behavior because it is
only possible during a preview and provides the best experience we
can manage with the existing gRPC interface.
### Resource Model Changes
Providers are now exposed as resources that participate in a stack's
dependency graph. Like other resources, they are explicitly created,
may have multiple instances, and may have dependencies on other
resources. Providers are referred to using provider references, which
are a combination of the provider's URN and its ID. This design
addresses the need during a preview to refer to providers that have not
yet been physically created and therefore have no ID.
All custom resources that are not themselves providers must specify a
single provider via a provider reference. The named provider will be
used to manage that resource's CRUD operations. If a resource's
provider reference changes, the resource must be replaced. Though its
URN is not present in the resource's dependency list, the provider
should be treated as a dependency of the resource when topologically
sorting the dependency graph.
Finally, `Invoke` operations must now specify a provider to use for the
invocation via a provider reference.
### Engine Changes
First-class providers support requires a few changes to the engine:
- The engine must have some way to map from provider references to
provider plugins. It must be possible to add providers from a stack's
checkpoint to this map and to register new/updated providers during
the execution of a plan in response to CRUD operations on provider
resources.
- In order to support updating existing stacks using existing Pulumi
programs that may not explicitly instantiate providers, the engine
must be able to manage the "default" providers for each package
referenced by a checkpoint or Pulumi program. The configuration for
a "default" provider is taken from the stack's configuration data.
The former need is addressed by adding a provider registry type that is
responsible for managing all of the plugins required by a plan. In
addition to loading plugins from a checkpoint and providing the ability
to map from a provider reference to a provider plugin, this type serves
as the provider plugin for providers themselves (i.e. it is the
"provider provider").
The latter need is solved via two relatively self-contained changes to
plan setup and the eval source.
During plan setup, the old checkpoint is scanned for custom resources
that do not have a provider reference in order to compute the set of
packages that require a default provider. Once this set has been
computed, the required default provider definitions are conjured and
prepended to the checkpoint's resource list. Each resource that
requires a default provider is then updated to refer to the default
provider for its package.
While an eval source is running, each custom resource registration,
resource read, and invoke that does not name a provider is trapped
before being returned by the source iterator. If no default provider
for the appropriate package has been registered, the eval source
synthesizes an appropriate registration, waits for it to complete, and
records the registered provider's reference. This reference is injected
into the original request, which is then processed as usual. If a
default provider was already registered, the recorded reference is
used and no new registration occurs.
### SDK Changes
These changes only expose first-class providers from the Node.JS SDK.
- A new abstract class, `ProviderResource`, can be subclassed and used
to instantiate first-class providers.
- A new field in `ResourceOptions`, `provider`, can be used to supply
a particular provider instance to manage a `CustomResource`'s CRUD
operations.
- A new type, `InvokeOptions`, can be used to specify options that
control the behavior of a call to `pulumi.runtime.invoke`. This type
includes a `provider` field that is analogous to
`ResourceOptions.provider`.
2018-08-07 02:50:29 +02:00
|
|
|
}, defaultProviderVersions, dryRun), nil
|
2017-08-23 01:56:15 +02:00
|
|
|
}
|
|
|
|
|
2020-11-18 20:16:30 +01:00
|
|
|
func update(ctx *Context, info *deploymentContext, opts deploymentOptions,
|
|
|
|
preview bool) (ResourceChanges, result.Result) {
|
2020-03-27 21:21:28 +01:00
|
|
|
|
2020-10-14 13:51:53 +02:00
|
|
|
// Refresh and Import do not execute Policy Packs.
|
2020-11-18 20:16:30 +01:00
|
|
|
policies := map[string]string{}
|
2020-10-14 13:51:53 +02:00
|
|
|
if !opts.isRefresh && !opts.isImport {
|
2020-03-27 21:21:28 +01:00
|
|
|
for _, p := range opts.RequiredPolicies {
|
|
|
|
policies[p.Name()] = p.Version()
|
|
|
|
}
|
|
|
|
for _, pack := range opts.LocalPolicyPacks {
|
|
|
|
path := abbreviateFilePath(pack.Path)
|
|
|
|
packName := fmt.Sprintf("%s (%s)", pack.Name, path)
|
|
|
|
policies[packName] = "(local)"
|
|
|
|
}
|
2019-10-09 22:50:28 +02:00
|
|
|
}
|
2019-09-26 03:42:30 +02:00
|
|
|
|
2020-11-18 20:16:30 +01:00
|
|
|
// Create an appropriate set of event listeners.
|
|
|
|
var actions runActions
|
|
|
|
if preview {
|
|
|
|
actions = newPreviewActions(opts)
|
|
|
|
} else {
|
|
|
|
actions = newUpdateActions(ctx, info.Update, opts)
|
|
|
|
}
|
2019-09-26 03:42:30 +02:00
|
|
|
|
2020-11-18 20:16:30 +01:00
|
|
|
deployment, err := newDeployment(ctx, info, opts, preview)
|
|
|
|
if err != nil {
|
|
|
|
return nil, result.FromError(err)
|
2017-08-23 01:56:15 +02:00
|
|
|
}
|
2020-11-18 20:16:30 +01:00
|
|
|
defer contract.IgnoreClose(deployment)
|
|
|
|
|
|
|
|
return deployment.run(ctx, actions, policies, preview)
|
2017-08-23 01:56:15 +02:00
|
|
|
}
|
|
|
|
|
2020-01-30 22:31:41 +01:00
|
|
|
// abbreviateFilePath is a helper function that cleans up and shortens a provided file path.
// If the path is long (more than 75 characters after cleaning), it will keep the first two
// and last two directories and then replace the middle directories with `...`.
func abbreviateFilePath(path string) string {
	path = filepath.Clean(path)
	if len(path) <= 75 {
		return path
	}

	// The path is long; attempt to elide its middle. Start by assuming
	// forward-slash separators.
	sep := "/"
	parts := strings.Split(path, sep)

	// If we get no splits, we will try to use the backslashes in support of a Windows path.
	if len(parts) == 1 {
		sep = `\`
		parts = strings.Split(path, sep)
	}

	// Keep the first two and last two components and drop everything in between.
	if len(parts) > 4 {
		shortened := make([]string, 0, 5)
		shortened = append(shortened, parts[:2]...)
		shortened = append(shortened, "...")
		shortened = append(shortened, parts[len(parts)-2:]...)
		parts = shortened
	}
	return strings.Join(parts, sep)
}
|
|
|
|
|
General prep work for refresh
This change includes a bunch of refactorings I made in prep for
doing refresh (first, the command, see pulumi/pulumi#1081):
* The primary change is to change the way the engine's core update
functionality works with respect to deploy.Source. This is the
way we can plug in new sources of resource information during
planning (and, soon, diffing). The way I intend to model refresh
is by having a new kind of source, deploy.RefreshSource, which
will let us do virtually everything about an update/diff the same
way with refreshes, which avoid otherwise duplicative effort.
This includes changing the planOptions (nee deployOptions) to
take a new SourceFunc callback, which is responsible for creating
a source specific to the kind of plan being requested.
Preview, Update, and Destroy now are primarily differentiated by
the kind of deploy.Source that they return, rather than sprinkling
things like `if Destroying` throughout. This tidies up some logic
and, more importantly, gives us precisely the refresh hook we need.
* Originally, we used the deploy.NullSource for Destroy operations.
This simply returns nothing, which is how Destroy works. For some
reason, we were no longer doing this, and instead had some
`if Destroying` cases sprinkled throughout the deploy.EvalSource.
I think this is a vestige of some old way we did configuration, at
least judging by a comment, which is apparently no longer relevant.
* Move diff and diff-printing logic within the engine into its own
pkg/engine/diff.go file, to prepare for upcoming work.
* I keep noticing benign diffs anytime I regenerate protobufs. I
suspect this is because we're also on different versions. I changed
generate.sh to also dump the version into grpc_version.txt. At
least we can understand where the diffs are coming from, decide
whether to take them (i.e., a newer version), and ensure that as
a team we are monotonically increasing, and not going backwards.
* I also tidied up some tiny things I noticed while in there, like
comments, incorrect types, lint suppressions, and so on.
2018-03-28 16:45:23 +02:00
|
|
|
// updateActions pretty-prints the plan application process as it goes.
type updateActions struct {
	// Context is the engine context for the update in progress.
	Context *Context
	// Steps counts the steps recorded so far.
	Steps int
	// Ops counts recorded steps, bucketed by step operation.
	Ops map[deploy.StepOp]int
	// Seen tracks the steps observed so far, keyed by resource URN.
	Seen map[resource.URN]deploy.Step
	// MapLock guards concurrent access to Steps, Ops, and Seen.
	MapLock sync.Mutex
	// Update carries information about the update being performed.
	Update UpdateInfo
	// Opts holds the options for this deployment.
	Opts deploymentOptions

	// maybeCorrupt is set when a step fails with an unknown resource status,
	// in which case the persisted state may no longer match reality.
	maybeCorrupt bool
}
|
|
|
|
|
2020-11-18 20:16:30 +01:00
|
|
|
func newUpdateActions(context *Context, u UpdateInfo, opts deploymentOptions) *updateActions {
|
General prep work for refresh
This change includes a bunch of refactorings I made in prep for
doing refresh (first, the command, see pulumi/pulumi#1081):
* The primary change is to change the way the engine's core update
functionality works with respect to deploy.Source. This is the
way we can plug in new sources of resource information during
planning (and, soon, diffing). The way I intend to model refresh
is by having a new kind of source, deploy.RefreshSource, which
will let us do virtually everything about an update/diff the same
way with refreshes, which avoid otherwise duplicative effort.
This includes changing the planOptions (nee deployOptions) to
take a new SourceFunc callback, which is responsible for creating
a source specific to the kind of plan being requested.
Preview, Update, and Destroy now are primarily differentiated by
the kind of deploy.Source that they return, rather than sprinkling
things like `if Destroying` throughout. This tidies up some logic
and, more importantly, gives us precisely the refresh hook we need.
* Originally, we used the deploy.NullSource for Destroy operations.
This simply returns nothing, which is how Destroy works. For some
reason, we were no longer doing this, and instead had some
`if Destroying` cases sprinkled throughout the deploy.EvalSource.
I think this is a vestige of some old way we did configuration, at
least judging by a comment, which is apparently no longer relevant.
* Move diff and diff-printing logic within the engine into its own
pkg/engine/diff.go file, to prepare for upcoming work.
* I keep noticing benign diffs anytime I regenerate protobufs. I
suspect this is because we're also on different versions. I changed
generate.sh to also dump the version into grpc_version.txt. At
least we can understand where the diffs are coming from, decide
whether to take them (i.e., a newer version), and ensure that as
a team we are monotonically increasing, and not going backwards.
* I also tidied up some tiny things I noticed while in there, like
comments, incorrect types, lint suppressions, and so on.
2018-03-28 16:45:23 +02:00
|
|
|
return &updateActions{
|
2018-04-20 03:59:14 +02:00
|
|
|
Context: context,
|
|
|
|
Ops: make(map[deploy.StepOp]int),
|
|
|
|
Seen: make(map[resource.URN]deploy.Step),
|
|
|
|
Update: u,
|
|
|
|
Opts: opts,
|
2017-11-17 03:21:41 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
General prep work for refresh
This change includes a bunch of refactorings I made in prep for
doing refresh (first, the command, see pulumi/pulumi#1081):
* The primary change is to change the way the engine's core update
functionality works with respect to deploy.Source. This is the
way we can plug in new sources of resource information during
planning (and, soon, diffing). The way I intend to model refresh
is by having a new kind of source, deploy.RefreshSource, which
will let us do virtually everything about an update/diff the same
way with refreshes, which avoid otherwise duplicative effort.
This includes changing the planOptions (nee deployOptions) to
take a new SourceFunc callback, which is responsible for creating
a source specific to the kind of plan being requested.
Preview, Update, and Destroy now are primarily differentiated by
the kind of deploy.Source that they return, rather than sprinkling
things like `if Destroying` throughout. This tidies up some logic
and, more importantly, gives us precisely the refresh hook we need.
* Originally, we used the deploy.NullSource for Destroy operations.
This simply returns nothing, which is how Destroy works. For some
reason, we were no longer doing this, and instead had some
`if Destroying` cases sprinkled throughout the deploy.EvalSource.
I think this is a vestige of some old way we did configuration, at
least judging by a comment, which is apparently no longer relevant.
* Move diff and diff-printing logic within the engine into its own
pkg/engine/diff.go file, to prepare for upcoming work.
* I keep noticing benign diffs anytime I regenerate protobufs. I
suspect this is because we're also on different versions. I changed
generate.sh to also dump the version into grpc_version.txt. At
least we can understand where the diffs are coming from, decide
whether to take them (i.e., a newer version), and ensure that as
a team we are monotonically increasing, and not going backwards.
* I also tidied up some tiny things I noticed while in there, like
comments, incorrect types, lint suppressions, and so on.
2018-03-28 16:45:23 +02:00
|
|
|
// OnResourceStepPre is invoked before a resource step executes. It records the
// step in the Seen map, emits a pre-step display event when the step should be
// reported, and asks the snapshot manager to begin a mutation for the step.
// NOTE(review): the returned value appears to be handed back to
// OnResourceStepPost as its ctx argument — confirm against the caller.
func (acts *updateActions) OnResourceStepPre(step deploy.Step) (interface{}, error) {
	// Ensure we've marked this step as observed. The map is shared, so hold
	// MapLock for the duration of the write.
	acts.MapLock.Lock()
	acts.Seen[step.URN()] = step
	acts.MapLock.Unlock()

	// Skip reporting if necessary.
	if shouldReportStep(step, acts.Opts) {
		acts.Opts.Events.resourcePreEvent(step, false /*planning*/, acts.Opts.Debug)
	}

	// Inform the snapshot service that we are about to perform a step.
	return acts.Context.SnapshotManager.BeginMutation(step)
}
|
2017-08-23 01:56:15 +02:00
|
|
|
|
2018-12-19 22:19:56 +01:00
|
|
|
func (acts *updateActions) OnResourceStepPost(
|
|
|
|
ctx interface{}, step deploy.Step,
|
|
|
|
status resource.Status, err error) error {
|
|
|
|
|
2018-08-07 01:46:17 +02:00
|
|
|
acts.MapLock.Lock()
|
2018-02-03 01:02:50 +01:00
|
|
|
assertSeen(acts.Seen, step)
|
2018-08-07 01:46:17 +02:00
|
|
|
acts.MapLock.Unlock()
|
2018-02-03 01:02:50 +01:00
|
|
|
|
2018-04-20 03:59:14 +02:00
|
|
|
// If we've already been terminated, exit without writing the checkpoint. We explicitly want to leave the
|
|
|
|
// checkpoint in an inconsistent state in this event.
|
|
|
|
if acts.Context.Cancel.TerminateErr() != nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
Implement more precise delete-before-replace semantics. (#2369)
This implements the new algorithm for deciding which resources must be
deleted due to a delete-before-replace operation.
We need to compute the set of resources that may be replaced by a
change to the resource under consideration. We do this by taking the
complete set of transitive dependents on the resource under
consideration and removing any resources that would not be replaced by
changes to their dependencies. We determine whether or not a resource
may be replaced by substituting unknowns for input properties that may
change due to deletion of the resources their value depends on and
calling the resource provider's Diff method.
This is perhaps clearer when described by example. Consider the
following dependency graph:
A
__|__
B C
| _|_
D E F
In this graph, all of B, C, D, E, and F transitively depend on A. It may
be the case, however, that changes to the specific properties of any of
those resources R that would occur if a resource on the path to A were
deleted and recreated may not cause R to be replaced. For example, the
edge from B to A may be a simple dependsOn edge such that a change to
B does not actually influence any of B's input properties. In that case,
neither B nor D would need to be deleted before A could be deleted.
In order to make the above algorithm a reality, the resource monitor
interface has been updated to include a map that associates an input
property key with the list of resources that input property depends on.
Older clients of the resource monitor will leave this map empty, in
which case all input properties will be treated as depending on all
dependencies of the resource. This is probably overly conservative, but
it is less conservative than what we currently implement, and is
certainly correct.
2019-01-28 18:46:30 +01:00
|
|
|
reportStep := shouldReportStep(step, acts.Opts)
|
2018-08-09 23:45:39 +02:00
|
|
|
|
2017-10-03 19:27:59 +02:00
|
|
|
// Report the result of the step.
|
2017-08-23 01:56:15 +02:00
|
|
|
if err != nil {
|
2018-02-04 10:18:06 +01:00
|
|
|
if status == resource.StatusUnknown {
|
2020-11-18 20:16:30 +01:00
|
|
|
acts.maybeCorrupt = true
|
2017-08-23 01:56:15 +02:00
|
|
|
}
|
2018-02-04 10:18:06 +01:00
|
|
|
|
2018-08-09 23:45:39 +02:00
|
|
|
errorURN := resource.URN("")
|
|
|
|
if reportStep {
|
2020-07-09 16:19:12 +02:00
|
|
|
errorURN = step.URN()
|
2018-08-09 23:45:39 +02:00
|
|
|
}
|
|
|
|
|
2018-02-04 10:18:06 +01:00
|
|
|
// Issue a true, bonafide error.
|
2019-07-30 03:51:11 +02:00
|
|
|
acts.Opts.Diag.Errorf(diag.GetResourceOperationFailedError(errorURN), err)
|
2018-08-09 23:45:39 +02:00
|
|
|
if reportStep {
|
|
|
|
acts.Opts.Events.resourceOperationFailedEvent(step, status, acts.Steps, acts.Opts.Debug)
|
|
|
|
}
|
|
|
|
} else if reportStep {
|
2018-08-23 02:52:46 +02:00
|
|
|
op, record := step.Op(), step.Logical()
|
|
|
|
if acts.Opts.isRefresh && op == deploy.OpRefresh {
|
|
|
|
// Refreshes are handled specially.
|
|
|
|
op, record = step.(*deploy.RefreshStep).ResultOp(), true
|
|
|
|
}
|
|
|
|
|
2018-12-19 22:19:56 +01:00
|
|
|
if step.Op() == deploy.OpRead {
|
|
|
|
record = ShouldRecordReadStep(step)
|
|
|
|
}
|
|
|
|
|
2018-08-23 02:52:46 +02:00
|
|
|
if record {
|
2017-11-29 20:27:32 +01:00
|
|
|
// Increment the counters.
|
2018-08-07 01:46:17 +02:00
|
|
|
acts.MapLock.Lock()
|
2017-11-29 20:27:32 +01:00
|
|
|
acts.Steps++
|
2018-08-23 02:52:46 +02:00
|
|
|
acts.Ops[op]++
|
2018-08-07 01:46:17 +02:00
|
|
|
acts.MapLock.Unlock()
|
2017-11-29 20:27:32 +01:00
|
|
|
}
|
|
|
|
|
2018-04-20 20:52:33 +02:00
|
|
|
// Also show outputs here for custom resources, since there might be some from the initial registration. We do
|
|
|
|
// not show outputs for component resources at this point: any that exist must be from a previous execution of
|
|
|
|
// the Pulumi program, as component resources only report outputs via calls to RegisterResourceOutputs.
|
2018-08-23 02:52:46 +02:00
|
|
|
if step.Res().Custom || acts.Opts.Refresh && step.Op() == deploy.OpRefresh {
|
|
|
|
acts.Opts.Events.resourceOutputsEvent(op, step, false /*planning*/, acts.Opts.Debug)
|
2018-04-20 20:44:28 +02:00
|
|
|
}
|
2017-08-23 01:56:15 +02:00
|
|
|
}
|
2017-10-02 23:27:50 +02:00
|
|
|
|
2018-10-09 20:19:31 +02:00
|
|
|
// See pulumi/pulumi#2011 for details. Terraform always returns the existing state with the diff applied to it in
|
|
|
|
// the event of an update failure. It's appropriate that we save this new state in the output of the resource, but
|
|
|
|
// it is not appropriate to save the inputs, because the resource that exists was not created or updated
|
|
|
|
// successfully with those inputs.
|
|
|
|
//
|
|
|
|
// If we were doing an update and got a `StatusPartialFailure`, the resource that ultimately gets persisted in the
|
|
|
|
// snapshot should be old inputs and new outputs. We accomplish that here by clobbering the new resource's inputs
|
|
|
|
// with the old inputs.
|
|
|
|
//
|
|
|
|
// This is a little kludgy given that these resources are global state. However, given the way that we have
|
|
|
|
// implemented the snapshot manager and engine today, it's the easiest way to accomplish what we are trying to do.
|
|
|
|
if status == resource.StatusPartialFailure && step.Op() == deploy.OpUpdate {
|
|
|
|
logging.V(7).Infof(
|
|
|
|
"OnResourceStepPost(%s): Step is partially-failed update, saving old inputs instead of new inputs",
|
2020-07-09 16:19:12 +02:00
|
|
|
step.URN())
|
2018-10-09 20:19:31 +02:00
|
|
|
new := step.New()
|
|
|
|
old := step.Old()
|
|
|
|
contract.Assert(new != nil)
|
|
|
|
contract.Assert(old != nil)
|
|
|
|
new.Inputs = make(resource.PropertyMap)
|
|
|
|
for key, value := range old.Inputs {
|
|
|
|
new.Inputs[key] = value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
Implement resource protection (#751)
This change implements resource protection, as per pulumi/pulumi#689.
The overall idea is that a resource can be marked as "protect: true",
which will prevent deletion of that resource for any reason whatsoever
(straight deletion, replacement, etc). This is expressed in the
program. To "unprotect" a resource, one must perform an update setting
"protect: false", and then afterwards, they can delete the resource.
For example:
let res = new MyResource("precious", { .. }, { protect: true });
Afterwards, the resource will display in the CLI with a lock icon, and
any attempts to remove it will fail in the usual ways (in planning or,
worst case, during an actual update).
This was done by adding a new ResourceOptions bag parameter to the
base Resource types. This is unfortunately a breaking change, but now
is the right time to take this one. We had been adding new settings
one by one -- like parent and dependsOn -- and this new approach will
set us up to add any number of additional settings down the road,
without needing to worry about breaking anything ever again.
This is related to protected stacks, as described in
pulumi/pulumi-service#399. Most likely this will serve as a foundational
building block that enables the coarser grained policy management.
2017-12-20 23:31:07 +01:00
|
|
|
// Write out the current snapshot. Note that even if a failure has occurred, we should still have a
|
2018-04-26 02:20:08 +02:00
|
|
|
// safe checkpoint. Note that any error that occurs when writing the checkpoint trumps the error
|
|
|
|
// reported above.
|
2018-05-02 19:36:55 +02:00
|
|
|
return ctx.(SnapshotMutation).End(step, err == nil || status == resource.StatusPartialFailure)
|
Bring back component outputs
This change brings back component outputs to the overall system again.
In doing so, it generally overhauls the way we do resource RPCs a bit:
* Instead of RegisterResource and CompleteResource, we call these
BeginRegisterResource and EndRegisterResource, which begins to model
these as effectively "asynchronous" resource requests. This should also
help with parallelism (https://github.com/pulumi/pulumi/issues/106).
* Flip the CLI/engine a little on its head. Rather than it driving the
planning and deployment process, we move more to a model where it
simply observes it. This is done by implementing an event handler
interface with three events: OnResourceStepPre, OnResourceStepPost,
and OnResourceComplete. The first two are invoked immediately before
and after any step operation, and the latter is invoked whenever a
EndRegisterResource comes in. The reason for the asymmetry here is
that the checkpointing logic in the deployment engine is largely
untouched (intentionally, as this is a sensitive part of the system),
and so the "begin"/"end" nature doesn't flow through faithfully.
* Also make the engine more event-oriented in its terminology and the
way it handles the incoming BeginRegisterResource and
EndRegisterResource events from the language host. This is the first
step down a long road of incrementally refactoring the engine to work
this way, a necessary prerequisite for parallelism.
2017-11-29 16:42:14 +01:00
|
|
|
}
|
2017-10-03 19:27:59 +02:00
|
|
|
|
General prep work for refresh
This change includes a bunch of refactorings I made in prep for
doing refresh (first, the command, see pulumi/pulumi#1081):
* The primary change is to change the way the engine's core update
functionality works with respect to deploy.Source. This is the
way we can plug in new sources of resource information during
planning (and, soon, diffing). The way I intend to model refresh
is by having a new kind of source, deploy.RefreshSource, which
will let us do virtually everything about an update/diff the same
way with refreshes, which avoid otherwise duplicative effort.
This includes changing the planOptions (nee deployOptions) to
take a new SourceFunc callback, which is responsible for creating
a source specific to the kind of plan being requested.
Preview, Update, and Destroy now are primarily differentiated by
the kind of deploy.Source that they return, rather than sprinkling
things like `if Destroying` throughout. This tidies up some logic
and, more importantly, gives us precisely the refresh hook we need.
* Originally, we used the deploy.NullSource for Destroy operations.
This simply returns nothing, which is how Destroy works. For some
reason, we were no longer doing this, and instead had some
`if Destroying` cases sprinkled throughout the deploy.EvalSource.
I think this is a vestige of some old way we did configuration, at
least judging by a comment, which is apparently no longer relevant.
* Move diff and diff-printing logic within the engine into its own
pkg/engine/diff.go file, to prepare for upcoming work.
* I keep noticing benign diffs anytime I regenerate protobufs. I
suspect this is because we're also on different versions. I changed
generate.sh to also dump the version into grpc_version.txt. At
least we can understand where the diffs are coming from, decide
whether to take them (i.e., a newer version), and ensure that as
a team we are monotonically increasing, and not going backwards.
* I also tidied up some tiny things I noticed while in there, like
comments, incorrect types, lint suppressions, and so on.
2018-03-28 16:45:23 +02:00
|
|
|
func (acts *updateActions) OnResourceOutputs(step deploy.Step) error {
|
2018-08-07 01:46:17 +02:00
|
|
|
acts.MapLock.Lock()
|
2018-02-03 01:02:50 +01:00
|
|
|
assertSeen(acts.Seen, step)
|
2018-08-07 01:46:17 +02:00
|
|
|
acts.MapLock.Unlock()
|
2018-02-03 01:02:50 +01:00
|
|
|
|
Implement more precise delete-before-replace semantics. (#2369)
This implements the new algorithm for deciding which resources must be
deleted due to a delete-before-replace operation.
We need to compute the set of resources that may be replaced by a
change to the resource under consideration. We do this by taking the
complete set of transitive dependents on the resource under
consideration and removing any resources that would not be replaced by
changes to their dependencies. We determine whether or not a resource
may be replaced by substituting unknowns for input properties that may
change due to deletion of the resources their value depends on and
calling the resource provider's Diff method.
This is perhaps clearer when described by example. Consider the
following dependency graph:
A
__|__
B C
| _|_
D E F
In this graph, all of B, C, D, E, and F transitively depend on A. It may
be the case, however, that changes to the specific properties of any of
those resources R that would occur if a resource on the path to A were
deleted and recreated may not cause R to be replaced. For example, the
edge from B to A may be a simple dependsOn edge such that a change to
B does not actually influence any of B's input properties. In that case,
neither B nor D would need to be deleted before A could be deleted.
In order to make the above algorithm a reality, the resource monitor
interface has been updated to include a map that associates an input
property key with the list of resources that input property depends on.
Older clients of the resource monitor will leave this map empty, in
which case all input properties will be treated as depending on all
dependencies of the resource. This is probably overly conservative, but
it is less conservative than what we currently implement, and is
certainly correct.
2019-01-28 18:46:30 +01:00
|
|
|
// Skip reporting if necessary.
|
|
|
|
if shouldReportStep(step, acts.Opts) {
|
2018-08-23 02:52:46 +02:00
|
|
|
acts.Opts.Events.resourceOutputsEvent(step.Op(), step, false /*planning*/, acts.Opts.Debug)
|
2018-08-09 23:45:39 +02:00
|
|
|
}
|
2018-04-17 08:04:56 +02:00
|
|
|
|
|
|
|
// There's a chance there are new outputs that weren't written out last time.
|
|
|
|
// We need to perform another snapshot write to ensure they get written out.
|
2018-04-26 02:20:08 +02:00
|
|
|
return acts.Context.SnapshotManager.RegisterResourceOutputs(step)
|
2017-08-23 01:56:15 +02:00
|
|
|
}
|
2019-06-11 00:20:44 +02:00
|
|
|
|
|
|
|
func (acts *updateActions) OnPolicyViolation(urn resource.URN, d plugin.AnalyzeDiagnostic) {
|
|
|
|
acts.Opts.Events.policyViolationEvent(urn, d)
|
|
|
|
}
|
2020-11-18 20:16:30 +01:00
|
|
|
|
|
|
|
func (acts *updateActions) MaybeCorrupt() bool {
|
|
|
|
return acts.maybeCorrupt
|
|
|
|
}
|
|
|
|
|
|
|
|
func (acts *updateActions) Changes() ResourceChanges {
|
|
|
|
return ResourceChanges(acts.Ops)
|
|
|
|
}
|
|
|
|
|
|
|
|
// previewActions implements the engine's step-event callbacks for preview (planning)
// operations, tracking and reporting the steps it observes.
type previewActions struct {
	// Ops counts the logically-meaningful steps observed, keyed by operation kind.
	Ops map[deploy.StepOp]int
	// Opts carries the deployment options (event sinks, diag, refresh/report flags).
	Opts deploymentOptions
	// Seen records each step by URN as it begins, so later callbacks can assert the
	// step was previously observed.
	Seen map[resource.URN]deploy.Step
	// MapLock guards concurrent access to Ops and Seen.
	MapLock sync.Mutex
}
|
|
|
|
|
|
|
|
func shouldReportStep(step deploy.Step, opts deploymentOptions) bool {
|
|
|
|
return step.Op() != deploy.OpRemovePendingReplace &&
|
|
|
|
(opts.reportDefaultProviderSteps || !isDefaultProviderStep(step))
|
|
|
|
}
|
|
|
|
|
|
|
|
func ShouldRecordReadStep(step deploy.Step) bool {
|
|
|
|
contract.Assertf(step.Op() == deploy.OpRead, "Only call this on a Read step")
|
|
|
|
|
|
|
|
// If reading a resource didn't result in any change to the resource, we then want to
|
|
|
|
// record this as a 'same'. That way, when things haven't actually changed, but a user
|
|
|
|
// app did any 'reads' these don't show up in the resource summary at the end.
|
|
|
|
return step.Old() != nil &&
|
|
|
|
step.New() != nil &&
|
|
|
|
step.Old().Outputs != nil &&
|
|
|
|
step.New().Outputs != nil &&
|
|
|
|
step.Old().Outputs.Diff(step.New().Outputs) != nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func newPreviewActions(opts deploymentOptions) *previewActions {
|
|
|
|
return &previewActions{
|
|
|
|
Ops: make(map[deploy.StepOp]int),
|
|
|
|
Opts: opts,
|
|
|
|
Seen: make(map[resource.URN]deploy.Step),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (acts *previewActions) OnResourceStepPre(step deploy.Step) (interface{}, error) {
|
|
|
|
acts.MapLock.Lock()
|
|
|
|
acts.Seen[step.URN()] = step
|
|
|
|
acts.MapLock.Unlock()
|
|
|
|
|
|
|
|
// Skip reporting if necessary.
|
|
|
|
if !shouldReportStep(step, acts.Opts) {
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
acts.Opts.Events.resourcePreEvent(step, true /*planning*/, acts.Opts.Debug)
|
|
|
|
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (acts *previewActions) OnResourceStepPost(ctx interface{},
|
|
|
|
step deploy.Step, status resource.Status, err error) error {
|
|
|
|
acts.MapLock.Lock()
|
|
|
|
assertSeen(acts.Seen, step)
|
|
|
|
acts.MapLock.Unlock()
|
|
|
|
|
|
|
|
reportStep := shouldReportStep(step, acts.Opts)
|
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
// We always want to report a failure. If we intend to elide this step overall, though, we report it as a
|
|
|
|
// global message.
|
|
|
|
reportedURN := resource.URN("")
|
|
|
|
if reportStep {
|
|
|
|
reportedURN = step.URN()
|
|
|
|
}
|
|
|
|
|
|
|
|
acts.Opts.Diag.Errorf(diag.GetPreviewFailedError(reportedURN), err)
|
|
|
|
} else if reportStep {
|
|
|
|
op, record := step.Op(), step.Logical()
|
|
|
|
if acts.Opts.isRefresh && op == deploy.OpRefresh {
|
|
|
|
// Refreshes are handled specially.
|
|
|
|
op, record = step.(*deploy.RefreshStep).ResultOp(), true
|
|
|
|
}
|
|
|
|
|
|
|
|
if step.Op() == deploy.OpRead {
|
|
|
|
record = ShouldRecordReadStep(step)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Track the operation if shown and/or if it is a logically meaningful operation.
|
|
|
|
if record {
|
|
|
|
acts.MapLock.Lock()
|
|
|
|
acts.Ops[op]++
|
|
|
|
acts.MapLock.Unlock()
|
|
|
|
}
|
|
|
|
|
|
|
|
acts.Opts.Events.resourceOutputsEvent(op, step, true /*planning*/, acts.Opts.Debug)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (acts *previewActions) OnResourceOutputs(step deploy.Step) error {
|
|
|
|
acts.MapLock.Lock()
|
|
|
|
assertSeen(acts.Seen, step)
|
|
|
|
acts.MapLock.Unlock()
|
|
|
|
|
|
|
|
// Skip reporting if necessary.
|
|
|
|
if !shouldReportStep(step, acts.Opts) {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Print the resource outputs separately.
|
|
|
|
acts.Opts.Events.resourceOutputsEvent(step.Op(), step, true /*planning*/, acts.Opts.Debug)
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (acts *previewActions) OnPolicyViolation(urn resource.URN, d plugin.AnalyzeDiagnostic) {
|
|
|
|
acts.Opts.Events.policyViolationEvent(urn, d)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (acts *previewActions) MaybeCorrupt() bool {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
func (acts *previewActions) Changes() ResourceChanges {
|
|
|
|
return ResourceChanges(acts.Ops)
|
|
|
|
}
|