2018-05-22 21:43:36 +02:00
|
|
|
// Copyright 2016-2018, Pulumi Corporation.
|
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
2017-02-22 03:31:43 +01:00
|
|
|
|
2017-10-16 21:04:35 +02:00
|
|
|
package stack
|
2017-02-22 03:31:43 +01:00
|
|
|
|
|
|
|
import (
|
2018-05-25 22:29:59 +02:00
|
|
|
"encoding/json"
|
|
|
|
"fmt"
|
2017-02-22 23:32:03 +01:00
|
|
|
"reflect"
|
|
|
|
|
2018-08-03 23:06:00 +02:00
|
|
|
"github.com/blang/semver"
|
Implement more precise delete-before-replace semantics. (#2369)
This implements the new algorithm for deciding which resources must be
deleted due to a delete-before-replace operation.
We need to compute the set of resources that may be replaced by a
change to the resource under consideration. We do this by taking the
complete set of transitive dependents on the resource under
consideration and removing any resources that would not be replaced by
changes to their dependencies. We determine whether or not a resource
may be replaced by substituting unknowns for input properties that may
change due to deletion of the resources their value depends on and
calling the resource provider's Diff method.
This is perhaps clearer when described by example. Consider the
following dependency graph:
A
__|__
B C
| _|_
D E F
In this graph, all of B, C, D, E, and F transitively depend on A. It may
be the case, however, that changes to the specific properties of any of
those resources R that would occur if a resource on the path to A were
deleted and recreated may not cause R to be replaced. For example, the
edge from B to A may be a simple dependsOn edge such that a change to
B does not actually influence any of B's input properties. In that case,
neither B nor D would need to be deleted before A could be deleted.
In order to make the above algorithm a reality, the resource monitor
interface has been updated to include a map that associates an input
property key with the list of resources that input property depends on.
Older clients of the resource monitor will leave this map empty, in
which case all input properties will be treated as depending on all
dependencies of the resource. This is probably overly conservative, but
it is less conservative than what we currently implement, and is
certainly correct.
2019-01-28 18:46:30 +01:00
|
|
|
"github.com/pkg/errors"
|
2020-03-19 01:27:02 +01:00
|
|
|
"github.com/pulumi/pulumi/pkg/resource/deploy"
|
|
|
|
"github.com/pulumi/pulumi/pkg/secrets"
|
2020-03-18 23:00:30 +01:00
|
|
|
"github.com/pulumi/pulumi/sdk/go/common/apitype"
|
|
|
|
"github.com/pulumi/pulumi/sdk/go/common/apitype/migrate"
|
2020-03-18 21:36:19 +01:00
|
|
|
"github.com/pulumi/pulumi/sdk/go/common/resource"
|
2020-03-18 23:03:37 +01:00
|
|
|
"github.com/pulumi/pulumi/sdk/go/common/resource/config"
|
2020-03-18 22:40:07 +01:00
|
|
|
"github.com/pulumi/pulumi/sdk/go/common/util/contract"
|
2020-03-18 22:35:53 +01:00
|
|
|
"github.com/pulumi/pulumi/sdk/go/common/workspace"
|
2017-02-22 03:31:43 +01:00
|
|
|
)
|
|
|
|
|
2018-05-25 22:29:59 +02:00
|
|
|
const (
	// DeploymentSchemaVersionOldestSupported is the oldest deployment schema that we
	// still support, i.e. we can produce a `deploy.Snapshot` from. This will generally
	// need to be at least one less than the current schema version so that old deployments can
	// be migrated to the current schema.
	DeploymentSchemaVersionOldestSupported = 1

	// computedValuePlaceholder is a magic string we emit in place of a resource.Property
	// value whenever we need to serialize a resource.Computed (since the real/actual value
	// is not known). This allows us to persist engine events and resource states that
	// indicate a value will change... but where it is unknown what it will change to.
	computedValuePlaceholder = "04da6b54-80e4-46f7-96ec-b56ff0331ba9"
)
|
|
|
|
|
|
|
|
var (
	// ErrDeploymentSchemaVersionTooOld is returned from `DeserializeDeployment` if the
	// untyped deployment being deserialized is too old to understand.
	// Sentinel errors with static messages use errors.New rather than fmt.Errorf,
	// which has no format directives to apply here.
	ErrDeploymentSchemaVersionTooOld = errors.New("this stack's deployment is too old")

	// ErrDeploymentSchemaVersionTooNew is returned from `DeserializeDeployment` if the
	// untyped deployment being deserialized is too new to understand.
	ErrDeploymentSchemaVersionTooNew = errors.New("this stack's deployment version is too new")
)
|
|
|
|
|
Make more progress on the new deployment model
This change restructures a lot more pertaining to deployments, snapshots,
environments, and the like.
The most notable change is that the notion of a deploy.Source is introduced,
which splits the responsibility between the deploy.Plan -- which simply
understands how to compute and carry out deployment plans -- and the idea
of something that can produce new objects on-demand during deployment.
The primary such implementation is evalSource, which encapsulates an
interpreter and takes a package, args, and config map, and proceeds to run
the interpreter in a distinct goroutine. It synchronizes as needed to
poke and prod the interpreter along its path to create new resource objects.
There are two other sources, however. First, a nullSource, which simply
refuses to create new objects. This can be handy when writing isolated
tests but is also used to simulate the "empty" environment as necessary to
do a complete teardown of the target environment. Second, a fixedSource,
which takes a pre-computed array of objects, and hands those, in order, to
the planning engine; this is mostly useful as a testing technique.
Boatloads of code is now changed and updated in the various CLI commands.
This further chugs along towards pulumi/lumi#90. The end is in sight.
2017-06-10 20:50:47 +02:00
|
|
|
// SerializeDeployment serializes an entire snapshot as a deploy record.
|
2019-04-17 22:48:38 +02:00
|
|
|
func SerializeDeployment(snap *deploy.Snapshot, sm secrets.Manager) (*apitype.DeploymentV3, error) {
|
2018-05-25 22:29:59 +02:00
|
|
|
contract.Require(snap != nil, "snap")
|
|
|
|
|
2017-12-01 22:50:32 +01:00
|
|
|
// Capture the version information into a manifest.
|
2018-07-20 22:31:41 +02:00
|
|
|
manifest := apitype.ManifestV1{
|
2017-12-01 22:50:32 +01:00
|
|
|
Time: snap.Manifest.Time,
|
|
|
|
Magic: snap.Manifest.Magic,
|
|
|
|
Version: snap.Manifest.Version,
|
|
|
|
}
|
|
|
|
for _, plug := range snap.Manifest.Plugins {
|
2018-02-06 18:57:32 +01:00
|
|
|
var version string
|
|
|
|
if plug.Version != nil {
|
|
|
|
version = plug.Version.String()
|
|
|
|
}
|
2018-07-20 22:31:41 +02:00
|
|
|
manifest.Plugins = append(manifest.Plugins, apitype.PluginInfoV1{
|
2017-12-01 22:50:32 +01:00
|
|
|
Name: plug.Name,
|
2018-02-21 19:32:31 +01:00
|
|
|
Path: plug.Path,
|
2018-02-06 18:57:32 +01:00
|
|
|
Type: plug.Kind,
|
|
|
|
Version: version,
|
2017-12-01 22:50:32 +01:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2019-04-24 21:13:00 +02:00
|
|
|
// If a specific secrets manager was not provided, use the one in the snapshot, if present.
|
|
|
|
if sm == nil {
|
|
|
|
sm = snap.SecretsManager
|
|
|
|
}
|
|
|
|
|
2019-04-19 21:13:30 +02:00
|
|
|
var enc config.Encrypter
|
|
|
|
if sm != nil {
|
|
|
|
e, err := sm.Encrypter()
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Wrap(err, "getting encrypter for deployment")
|
|
|
|
}
|
|
|
|
enc = e
|
|
|
|
} else {
|
|
|
|
enc = config.NewPanicCrypter()
|
2019-04-17 22:48:38 +02:00
|
|
|
}
|
|
|
|
|
2017-02-22 03:31:43 +01:00
|
|
|
// Serialize all vertices and only include a vertex section if non-empty.
|
Implement more precise delete-before-replace semantics. (#2369)
This implements the new algorithm for deciding which resources must be
deleted due to a delete-before-replace operation.
We need to compute the set of resources that may be replaced by a
change to the resource under consideration. We do this by taking the
complete set of transitive dependents on the resource under
consideration and removing any resources that would not be replaced by
changes to their dependencies. We determine whether or not a resource
may be replaced by substituting unknowns for input properties that may
change due to deletion of the resources their value depends on and
calling the resource provider's Diff method.
This is perhaps clearer when described by example. Consider the
following dependency graph:
A
__|__
B C
| _|_
D E F
In this graph, all of B, C, D, E, and F transitively depend on A. It may
be the case, however, that changes to the specific properties of any of
those resources R that would occur if a resource on the path to A were
deleted and recreated may not cause R to be replaced. For example, the
edge from B to A may be a simple dependsOn edge such that a change to
B does not actually influence any of B's input properties. In that case,
neither B nor D would need to be deleted before A could be deleted.
In order to make the above algorithm a reality, the resource monitor
interface has been updated to include a map that associates an input
property key with the list of resources that input property depends on.
Older clients of the resource monitor will leave this map empty, in
which case all input properties will be treated as depending on all
dependencies of the resource. This is probably overly conservative, but
it is less conservative than what we currently implement, and is
certainly correct.
2019-01-28 18:46:30 +01:00
|
|
|
var resources []apitype.ResourceV3
|
2017-10-05 23:08:35 +02:00
|
|
|
for _, res := range snap.Resources {
|
2019-04-17 22:48:38 +02:00
|
|
|
sres, err := SerializeResource(res, enc)
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Wrap(err, "serializing resources")
|
|
|
|
}
|
|
|
|
resources = append(resources, sres)
|
2017-02-22 03:31:43 +01:00
|
|
|
}
|
|
|
|
|
Implement more precise delete-before-replace semantics. (#2369)
This implements the new algorithm for deciding which resources must be
deleted due to a delete-before-replace operation.
We need to compute the set of resources that may be replaced by a
change to the resource under consideration. We do this by taking the
complete set of transitive dependents on the resource under
consideration and removing any resources that would not be replaced by
changes to their dependencies. We determine whether or not a resource
may be replaced by substituting unknowns for input properties that may
change due to deletion of the resources their value depends on and
calling the resource provider's Diff method.
This is perhaps clearer when described by example. Consider the
following dependency graph:
A
__|__
B C
| _|_
D E F
In this graph, all of B, C, D, E, and F transitively depend on A. It may
be the case, however, that changes to the specific properties of any of
those resources R that would occur if a resource on the path to A were
deleted and recreated may not cause R to be replaced. For example, the
edge from B to A may be a simple dependsOn edge such that a change to
B does not actually influence any of B's input properties. In that case,
neither B nor D would need to be deleted before A could be deleted.
In order to make the above algorithm a reality, the resource monitor
interface has been updated to include a map that associates an input
property key with the list of resources that input property depends on.
Older clients of the resource monitor will leave this map empty, in
which case all input properties will be treated as depending on all
dependencies of the resource. This is probably overly conservative, but
it is less conservative than what we currently implement, and is
certainly correct.
2019-01-28 18:46:30 +01:00
|
|
|
var operations []apitype.OperationV2
|
Add a list of in-flight operations to the deployment (#1759)
* Add a list of in-flight operations to the deployment
This commit augments 'DeploymentV2' with a list of operations that are
currently in flight. This information is used by the engine to keep
track of whether or not a particular deployment is in a valid state.
The SnapshotManager is responsible for inserting and removing operations
from the in-flight operation list. When the engine registers an intent
to perform an operation, SnapshotManager inserts an Operation into this
list and saves it to the snapshot. When an operation completes, the
SnapshotManager removes it from the snapshot. From this, the engine can
infer that if it ever sees a deployment with pending operations, the
Pulumi CLI must have crashed or otherwise abnormally terminated before
seeing whether or not an operation completed successfully.
To remedy this state, this commit also adds code to 'pulumi stack
import' that clears all pending operations from a deployment, as well as
code to plan generation that will reject any deployments that have
pending operations present.
At the CLI level, if we see that we are in a state where pending
operations were in-flight when the engine died, we'll issue a
human-friendly error message that indicates which resources are in a bad
state and how to recover their stack.
* CR: Multi-line string literals, renaming in-flight -> pending
* CR: Add enum to apitype for operation type, also name status -> type for clarity
* Fix the yaml type
* Fix missed renames
* Add implementation for lifecycle_test.go
* Rebase against master
2018-08-11 06:39:59 +02:00
|
|
|
for _, op := range snap.PendingOperations {
|
2019-04-17 22:48:38 +02:00
|
|
|
sop, err := SerializeOperation(op, enc)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
operations = append(operations, sop)
|
|
|
|
}
|
|
|
|
|
2019-05-21 20:50:02 +02:00
|
|
|
var secretsProvider *apitype.SecretsProvidersV1
|
2019-04-19 21:13:30 +02:00
|
|
|
if sm != nil {
|
2019-05-21 20:50:02 +02:00
|
|
|
secretsProvider = &apitype.SecretsProvidersV1{
|
|
|
|
Type: sm.Type(),
|
|
|
|
}
|
2019-04-19 21:13:30 +02:00
|
|
|
if state := sm.State(); state != nil {
|
|
|
|
rm, err := json.Marshal(state)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
secretsProvider.State = rm
|
2019-04-17 22:48:38 +02:00
|
|
|
}
|
Add a list of in-flight operations to the deployment (#1759)
* Add a list of in-flight operations to the deployment
This commit augments 'DeploymentV2' with a list of operations that are
currently in flight. This information is used by the engine to keep
track of whether or not a particular deployment is in a valid state.
The SnapshotManager is responsible for inserting and removing operations
from the in-flight operation list. When the engine registers an intent
to perform an operation, SnapshotManager inserts an Operation into this
list and saves it to the snapshot. When an operation completes, the
SnapshotManager removes it from the snapshot. From this, the engine can
infer that if it ever sees a deployment with pending operations, the
Pulumi CLI must have crashed or otherwise abnormally terminated before
seeing whether or not an operation completed successfully.
To remedy this state, this commit also adds code to 'pulumi stack
import' that clears all pending operations from a deployment, as well as
code to plan generation that will reject any deployments that have
pending operations present.
At the CLI level, if we see that we are in a state where pending
operations were in-flight when the engine died, we'll issue a
human-friendly error message that indicates which resources are in a bad
state and how to recover their stack.
* CR: Multi-line string literals, renaming in-flight -> pending
* CR: Add enum to apitype for operation type, also name status -> type for clarity
* Fix the yaml type
* Fix missed renames
* Add implementation for lifecycle_test.go
* Rebase against master
2018-08-11 06:39:59 +02:00
|
|
|
}
|
|
|
|
|
Implement more precise delete-before-replace semantics. (#2369)
This implements the new algorithm for deciding which resources must be
deleted due to a delete-before-replace operation.
We need to compute the set of resources that may be replaced by a
change to the resource under consideration. We do this by taking the
complete set of transitive dependents on the resource under
consideration and removing any resources that would not be replaced by
changes to their dependencies. We determine whether or not a resource
may be replaced by substituting unknowns for input properties that may
change due to deletion of the resources their value depends on and
calling the resource provider's Diff method.
This is perhaps clearer when described by example. Consider the
following dependency graph:
A
__|__
B C
| _|_
D E F
In this graph, all of B, C, D, E, and F transitively depend on A. It may
be the case, however, that changes to the specific properties of any of
those resources R that would occur if a resource on the path to A were
deleted and recreated may not cause R to be replaced. For example, the
edge from B to A may be a simple dependsOn edge such that a change to
B does not actually influence any of B's input properties. In that case,
neither B nor D would need to be deleted before A could be deleted.
In order to make the above algorithm a reality, the resource monitor
interface has been updated to include a map that associates an input
property key with the list of resources that input property depends on.
Older clients of the resource monitor will leave this map empty, in
which case all input properties will be treated as depending on all
dependencies of the resource. This is probably overly conservative, but
it is less conservative than what we currently implement, and is
certainly correct.
2019-01-28 18:46:30 +01:00
|
|
|
return &apitype.DeploymentV3{
|
Add a list of in-flight operations to the deployment (#1759)
* Add a list of in-flight operations to the deployment
This commit augments 'DeploymentV2' with a list of operations that are
currently in flight. This information is used by the engine to keep
track of whether or not a particular deployment is in a valid state.
The SnapshotManager is responsible for inserting and removing operations
from the in-flight operation list. When the engine registers an intent
to perform an operation, SnapshotManager inserts an Operation into this
list and saves it to the snapshot. When an operation completes, the
SnapshotManager removes it from the snapshot. From this, the engine can
infer that if it ever sees a deployment with pending operations, the
Pulumi CLI must have crashed or otherwise abnormally terminated before
seeing whether or not an operation completed successfully.
To remedy this state, this commit also adds code to 'pulumi stack
import' that clears all pending operations from a deployment, as well as
code to plan generation that will reject any deployments that have
pending operations present.
At the CLI level, if we see that we are in a state where pending
operations were in-flight when the engine died, we'll issue a
human-friendly error message that indicates which resources are in a bad
state and how to recover their stack.
* CR: Multi-line string literals, renaming in-flight -> pending
* CR: Add enum to apitype for operation type, also name status -> type for clarity
* Fix the yaml type
* Fix missed renames
* Add implementation for lifecycle_test.go
* Rebase against master
2018-08-11 06:39:59 +02:00
|
|
|
Manifest: manifest,
|
|
|
|
Resources: resources,
|
2019-05-21 20:50:02 +02:00
|
|
|
SecretsProviders: secretsProvider,
|
Add a list of in-flight operations to the deployment (#1759)
* Add a list of in-flight operations to the deployment
This commit augments 'DeploymentV2' with a list of operations that are
currently in flight. This information is used by the engine to keep
track of whether or not a particular deployment is in a valid state.
The SnapshotManager is responsible for inserting and removing operations
from the in-flight operation list. When the engine registers an intent
to perform an operation, SnapshotManager inserts an Operation into this
list and saves it to the snapshot. When an operation completes, the
SnapshotManager removes it from the snapshot. From this, the engine can
infer that if it ever sees a deployment with pending operations, the
Pulumi CLI must have crashed or otherwise abnormally terminated before
seeing whether or not an operation completed successfully.
To remedy this state, this commit also adds code to 'pulumi stack
import' that clears all pending operations from a deployment, as well as
code to plan generation that will reject any deployments that have
pending operations present.
At the CLI level, if we see that we are in a state where pending
operations were in-flight when the engine died, we'll issue a
human-friendly error message that indicates which resources are in a bad
state and how to recover their stack.
* CR: Multi-line string literals, renaming in-flight -> pending
* CR: Add enum to apitype for operation type, also name status -> type for clarity
* Fix the yaml type
* Fix missed renames
* Add implementation for lifecycle_test.go
* Rebase against master
2018-08-11 06:39:59 +02:00
|
|
|
PendingOperations: operations,
|
2019-04-17 22:48:38 +02:00
|
|
|
}, nil
|
2017-02-22 03:31:43 +01:00
|
|
|
}
|
|
|
|
|
2018-08-03 23:06:00 +02:00
|
|
|
// DeserializeUntypedDeployment deserializes an untyped deployment and produces a `deploy.Snapshot`
|
2018-05-25 22:29:59 +02:00
|
|
|
// from it. DeserializeDeployment will return an error if the untyped deployment's version is
|
|
|
|
// not within the range `DeploymentSchemaVersionCurrent` and `DeploymentSchemaVersionOldestSupported`.
|
2019-08-01 19:33:52 +02:00
|
|
|
func DeserializeUntypedDeployment(
|
|
|
|
deployment *apitype.UntypedDeployment, secretsProv SecretsProvider) (*deploy.Snapshot, error) {
|
|
|
|
|
2018-05-25 22:29:59 +02:00
|
|
|
contract.Require(deployment != nil, "deployment")
|
|
|
|
switch {
|
|
|
|
case deployment.Version > apitype.DeploymentSchemaVersionCurrent:
|
|
|
|
return nil, ErrDeploymentSchemaVersionTooNew
|
|
|
|
case deployment.Version < DeploymentSchemaVersionOldestSupported:
|
|
|
|
return nil, ErrDeploymentSchemaVersionTooOld
|
|
|
|
}
|
|
|
|
|
Implement more precise delete-before-replace semantics. (#2369)
This implements the new algorithm for deciding which resources must be
deleted due to a delete-before-replace operation.
We need to compute the set of resources that may be replaced by a
change to the resource under consideration. We do this by taking the
complete set of transitive dependents on the resource under
consideration and removing any resources that would not be replaced by
changes to their dependencies. We determine whether or not a resource
may be replaced by substituting unknowns for input properties that may
change due to deletion of the resources their value depends on and
calling the resource provider's Diff method.
This is perhaps clearer when described by example. Consider the
following dependency graph:
A
__|__
B C
| _|_
D E F
In this graph, all of B, C, D, E, and F transitively depend on A. It may
be the case, however, that changes to the specific properties of any of
those resources R that would occur if a resource on the path to A were
deleted and recreated may not cause R to be replaced. For example, the
edge from B to A may be a simple dependsOn edge such that a change to
B does not actually influence any of B's input properties. In that case,
neither B nor D would need to be deleted before A could be deleted.
In order to make the above algorithm a reality, the resource monitor
interface has been updated to include a map that associates an input
property key with the list of resources that input property depends on.
Older clients of the resource monitor will leave this map empty, in
which case all input properties will be treated as depending on all
dependencies of the resource. This is probably overly conservative, but
it is less conservative than what we currently implement, and is
certainly correct.
2019-01-28 18:46:30 +01:00
|
|
|
var v3deployment apitype.DeploymentV3
|
2018-07-20 22:31:41 +02:00
|
|
|
switch deployment.Version {
|
|
|
|
case 1:
|
2018-08-03 23:06:00 +02:00
|
|
|
var v1deployment apitype.DeploymentV1
|
|
|
|
if err := json.Unmarshal([]byte(deployment.Deployment), &v1deployment); err != nil {
|
2018-07-20 22:31:41 +02:00
|
|
|
return nil, err
|
|
|
|
}
|
Implement more precise delete-before-replace semantics. (#2369)
This implements the new algorithm for deciding which resources must be
deleted due to a delete-before-replace operation.
We need to compute the set of resources that may be replaced by a
change to the resource under consideration. We do this by taking the
complete set of transitive dependents on the resource under
consideration and removing any resources that would not be replaced by
changes to their dependencies. We determine whether or not a resource
may be replaced by substituting unknowns for input properties that may
change due to deletion of the resources their value depends on and
calling the resource provider's Diff method.
This is perhaps clearer when described by example. Consider the
following dependency graph:
A
__|__
B C
| _|_
D E F
In this graph, all of B, C, D, E, and F transitively depend on A. It may
be the case, however, that changes to the specific properties of any of
those resources R that would occur if a resource on the path to A were
deleted and recreated may not cause R to be replaced. For example, the
edge from B to A may be a simple dependsOn edge such that a change to
B does not actually influence any of B's input properties. In that case,
neither B nor D would need to be deleted before A could be deleted.
In order to make the above algorithm a reality, the resource monitor
interface has been updated to include a map that associates an input
property key with the list of resources that input property depends on.
Older clients of the resource monitor will leave this map empty, in
which case all input properties will be treated as depending on all
dependencies of the resource. This is probably overly conservative, but
it is less conservative than what we currently implement, and is
certainly correct.
2019-01-28 18:46:30 +01:00
|
|
|
v2deployment := migrate.UpToDeploymentV2(v1deployment)
|
|
|
|
v3deployment = migrate.UpToDeploymentV3(v2deployment)
|
2018-08-03 23:06:00 +02:00
|
|
|
case 2:
|
Implement more precise delete-before-replace semantics. (#2369)
This implements the new algorithm for deciding which resources must be
deleted due to a delete-before-replace operation.
We need to compute the set of resources that may be replaced by a
change to the resource under consideration. We do this by taking the
complete set of transitive dependents on the resource under
consideration and removing any resources that would not be replaced by
changes to their dependencies. We determine whether or not a resource
may be replaced by substituting unknowns for input properties that may
change due to deletion of the resources their value depends on and
calling the resource provider's Diff method.
This is perhaps clearer when described by example. Consider the
following dependency graph:
A
__|__
B C
| _|_
D E F
In this graph, all of B, C, D, E, and F transitively depend on A. It may
be the case, however, that changes to the specific properties of any of
those resources R that would occur if a resource on the path to A were
deleted and recreated may not cause R to be replaced. For example, the
edge from B to A may be a simple dependsOn edge such that a change to
B does not actually influence any of B's input properties. In that case,
neither B nor D would need to be deleted before A could be deleted.
In order to make the above algorithm a reality, the resource monitor
interface has been updated to include a map that associates an input
property key with the list of resources that input property depends on.
Older clients of the resource monitor will leave this map empty, in
which case all input properties will be treated as depending on all
dependencies of the resource. This is probably overly conservative, but
it is less conservative than what we currently implement, and is
certainly correct.
2019-01-28 18:46:30 +01:00
|
|
|
var v2deployment apitype.DeploymentV2
|
2018-08-03 23:06:00 +02:00
|
|
|
if err := json.Unmarshal([]byte(deployment.Deployment), &v2deployment); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
Implement more precise delete-before-replace semantics. (#2369)
This implements the new algorithm for deciding which resources must be
deleted due to a delete-before-replace operation.
We need to compute the set of resources that may be replaced by a
change to the resource under consideration. We do this by taking the
complete set of transitive dependents on the resource under
consideration and removing any resources that would not be replaced by
changes to their dependencies. We determine whether or not a resource
may be replaced by substituting unknowns for input properties that may
change due to deletion of the resources their value depends on and
calling the resource provider's Diff method.
This is perhaps clearer when described by example. Consider the
following dependency graph:
A
__|__
B C
| _|_
D E F
In this graph, all of B, C, D, E, and F transitively depend on A. It may
be the case, however, that changes to the specific properties of any of
those resources R that would occur if a resource on the path to A were
deleted and recreated may not cause R to be replaced. For example, the
edge from B to A may be a simple dependsOn edge such that a change to
B does not actually influence any of B's input properties. In that case,
neither B nor D would need to be deleted before A could be deleted.
In order to make the above algorithm a reality, the resource monitor
interface has been updated to include a map that associates an input
property key with the list of resources that input property depends on.
Older clients of the resource monitor will leave this map empty, in
which case all input properties will be treated as depending on all
dependencies of the resource. This is probably overly conservative, but
it is less conservative than what we currently implement, and is
certainly correct.
2019-01-28 18:46:30 +01:00
|
|
|
v3deployment = migrate.UpToDeploymentV3(v2deployment)
|
|
|
|
case 3:
|
|
|
|
if err := json.Unmarshal([]byte(deployment.Deployment), &v3deployment); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2018-08-03 23:06:00 +02:00
|
|
|
default:
|
|
|
|
contract.Failf("unrecognized version: %d", deployment.Version)
|
|
|
|
}
|
|
|
|
|
2019-08-01 19:33:52 +02:00
|
|
|
return DeserializeDeploymentV3(v3deployment, secretsProv)
|
2018-08-03 23:06:00 +02:00
|
|
|
}
|
|
|
|
|
Implement more precise delete-before-replace semantics. (#2369)
This implements the new algorithm for deciding which resources must be
deleted due to a delete-before-replace operation.
We need to compute the set of resources that may be replaced by a
change to the resource under consideration. We do this by taking the
complete set of transitive dependents on the resource under
consideration and removing any resources that would not be replaced by
changes to their dependencies. We determine whether or not a resource
may be replaced by substituting unknowns for input properties that may
change due to deletion of the resources their value depends on and
calling the resource provider's Diff method.
This is perhaps clearer when described by example. Consider the
following dependency graph:
A
__|__
B C
| _|_
D E F
In this graph, all of B, C, D, E, and F transitively depend on A. It may
be the case, however, that changes to the specific properties of any of
those resources R that would occur if a resource on the path to A were
deleted and recreated may not cause R to be replaced. For example, the
edge from B to A may be a simple dependsOn edge such that a change to
B does not actually influence any of B's input properties. In that case,
neither B nor D would need to be deleted before A could be deleted.
In order to make the above algorithm a reality, the resource monitor
interface has been updated to include a map that associates an input
property key with the list of resources that input property depends on.
Older clients of the resource monitor will leave this map empty, in
which case all input properties will be treated as depending on all
dependencies of the resource. This is probably overly conservative, but
it is less conservative than what we currently implement, and is
certainly correct.
2019-01-28 18:46:30 +01:00
|
|
|
// DeserializeDeploymentV3 deserializes a typed DeploymentV3 into a `deploy.Snapshot`.
//
// The deployment's manifest (time, magic, version, and plugin list) is unpacked first.
// If the deployment declares a secrets provider, secretsProv is required and is used to
// construct the secrets manager; otherwise decryption falls back to a panic crypter so
// that any unexpected secret in the payload fails loudly rather than silently.
func DeserializeDeploymentV3(deployment apitype.DeploymentV3, secretsProv SecretsProvider) (*deploy.Snapshot, error) {
	// Unpack the versions.
	manifest := deploy.Manifest{
		Time:    deployment.Manifest.Time,
		Magic:   deployment.Manifest.Magic,
		Version: deployment.Manifest.Version,
	}
	// Rehydrate the plugin list; plugin versions are optional, so an empty string maps
	// to a nil *semver.Version rather than an error.
	for _, plug := range deployment.Manifest.Plugins {
		var version *semver.Version
		if v := plug.Version; v != "" {
			// ParseTolerant accepts loosely-formatted versions (e.g. missing patch digit).
			sv, err := semver.ParseTolerant(v)
			if err != nil {
				return nil, err
			}
			version = &sv
		}
		manifest.Plugins = append(manifest.Plugins, workspace.PluginInfo{
			Name:    plug.Name,
			Kind:    plug.Type,
			Version: version,
		})
	}

	// Build the secrets manager only when the deployment actually declares one.
	var secretsManager secrets.Manager
	if deployment.SecretsProviders != nil && deployment.SecretsProviders.Type != "" {
		if secretsProv == nil {
			return nil, errors.New("deployment uses a SecretsProvider but no SecretsProvider was provided")
		}

		sm, err := secretsProv.OfType(deployment.SecretsProviders.Type, deployment.SecretsProviders.State)
		if err != nil {
			return nil, err
		}
		secretsManager = sm
	}

	// Pick the decrypter: with no secrets manager, a panic crypter guarantees that any
	// secret encountered during deserialization is treated as a hard error.
	var dec config.Decrypter
	if secretsManager == nil {
		dec = config.NewPanicCrypter()
	} else {
		d, err := secretsManager.Decrypter()
		if err != nil {
			return nil, err
		}
		dec = d
	}

	// For every serialized resource vertex, create a ResourceDeployment out of it.
	var resources []*resource.State
	for _, res := range deployment.Resources {
		desres, err := DeserializeResource(res, dec)
		if err != nil {
			return nil, err
		}
		resources = append(resources, desres)
	}

	// Rehydrate any pending (in-flight) operations recorded in the deployment.
	var ops []resource.Operation
	for _, op := range deployment.PendingOperations {
		desop, err := DeserializeOperation(op, dec)
		if err != nil {
			return nil, err
		}
		ops = append(ops, desop)
	}

	return deploy.NewSnapshot(manifest, secretsManager, resources, ops), nil
}
|
|
|
|
|
2017-10-05 23:08:35 +02:00
|
|
|
// SerializeResource turns a resource into a structure suitable for serialization.
|
2019-04-17 22:48:38 +02:00
|
|
|
func SerializeResource(res *resource.State, enc config.Encrypter) (apitype.ResourceV3, error) {
|
2017-02-22 03:31:43 +01:00
|
|
|
contract.Assert(res != nil)
|
2017-10-05 23:08:35 +02:00
|
|
|
contract.Assertf(string(res.URN) != "", "Unexpected empty resource resource.URN")
|
2017-02-22 03:31:43 +01:00
|
|
|
|
2017-06-05 04:24:48 +02:00
|
|
|
// Serialize all input and output properties recursively, and add them if non-empty.
|
2017-06-07 01:42:14 +02:00
|
|
|
var inputs map[string]interface{}
|
2017-08-01 03:26:15 +02:00
|
|
|
if inp := res.Inputs; inp != nil {
|
2019-04-17 22:48:38 +02:00
|
|
|
sinp, err := SerializeProperties(inp, enc)
|
|
|
|
if err != nil {
|
|
|
|
return apitype.ResourceV3{}, err
|
|
|
|
}
|
|
|
|
inputs = sinp
|
2017-06-07 01:42:14 +02:00
|
|
|
}
|
|
|
|
var outputs map[string]interface{}
|
2017-08-01 03:26:15 +02:00
|
|
|
if outp := res.Outputs; outp != nil {
|
2019-04-17 22:48:38 +02:00
|
|
|
soutp, err := SerializeProperties(outp, enc)
|
|
|
|
if err != nil {
|
|
|
|
return apitype.ResourceV3{}, err
|
|
|
|
}
|
|
|
|
outputs = soutp
|
2017-06-07 01:42:14 +02:00
|
|
|
}
|
2017-02-22 03:31:43 +01:00
|
|
|
|
2019-08-21 00:01:27 +02:00
|
|
|
v3Resource := apitype.ResourceV3{
|
2019-05-09 23:27:34 +02:00
|
|
|
URN: res.URN,
|
|
|
|
Custom: res.Custom,
|
|
|
|
Delete: res.Delete,
|
|
|
|
ID: res.ID,
|
|
|
|
Type: res.Type,
|
|
|
|
Parent: res.Parent,
|
|
|
|
Inputs: inputs,
|
|
|
|
Outputs: outputs,
|
|
|
|
Protect: res.Protect,
|
|
|
|
External: res.External,
|
|
|
|
Dependencies: res.Dependencies,
|
|
|
|
InitErrors: res.InitErrors,
|
|
|
|
Provider: res.Provider,
|
|
|
|
PropertyDependencies: res.PropertyDependencies,
|
|
|
|
PendingReplacement: res.PendingReplacement,
|
|
|
|
AdditionalSecretOutputs: res.AdditionalSecretOutputs,
|
2019-06-01 08:01:01 +02:00
|
|
|
Aliases: res.Aliases,
|
2019-08-21 00:01:27 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
if res.CustomTimeouts.IsNotEmpty() {
|
|
|
|
v3Resource.CustomTimeouts = &res.CustomTimeouts
|
|
|
|
}
|
|
|
|
|
|
|
|
return v3Resource, nil
|
2017-02-22 03:31:43 +01:00
|
|
|
}
|
|
|
|
|
2019-04-17 22:48:38 +02:00
|
|
|
func SerializeOperation(op resource.Operation, enc config.Encrypter) (apitype.OperationV2, error) {
|
|
|
|
res, err := SerializeResource(op.Resource, enc)
|
|
|
|
if err != nil {
|
|
|
|
return apitype.OperationV2{}, errors.Wrap(err, "serializing resource")
|
|
|
|
}
|
Implement more precise delete-before-replace semantics. (#2369)
This implements the new algorithm for deciding which resources must be
deleted due to a delete-before-replace operation.
We need to compute the set of resources that may be replaced by a
change to the resource under consideration. We do this by taking the
complete set of transitive dependents on the resource under
consideration and removing any resources that would not be replaced by
changes to their dependencies. We determine whether or not a resource
may be replaced by substituting unknowns for input properties that may
change due to deletion of the resources their value depends on and
calling the resource provider's Diff method.
This is perhaps clearer when described by example. Consider the
following dependency graph:
A
__|__
B C
| _|_
D E F
In this graph, all of B, C, D, E, and F transitively depend on A. It may
be the case, however, that changes to the specific properties of any of
those resources R that would occur if a resource on the path to A were
deleted and recreated may not cause R to be replaced. For example, the
edge from B to A may be a simple dependsOn edge such that a change to
B does not actually influence any of B's input properties. In that case,
neither B nor D would need to be deleted before A could be deleted.
In order to make the above algorithm a reality, the resource monitor
interface has been updated to include a map that associates an input
property key with the list of resources that input property depends on.
Older clients of the resource monitor will leave this map empty, in
which case all input properties will be treated as depending on all
dependencies of the resource. This is probably overly conservative, but
it is less conservative than what we currently implement, and is
certainly correct.
2019-01-28 18:46:30 +01:00
|
|
|
return apitype.OperationV2{
|
Add a list of in-flight operations to the deployment (#1759)
* Add a list of in-flight operations to the deployment
This commit augments 'DeploymentV2' with a list of operations that are
currently in flight. This information is used by the engine to keep
track of whether or not a particular deployment is in a valid state.
The SnapshotManager is responsible for inserting and removing operations
from the in-flight operation list. When the engine registers an intent
to perform an operation, SnapshotManager inserts an Operation into this
list and saves it to the snapshot. When an operation completes, the
SnapshotManager removes it from the snapshot. From this, the engine can
infer that if it ever sees a deployment with pending operations, the
Pulumi CLI must have crashed or otherwise abnormally terminated before
seeing whether or not an operation completed successfully.
To remedy this state, this commit also adds code to 'pulumi stack
import' that clears all pending operations from a deployment, as well as
code to plan generation that will reject any deployments that have
pending operations present.
At the CLI level, if we see that we are in a state where pending
operations were in-flight when the engine died, we'll issue a
human-friendly error message that indicates which resources are in a bad
state and how to recover their stack.
* CR: Multi-line string literals, renaming in-flight -> pending
* CR: Add enum to apitype for operation type, also name status -> type for clarity
* Fix the yaml type
* Fix missed renames
* Add implementation for lifecycle_test.go
* Rebase against master
2018-08-11 06:39:59 +02:00
|
|
|
Resource: res,
|
|
|
|
Type: apitype.OperationType(op.Type),
|
2019-04-17 22:48:38 +02:00
|
|
|
}, nil
|
Add a list of in-flight operations to the deployment (#1759)
* Add a list of in-flight operations to the deployment
This commit augments 'DeploymentV2' with a list of operations that are
currently in flight. This information is used by the engine to keep
track of whether or not a particular deployment is in a valid state.
The SnapshotManager is responsible for inserting and removing operations
from the in-flight operation list. When the engine registers an intent
to perform an operation, SnapshotManager inserts an Operation into this
list and saves it to the snapshot. When an operation completes, the
SnapshotManager removes it from the snapshot. From this, the engine can
infer that if it ever sees a deployment with pending operations, the
Pulumi CLI must have crashed or otherwise abnormally terminated before
seeing whether or not an operation completed successfully.
To remedy this state, this commit also adds code to 'pulumi stack
import' that clears all pending operations from a deployment, as well as
code to plan generation that will reject any deployments that have
pending operations present.
At the CLI level, if we see that we are in a state where pending
operations were in-flight when the engine died, we'll issue a
human-friendly error message that indicates which resources are in a bad
state and how to recover their stack.
* CR: Multi-line string literals, renaming in-flight -> pending
* CR: Add enum to apitype for operation type, also name status -> type for clarity
* Fix the yaml type
* Fix missed renames
* Add implementation for lifecycle_test.go
* Rebase against master
2018-08-11 06:39:59 +02:00
|
|
|
}
|
|
|
|
|
2017-06-10 03:34:37 +02:00
|
|
|
// SerializeProperties serializes a resource property bag so that it's suitable for serialization.
|
2019-04-17 22:48:38 +02:00
|
|
|
func SerializeProperties(props resource.PropertyMap, enc config.Encrypter) (map[string]interface{}, error) {
|
2017-06-07 01:42:14 +02:00
|
|
|
dst := make(map[string]interface{})
|
Make more progress on the new deployment model
This change restructures a lot more pertaining to deployments, snapshots,
environments, and the like.
The most notable change is that the notion of a deploy.Source is introduced,
which splits the responsibility between the deploy.Plan -- which simply
understands how to compute and carry out deployment plans -- and the idea
of something that can produce new objects on-demand during deployment.
The primary such implementation is evalSource, which encapsulates an
interpreter and takes a package, args, and config map, and proceeds to run
the interpreter in a distinct goroutine. It synchronizes as needed to
poke and prod the interpreter along its path to create new resource objects.
There are two other sources, however. First, a nullSource, which simply
refuses to create new objects. This can be handy when writing isolated
tests but is also used to simulate the "empty" environment as necessary to
do a complete teardown of the target environment. Second, a fixedSource,
which takes a pre-computed array of objects, and hands those, in order, to
the planning engine; this is mostly useful as a testing technique.
Boatloads of code is now changed and updated in the various CLI commands.
This further chugs along towards pulumi/lumi#90. The end is in sight.
2017-06-10 20:50:47 +02:00
|
|
|
for _, k := range props.StableKeys() {
|
2019-04-17 22:48:38 +02:00
|
|
|
v, err := SerializePropertyValue(props[k], enc)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2019-11-22 20:03:02 +01:00
|
|
|
dst[string(k)] = v
|
2017-02-22 23:32:03 +01:00
|
|
|
}
|
2019-04-17 22:48:38 +02:00
|
|
|
return dst, nil
|
2017-02-22 23:32:03 +01:00
|
|
|
}
|
|
|
|
|
2017-06-10 03:34:37 +02:00
|
|
|
// SerializePropertyValue serializes a resource property value so that it's suitable for
// serialization. Nulls become nil; computed/output values become a magic placeholder
// string; arrays and objects recurse; assets and archives use their own Serialize
// methods; secrets are encrypted with enc and wrapped in an apitype.SecretV1 envelope;
// everything else is returned as the raw underlying value.
func SerializePropertyValue(prop resource.PropertyValue, enc config.Encrypter) (interface{}, error) {
	// Serialize nulls as nil.
	if prop.IsNull() {
		return nil, nil
	}

	// A computed value marks something that will be determined at a later time. (e.g. the result of
	// a computation that we don't perform during a preview operation.) We serialize a magic constant
	// to record its existence.
	if prop.IsComputed() || prop.IsOutput() {
		return computedValuePlaceholder, nil
	}

	// For arrays, make sure to recurse.
	if prop.IsArray() {
		srcarr := prop.ArrayValue()
		dstarr := make([]interface{}, len(srcarr))
		for i, elem := range prop.ArrayValue() {
			selem, err := SerializePropertyValue(elem, enc)
			if err != nil {
				return nil, err
			}
			dstarr[i] = selem
		}
		return dstarr, nil
	}

	// Also for objects, recurse and use naked properties.
	if prop.IsObject() {
		return SerializeProperties(prop.ObjectValue(), enc)
	}

	// For assets, we need to serialize them a little carefully, so we can recover them afterwards.
	if prop.IsAsset() {
		return prop.AssetValue().Serialize(), nil
	} else if prop.IsArchive() {
		return prop.ArchiveValue().Serialize(), nil
	}

	if prop.IsSecret() {
		// Since we are going to encrypt property value, we can elide encrypting sub-elements. We'll mark them as
		// "secret" so we retain that information when deserializing the overall structure, but there is no
		// need to double encrypt everything.
		value, err := SerializePropertyValue(prop.SecretValue().Element, config.NopEncrypter)
		if err != nil {
			return nil, err
		}
		// The serialized element is JSON-encoded before encryption so an arbitrary
		// structure round-trips through a single ciphertext string.
		bytes, err := json.Marshal(value)
		if err != nil {
			return nil, errors.Wrap(err, "encoding serialized property value")
		}
		plaintext := string(bytes)

		// If the encrypter is a cachingCrypter, call through its encryptSecret method, which will look for a matching
		// *resource.Secret + plaintext in its cache in order to avoid re-encrypting the value.
		var ciphertext string
		if cachingCrypter, ok := enc.(*cachingCrypter); ok {
			ciphertext, err = cachingCrypter.encryptSecret(prop.SecretValue(), plaintext)
		} else {
			ciphertext, err = enc.EncryptValue(plaintext)
		}
		if err != nil {
			return nil, errors.Wrap(err, "failed to encrypt secret value")
		}
		// NOTE(review): err is necessarily nil here (checked just above), so this
		// assertion is a no-op and its message references an earlier step; it looks
		// vestigial — candidate for removal.
		contract.AssertNoErrorf(err, "marshalling underlying secret value to JSON")
		return apitype.SecretV1{
			Sig:        resource.SecretSig,
			Ciphertext: ciphertext,
		}, nil
	}

	// All others are returned as-is.
	return prop.V, nil
}
|
|
|
|
|
2017-10-05 23:08:35 +02:00
|
|
|
// DeserializeResource turns a serialized resource back into its usual form.
|
2019-04-17 22:48:38 +02:00
|
|
|
func DeserializeResource(res apitype.ResourceV3, dec config.Decrypter) (*resource.State, error) {
|
2017-10-05 23:08:35 +02:00
|
|
|
// Deserialize the resource properties, if they exist.
|
2019-04-17 22:48:38 +02:00
|
|
|
inputs, err := DeserializeProperties(res.Inputs, dec)
|
2017-10-22 22:39:21 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2019-04-17 22:48:38 +02:00
|
|
|
outputs, err := DeserializeProperties(res.Outputs, dec)
|
2017-10-22 22:39:21 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-10-05 23:08:35 +02:00
|
|
|
|
2017-10-22 22:39:21 +02:00
|
|
|
return resource.NewState(
|
2018-08-03 23:06:00 +02:00
|
|
|
res.Type, res.URN, res.Custom, res.Delete, res.ID,
|
Implement more precise delete-before-replace semantics. (#2369)
This implements the new algorithm for deciding which resources must be
deleted due to a delete-before-replace operation.
We need to compute the set of resources that may be replaced by a
change to the resource under consideration. We do this by taking the
complete set of transitive dependents on the resource under
consideration and removing any resources that would not be replaced by
changes to their dependencies. We determine whether or not a resource
may be replaced by substituting unknowns for input properties that may
change due to deletion of the resources their value depends on and
calling the resource provider's Diff method.
This is perhaps clearer when described by example. Consider the
following dependency graph:
A
__|__
B C
| _|_
D E F
In this graph, all of B, C, D, E, and F transitively depend on A. It may
be the case, however, that changes to the specific properties of any of
those resources R that would occur if a resource on the path to A were
deleted and recreated may not cause R to be replaced. For example, the
edge from B to A may be a simple dependsOn edge such that a change to
B does not actually influence any of B's input properties. In that case,
neither B nor D would need to be deleted before A could be deleted.
In order to make the above algorithm a reality, the resource monitor
interface has been updated to include a map that associates an input
property key with the list of resources that input property depends on.
Older clients of the resource monitor will leave this map empty, in
which case all input properties will be treated as depending on all
dependencies of the resource. This is probably overly conservative, but
it is less conservative than what we currently implement, and is
certainly correct.
2019-01-28 18:46:30 +01:00
|
|
|
inputs, outputs, res.Parent, res.Protect, res.External, res.Dependencies, res.InitErrors, res.Provider,
|
Addition of Custom Timeouts (#2885)
* Plumbing the custom timeouts from the engine to the providers
* Plumbing the CustomTimeouts through to the engine and adding test to show this
* Change the provider proto to include individual timeouts
* Plumbing the CustomTimeouts from the engine through to the Provider RPC interface
* Change how the CustomTimeouts are sent across RPC
These errors were spotted in testing. We can now see that the timeout
information is arriving in the RegisterResourceRequest
```
req=&pulumirpc.RegisterResourceRequest{
Type: "aws:s3/bucket:Bucket",
Name: "my-bucket",
Parent: "urn:pulumi:dev::aws-vpc::pulumi:pulumi:Stack::aws-vpc-dev",
Custom: true,
Object: &structpb.Struct{},
Protect: false,
Dependencies: nil,
Provider: "",
PropertyDependencies: {},
DeleteBeforeReplace: false,
Version: "",
IgnoreChanges: nil,
AcceptSecrets: true,
AdditionalSecretOutputs: nil,
Aliases: nil,
CustomTimeouts: &pulumirpc.RegisterResourceRequest_CustomTimeouts{
Create: 300,
Update: 400,
Delete: 500,
XXX_NoUnkeyedLiteral: struct {}{},
XXX_unrecognized: nil,
XXX_sizecache: 0,
},
XXX_NoUnkeyedLiteral: struct {}{},
XXX_unrecognized: nil,
XXX_sizecache: 0,
}
```
* Changing the design to use strings
* CHANGELOG entry to include the CustomTimeouts work
* Changing custom timeouts to be passed around the engine as converted value
We don't want to pass around strings - the user can provide it but we want
to make the engine aware of the timeout in seconds as a float64
2019-07-15 23:26:28 +02:00
|
|
|
res.PropertyDependencies, res.PendingReplacement, res.AdditionalSecretOutputs, res.Aliases, res.CustomTimeouts), nil
|
2017-10-05 23:08:35 +02:00
|
|
|
}
|
|
|
|
|
2019-04-17 22:48:38 +02:00
|
|
|
func DeserializeOperation(op apitype.OperationV2, dec config.Decrypter) (resource.Operation, error) {
|
|
|
|
res, err := DeserializeResource(op.Resource, dec)
|
Add a list of in-flight operations to the deployment (#1759)
* Add a list of in-flight operations to the deployment
This commit augments 'DeploymentV2' with a list of operations that are
currently in flight. This information is used by the engine to keep
track of whether or not a particular deployment is in a valid state.
The SnapshotManager is responsible for inserting and removing operations
from the in-flight operation list. When the engine registers an intent
to perform an operation, SnapshotManager inserts an Operation into this
list and saves it to the snapshot. When an operation completes, the
SnapshotManager removes it from the snapshot. From this, the engine can
infer that if it ever sees a deployment with pending operations, the
Pulumi CLI must have crashed or otherwise abnormally terminated before
seeing whether or not an operation completed successfully.
To remedy this state, this commit also adds code to 'pulumi stack
import' that clears all pending operations from a deployment, as well as
code to plan generation that will reject any deployments that have
pending operations present.
At the CLI level, if we see that we are in a state where pending
operations were in-flight when the engine died, we'll issue a
human-friendly error message that indicates which resources are in a bad
state and how to recover their stack.
* CR: Multi-line string literals, renaming in-flight -> pending
* CR: Add enum to apitype for operation type, also name status -> type for clarity
* Fix the yaml type
* Fix missed renames
* Add implementation for lifecycle_test.go
* Rebase against master
2018-08-11 06:39:59 +02:00
|
|
|
if err != nil {
|
|
|
|
return resource.Operation{}, err
|
|
|
|
}
|
|
|
|
return resource.NewOperation(res, resource.OperationType(op.Type)), nil
|
|
|
|
}
|
|
|
|
|
Make more progress on the new deployment model
This change restructures a lot more pertaining to deployments, snapshots,
environments, and the like.
The most notable change is that the notion of a deploy.Source is introduced,
which splits the responsibility between the deploy.Plan -- which simply
understands how to compute and carry out deployment plans -- and the idea
of something that can produce new objects on-demand during deployment.
The primary such implementation is evalSource, which encapsulates an
interpreter and takes a package, args, and config map, and proceeds to run
the interpreter in a distinct goroutine. It synchronizes as needed to
poke and prod the interpreter along its path to create new resource objects.
There are two other sources, however. First, a nullSource, which simply
refuses to create new objects. This can be handy when writing isolated
tests but is also used to simulate the "empty" environment as necessary to
do a complete teardown of the target environment. Second, a fixedSource,
which takes a pre-computed array of objects, and hands those, in order, to
the planning engine; this is mostly useful as a testing technique.
Boatloads of code is now changed and updated in the various CLI commands.
This further chugs along towards pulumi/lumi#90. The end is in sight.
2017-06-10 20:50:47 +02:00
|
|
|
// DeserializeProperties deserializes an entire map of deploy properties into a resource property map.
|
2019-04-17 22:48:38 +02:00
|
|
|
func DeserializeProperties(props map[string]interface{}, dec config.Decrypter) (resource.PropertyMap, error) {
|
Make more progress on the new deployment model
This change restructures a lot more pertaining to deployments, snapshots,
environments, and the like.
The most notable change is that the notion of a deploy.Source is introduced,
which splits the responsibility between the deploy.Plan -- which simply
understands how to compute and carry out deployment plans -- and the idea
of something that can produce new objects on-demand during deployment.
The primary such implementation is evalSource, which encapsulates an
interpreter and takes a package, args, and config map, and proceeds to run
the interpreter in a distinct goroutine. It synchronizes as needed to
poke and prod the interpreter along its path to create new resource objects.
There are two other sources, however. First, a nullSource, which simply
refuses to create new objects. This can be handy when writing isolated
tests but is also used to simulate the "empty" environment as necessary to
do a complete teardown of the target environment. Second, a fixedSource,
which takes a pre-computed array of objects, and hands those, in order, to
the planning engine; this is mostly useful as a testing technique.
Boatloads of code is now changed and updated in the various CLI commands.
This further chugs along towards pulumi/lumi#90. The end is in sight.
2017-06-10 20:50:47 +02:00
|
|
|
result := make(resource.PropertyMap)
|
2017-02-22 23:32:03 +01:00
|
|
|
for k, prop := range props {
|
2019-04-17 22:48:38 +02:00
|
|
|
desprop, err := DeserializePropertyValue(prop, dec)
|
2017-10-22 22:39:21 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
result[resource.PropertyKey(k)] = desprop
|
2017-02-22 23:32:03 +01:00
|
|
|
}
|
2017-10-22 22:39:21 +02:00
|
|
|
return result, nil
|
2017-02-22 23:32:03 +01:00
|
|
|
}
|
|
|
|
|
Make more progress on the new deployment model
This change restructures a lot more pertaining to deployments, snapshots,
environments, and the like.
The most notable change is that the notion of a deploy.Source is introduced,
which splits the responsibility between the deploy.Plan -- which simply
understands how to compute and carry out deployment plans -- and the idea
of something that can produce new objects on-demand during deployment.
The primary such implementation is evalSource, which encapsulates an
interpreter and takes a package, args, and config map, and proceeds to run
the interpreter in a distinct goroutine. It synchronizes as needed to
poke and prod the interpreter along its path to create new resource objects.
There are two other sources, however. First, a nullSource, which simply
refuses to create new objects. This can be handy when writing isolated
tests but is also used to simulate the "empty" environment as necessary to
do a complete teardown of the target environment. Second, a fixedSource,
which takes a pre-computed array of objects, and hands those, in order, to
the planning engine; this is mostly useful as a testing technique.
Boatloads of code is now changed and updated in the various CLI commands.
This further chugs along towards pulumi/lumi#90. The end is in sight.
2017-06-10 20:50:47 +02:00
|
|
|
// DeserializePropertyValue deserializes a single deploy property into a resource property value.
|
2019-04-17 22:48:38 +02:00
|
|
|
func DeserializePropertyValue(v interface{}, dec config.Decrypter) (resource.PropertyValue, error) {
|
2017-02-22 23:32:03 +01:00
|
|
|
if v != nil {
|
|
|
|
switch w := v.(type) {
|
|
|
|
case bool:
|
2017-10-22 22:39:21 +02:00
|
|
|
return resource.NewBoolProperty(w), nil
|
2017-02-22 23:32:03 +01:00
|
|
|
case float64:
|
2017-10-22 22:39:21 +02:00
|
|
|
return resource.NewNumberProperty(w), nil
|
2017-02-22 23:32:03 +01:00
|
|
|
case string:
|
2019-11-21 23:58:30 +01:00
|
|
|
if w == computedValuePlaceholder {
|
|
|
|
return resource.MakeComputed(resource.NewStringProperty("")), nil
|
|
|
|
}
|
2017-10-22 22:39:21 +02:00
|
|
|
return resource.NewStringProperty(w), nil
|
2017-02-22 23:32:03 +01:00
|
|
|
case []interface{}:
|
Make more progress on the new deployment model
This change restructures a lot more pertaining to deployments, snapshots,
environments, and the like.
The most notable change is that the notion of a deploy.Source is introduced,
which splits the responsibility between the deploy.Plan -- which simply
understands how to compute and carry out deployment plans -- and the idea
of something that can produce new objects on-demand during deployment.
The primary such implementation is evalSource, which encapsulates an
interpreter and takes a package, args, and config map, and proceeds to run
the interpreter in a distinct goroutine. It synchronizes as needed to
poke and prod the interpreter along its path to create new resource objects.
There are two other sources, however. First, a nullSource, which simply
refuses to create new objects. This can be handy when writing isolated
tests but is also used to simulate the "empty" environment as necessary to
do a complete teardown of the target environment. Second, a fixedSource,
which takes a pre-computed array of objects, and hands those, in order, to
the planning engine; this is mostly useful as a testing technique.
Boatloads of code is now changed and updated in the various CLI commands.
This further chugs along towards pulumi/lumi#90. The end is in sight.
2017-06-10 20:50:47 +02:00
|
|
|
var arr []resource.PropertyValue
|
2017-02-22 23:32:03 +01:00
|
|
|
for _, elem := range w {
|
2019-04-17 22:48:38 +02:00
|
|
|
ev, err := DeserializePropertyValue(elem, dec)
|
2017-10-22 22:39:21 +02:00
|
|
|
if err != nil {
|
|
|
|
return resource.PropertyValue{}, err
|
|
|
|
}
|
|
|
|
arr = append(arr, ev)
|
2017-02-22 23:32:03 +01:00
|
|
|
}
|
2017-10-22 22:39:21 +02:00
|
|
|
return resource.NewArrayProperty(arr), nil
|
2017-02-22 23:32:03 +01:00
|
|
|
case map[string]interface{}:
|
2019-04-17 22:48:38 +02:00
|
|
|
obj, err := DeserializeProperties(w, dec)
|
2017-10-22 22:39:21 +02:00
|
|
|
if err != nil {
|
|
|
|
return resource.PropertyValue{}, err
|
|
|
|
}
|
Implement more precise delete-before-replace semantics. (#2369)
This implements the new algorithm for deciding which resources must be
deleted due to a delete-before-replace operation.
We need to compute the set of resources that may be replaced by a
change to the resource under consideration. We do this by taking the
complete set of transitive dependents on the resource under
consideration and removing any resources that would not be replaced by
changes to their dependencies. We determine whether or not a resource
may be replaced by substituting unknowns for input properties that may
change due to deletion of the resources their value depends on and
calling the resource provider's Diff method.
This is perhaps clearer when described by example. Consider the
following dependency graph:
A
__|__
B C
| _|_
D E F
In this graph, all of B, C, D, E, and F transitively depend on A. It may
be the case, however, that changes to the specific properties of any of
those resources R that would occur if a resource on the path to A were
deleted and recreated may not cause R to be replaced. For example, the
edge from B to A may be a simple dependsOn edge such that a change to
B does not actually influence any of B's input properties. In that case,
neither B nor D would need to be deleted before A could be deleted.
In order to make the above algorithm a reality, the resource monitor
interface has been updated to include a map that associates an input
property key with the list of resources that input property depends on.
Older clients of the resource monitor will leave this map empty, in
which case all input properties will be treated as depending on all
dependencies of the resource. This is probably overly conservative, but
it is less conservative than what we currently implement, and is
certainly correct.
2019-01-28 18:46:30 +01:00
|
|
|
|
2017-07-17 19:38:57 +02:00
|
|
|
// This could be an asset or archive; if so, recover its type.
|
|
|
|
objmap := obj.Mappable()
|
Implement more precise delete-before-replace semantics. (#2369)
This implements the new algorithm for deciding which resources must be
deleted due to a delete-before-replace operation.
We need to compute the set of resources that may be replaced by a
change to the resource under consideration. We do this by taking the
complete set of transitive dependents on the resource under
consideration and removing any resources that would not be replaced by
changes to their dependencies. We determine whether or not a resource
may be replaced by substituting unknowns for input properties that may
change due to deletion of the resources their value depends on and
calling the resource provider's Diff method.
This is perhaps clearer when described by example. Consider the
following dependency graph:
A
__|__
B C
| _|_
D E F
In this graph, all of B, C, D, E, and F transitively depend on A. It may
be the case, however, that changes to the specific properties of any of
those resources R that would occur if a resource on the path to A were
deleted and recreated may not cause R to be replaced. For example, the
edge from B to A may be a simple dependsOn edge such that a change to
B does not actually influence any of B's input properties. In that case,
neither B nor D would need to be deleted before A could be deleted.
In order to make the above algorithm a reality, the resource monitor
interface has been updated to include a map that associates an input
property key with the list of resources that input property depends on.
Older clients of the resource monitor will leave this map empty, in
which case all input properties will be treated as depending on all
dependencies of the resource. This is probably overly conservative, but
it is less conservative than what we currently implement, and is
certainly correct.
2019-01-28 18:46:30 +01:00
|
|
|
if sig, hasSig := objmap[resource.SigKey]; hasSig {
|
|
|
|
switch sig {
|
|
|
|
case resource.AssetSig:
|
|
|
|
asset, isasset, err := resource.DeserializeAsset(objmap)
|
|
|
|
if err != nil {
|
|
|
|
return resource.PropertyValue{}, err
|
|
|
|
}
|
|
|
|
contract.Assert(isasset)
|
|
|
|
return resource.NewAssetProperty(asset), nil
|
|
|
|
case resource.ArchiveSig:
|
|
|
|
archive, isarchive, err := resource.DeserializeArchive(objmap)
|
|
|
|
if err != nil {
|
|
|
|
return resource.PropertyValue{}, err
|
|
|
|
}
|
|
|
|
contract.Assert(isarchive)
|
|
|
|
return resource.NewArchiveProperty(archive), nil
|
|
|
|
case resource.SecretSig:
|
2019-04-12 23:29:08 +02:00
|
|
|
ciphertext, ok := objmap["ciphertext"].(string)
|
|
|
|
if !ok {
|
|
|
|
return resource.PropertyValue{}, errors.New("malformed secret value: missing ciphertext")
|
|
|
|
}
|
|
|
|
var elem interface{}
|
2019-04-17 22:48:38 +02:00
|
|
|
plaintext, err := dec.DecryptValue(ciphertext)
|
|
|
|
if err != nil {
|
|
|
|
return resource.PropertyValue{}, errors.Wrap(err, "decrypting secret value")
|
|
|
|
}
|
|
|
|
if err := json.Unmarshal([]byte(plaintext), &elem); err != nil {
|
2019-04-12 23:29:08 +02:00
|
|
|
return resource.PropertyValue{}, err
|
|
|
|
}
|
2019-04-17 22:48:38 +02:00
|
|
|
ev, err := DeserializePropertyValue(elem, config.NopDecrypter)
|
2019-04-12 23:29:08 +02:00
|
|
|
if err != nil {
|
|
|
|
return resource.PropertyValue{}, err
|
|
|
|
}
|
2019-09-19 00:52:31 +02:00
|
|
|
prop := resource.MakeSecret(ev)
|
|
|
|
// If the decrypter is a cachingCrypter, insert the plain- and ciphertext into the cache with the
|
|
|
|
// new *resource.Secret as the key.
|
|
|
|
if cachingCrypter, ok := dec.(*cachingCrypter); ok {
|
|
|
|
cachingCrypter.insert(prop.SecretValue(), plaintext, ciphertext)
|
|
|
|
}
|
|
|
|
return prop, nil
|
Implement more precise delete-before-replace semantics. (#2369)
This implements the new algorithm for deciding which resources must be
deleted due to a delete-before-replace operation.
We need to compute the set of resources that may be replaced by a
change to the resource under consideration. We do this by taking the
complete set of transitive dependents on the resource under
consideration and removing any resources that would not be replaced by
changes to their dependencies. We determine whether or not a resource
may be replaced by substituting unknowns for input properties that may
change due to deletion of the resources their value depends on and
calling the resource provider's Diff method.
This is perhaps clearer when described by example. Consider the
following dependency graph:
A
__|__
B C
| _|_
D E F
In this graph, all of B, C, D, E, and F transitively depend on A. It may
be the case, however, that changes to the specific properties of any of
those resources R that would occur if a resource on the path to A were
deleted and recreated may not cause R to be replaced. For example, the
edge from B to A may be a simple dependsOn edge such that a change to
B does not actually influence any of B's input properties. In that case,
neither B nor D would need to be deleted before A could be deleted.
In order to make the above algorithm a reality, the resource monitor
interface has been updated to include a map that associates an input
property key with the list of resources that input property depends on.
Older clients of the resource monitor will leave this map empty, in
which case all input properties will be treated as depending on all
dependencies of the resource. This is probably overly conservative, but
it is less conservative than what we currently implement, and is
certainly correct.
2019-01-28 18:46:30 +01:00
|
|
|
default:
|
|
|
|
return resource.PropertyValue{}, errors.Errorf("unrecognized signature '%v' in property map", sig)
|
|
|
|
}
|
2017-07-17 19:38:57 +02:00
|
|
|
}
|
Implement more precise delete-before-replace semantics. (#2369)
This implements the new algorithm for deciding which resources must be
deleted due to a delete-before-replace operation.
We need to compute the set of resources that may be replaced by a
change to the resource under consideration. We do this by taking the
complete set of transitive dependents on the resource under
consideration and removing any resources that would not be replaced by
changes to their dependencies. We determine whether or not a resource
may be replaced by substituting unknowns for input properties that may
change due to deletion of the resources their value depends on and
calling the resource provider's Diff method.
This is perhaps clearer when described by example. Consider the
following dependency graph:
A
__|__
B C
| _|_
D E F
In this graph, all of B, C, D, E, and F transitively depend on A. It may
be the case, however, that changes to the specific properties of any of
those resources R that would occur if a resource on the path to A were
deleted and recreated may not cause R to be replaced. For example, the
edge from B to A may be a simple dependsOn edge such that a change to
B does not actually influence any of B's input properties. In that case,
neither B nor D would need to be deleted before A could be deleted.
In order to make the above algorithm a reality, the resource monitor
interface has been updated to include a map that associates an input
property key with the list of resources that input property depends on.
Older clients of the resource monitor will leave this map empty, in
which case all input properties will be treated as depending on all
dependencies of the resource. This is probably overly conservative, but
it is less conservative than what we currently implement, and is
certainly correct.
2019-01-28 18:46:30 +01:00
|
|
|
|
2017-07-17 19:38:57 +02:00
|
|
|
// Otherwise, it's just a weakly typed object map.
|
2017-10-22 22:39:21 +02:00
|
|
|
return resource.NewObjectProperty(obj), nil
|
2017-02-22 23:32:03 +01:00
|
|
|
default:
|
|
|
|
contract.Failf("Unrecognized property type: %v", reflect.ValueOf(v))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-10-22 22:39:21 +02:00
|
|
|
return resource.NewNullProperty(), nil
|
2017-02-22 03:31:43 +01:00
|
|
|
}
|