Delete specific target (#3244)

CyrusNajmabadi 2019-09-19 19:28:14 -07:00 committed by GitHub
parent da0bcdccda
commit c1ff9c37f8
9 changed files with 423 additions and 57 deletions


@@ -9,6 +9,8 @@ CHANGELOG
[#3239](https://github.com/pulumi/pulumi/pull/3239)
- `pulumi refresh` can now be scoped to refresh a subset of resources by adding a `--target urn` or
`-t urn` argument. Multiple resources can be specified using `-t urn1 -t urn2`.
- `pulumi destroy` can now be scoped to delete a single resource (and its dependents) by adding a
`--target urn` or `-t urn` argument. Multiple resources can be specified using `-t urn1 -t urn2`.
- Avoid re-encrypting secret values on each checkpoint write. These changes should improve update times for stacks
that contain secret values.
[#3183](https://github.com/pulumi/pulumi/pull/3183)
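
As a rough illustration of the new `--target`/`-t` flags above, here is a minimal Go sketch (the URN literal is made up) of how repeated flag values are converted into the `resource.URN` slice that the engine's new `DestroyTargets` option consumes, mirroring the destroy command change in this commit:

```go
// Hypothetical sketch: converting repeated --target flag values into the
// resource.URN slice fed to the new engine.UpdateOptions.DestroyTargets field.
package main

import (
	"fmt"

	"github.com/pulumi/pulumi/pkg/engine"
	"github.com/pulumi/pulumi/pkg/resource"
)

func main() {
	// Values as they would arrive from repeated -t/--target flags.
	targets := []string{
		"urn:pulumi:dev::proj::aws:s3/bucket:Bucket::my-bucket", // hypothetical URN
	}

	targetUrns := []resource.URN{}
	for _, t := range targets {
		targetUrns = append(targetUrns, resource.URN(t))
	}

	opts := engine.UpdateOptions{DestroyTargets: targetUrns}
	fmt.Printf("destroy will target %d resource(s)\n", len(opts.DestroyTargets))
}
```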


@@ -24,6 +24,7 @@ import (
"github.com/pulumi/pulumi/pkg/backend"
"github.com/pulumi/pulumi/pkg/backend/display"
"github.com/pulumi/pulumi/pkg/engine"
"github.com/pulumi/pulumi/pkg/resource"
"github.com/pulumi/pulumi/pkg/util/cmdutil"
"github.com/pulumi/pulumi/pkg/util/result"
)
@@ -45,6 +46,7 @@ func newDestroyCmd() *cobra.Command {
var skipPreview bool
var suppressOutputs bool
var yes bool
var targets *[]string
var cmd = &cobra.Command{
Use: "destroy",
@@ -110,11 +112,17 @@ func newDestroyCmd() *cobra.Command {
return result.FromError(errors.Wrap(err, "getting stack configuration"))
}
targetUrns := []resource.URN{}
for _, t := range *targets {
targetUrns = append(targetUrns, resource.URN(t))
}
opts.Engine = engine.UpdateOptions{
Parallel: parallel,
Debug: debug,
Refresh: refresh,
UseLegacyDiff: useLegacyDiff(),
Parallel: parallel,
Debug: debug,
Refresh: refresh,
DestroyTargets: targetUrns,
UseLegacyDiff: useLegacyDiff(),
}
_, res := s.Destroy(commandContext(), backend.UpdateOperation{
@@ -127,11 +135,11 @@ func newDestroyCmd() *cobra.Command {
Scopes: cancellationScopes,
})
if res == nil {
if res == nil && len(*targets) == 0 {
fmt.Printf("The resources in the stack have been deleted, but the history and configuration "+
"associated with the stack are still maintained. \nIf you want to remove the stack "+
"completely, run 'pulumi stack rm %s'.\n", s.Ref())
} else if res.Error() == context.Canceled {
} else if res != nil && res.Error() == context.Canceled {
return result.FromError(errors.New("destroy cancelled"))
}
return PrintEngineResult(res)
@@ -151,6 +159,11 @@ func newDestroyCmd() *cobra.Command {
&message, "message", "m", "",
"Optional message to associate with the destroy operation")
targets = cmd.PersistentFlags().StringArrayP(
"target", "t", []string{},
"Specify a single resource URN to destroy. All resources necessary to destroy this target will also be destroyed."+
" Multiple resources can be specified using: --target urn1 --target urn2")
// Flags for engine.UpdateOptions.
cmd.PersistentFlags().BoolVar(
&diffDisplay, "diff", false,
@@ -176,6 +189,7 @@ func newDestroyCmd() *cobra.Command {
cmd.PersistentFlags().BoolVar(
&suppressOutputs, "suppress-outputs", false,
"Suppress display of stack outputs (in case they contain sensitive values)")
cmd.PersistentFlags().BoolVarP(
&yes, "yes", "y", false,
"Automatically approve and perform the destroy after previewing it")


@@ -67,3 +67,16 @@ func GetResourceToRefreshCouldNotBeFoundDidYouForgetError() *Diag {
return newError("", 2011, "Resource to refresh '%v' could not be found in the stack. "+
"Did you forget to escape $ in your shell?")
}
func GetResourceToDeleteCouldNotBeFoundError() *Diag {
return newError("", 2012, "Resource to delete '%v' could not be found in the stack.")
}
func GetResourceToDeleteCouldNotBeFoundDidYouForgetError() *Diag {
return newError("", 2013, "Resource to delete '%v' could not be found in the stack. "+
"Did you forget to escape $ in your shell?")
}
func GetCannotDeleteParentResourceWithoutAlsoDeletingChildError() *Diag {
return newError("", 2014, "Cannot delete parent resource '%v' without also deleting child '%v'.")
}
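
For context, a minimal sketch of how these new error constructors are intended to be used; it mirrors the call sites added to the step generator later in this commit, with a free-standing `sink` parameter standing in for `sg.plan.Diag()`:

```go
package example // hypothetical illustration, not part of the commit

import (
	"strings"

	"github.com/pulumi/pulumi/pkg/diag"
	"github.com/pulumi/pulumi/pkg/resource"
)

// reportUnknownDeleteTarget reports a --target URN that is not present in the
// stack, choosing the "did you forget to escape $" variant when the URN
// contains no '$' (suggesting the shell may have swallowed part of it).
func reportUnknownDeleteTarget(sink diag.Sink, target resource.URN) {
	if strings.Contains(string(target), "$") {
		sink.Errorf(diag.GetResourceToDeleteCouldNotBeFoundError(), target)
	} else {
		sink.Errorf(diag.GetResourceToDeleteCouldNotBeFoundDidYouForgetError(), target)
	}
}
```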


@@ -441,6 +441,16 @@ func (p *TestPlan) Run(t *testing.T, snapshot *deploy.Snapshot) *deploy.Snapshot
continue
}
if res != nil {
if res.IsBail() {
t.Logf("Got unexpected bail result")
t.FailNow()
} else {
t.Logf("Got unexpected error result: %v", res.Error())
t.FailNow()
}
}
assert.Nil(t, res)
}
@@ -1556,17 +1566,18 @@ func TestRefreshWithDelete(t *testing.T) {
}
}
func pickURN(urns []resource.URN, target string) resource.URN {
switch target {
case "resA":
return urns[0]
case "resB":
return urns[1]
case "resC":
return urns[2]
default:
panic("Invalid target: " + target)
func pickURN(t *testing.T, urns []resource.URN, names []string, target string) resource.URN {
assert.Equal(t, len(urns), len(names))
assert.Contains(t, names, target)
for i, name := range names {
if name == target {
return urns[i]
}
}
t.Fatalf("Could not find target: %v in %v", target, names)
return ""
}
// Tests that dependencies are correctly rewritten when refresh removes deleted resources.
@@ -1600,7 +1611,7 @@ func validateRefreshDeleteCombination(t *testing.T, names []string, targets []st
t.Logf("Refreshing targets: %v", targets)
for _, target := range targets {
refreshTargets = append(p.Options.RefreshTargets, pickURN(urns, target))
refreshTargets = append(refreshTargets, pickURN(t, urns, names, target))
}
p.Options.RefreshTargets = refreshTargets
@@ -1757,7 +1768,7 @@ func validateRefreshBasicsCombination(t *testing.T, names []string, targets []st
refreshTargets := []resource.URN{}
for _, target := range targets {
refreshTargets = append(p.Options.RefreshTargets, pickURN(urns, target))
refreshTargets = append(p.Options.RefreshTargets, pickURN(t, urns, names, target))
}
p.Options.RefreshTargets = refreshTargets
@@ -4334,3 +4345,208 @@ func TestImportUpdatedID(t *testing.T) {
}
}
}
func TestDeleteTarget(t *testing.T) {
names := []string{"A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L"}
// Try destroying the stack with combinations of the above resources as the targets.
subsets := combinations.All(names)
for _, subset := range subsets {
// limit to up to 3 resources to destroy. This keeps the test running time under
// control as it only generates a few hundred combinations instead of several thousand.
if len(subset) <= 3 {
deleteSpecificTargets(t, names, subset, func(urns []resource.URN, deleted map[resource.URN]bool) {})
}
}
deleteSpecificTargets(t, names, []string{"A"}, func(urns []resource.URN, deleted map[resource.URN]bool) {
// when deleting 'A' we expect A, B, C, E, F, and K to be deleted
assert.Equal(t, map[resource.URN]bool{
pickURN(t, urns, names, "A"): true,
pickURN(t, urns, names, "B"): true,
pickURN(t, urns, names, "C"): true,
pickURN(t, urns, names, "E"): true,
pickURN(t, urns, names, "F"): true,
pickURN(t, urns, names, "K"): true,
}, deleted)
})
}
func deleteSpecificTargets(
t *testing.T, names []string, targets []string,
validate func(urns []resource.URN, deleted map[resource.URN]bool)) {
//          A
// _________|_________
// B        C        D
//       ___|___  ___|___
//       E  F  G  H  I  J
//          |__|
//          K  L
p := &TestPlan{}
const resType = "pkgA:m:typA"
type propertyDependencies map[resource.PropertyKey][]resource.URN
urnA := p.NewProviderURN("pkgA", names[0], "")
urnB := p.NewURN(resType, names[1], "")
urnC := p.NewProviderURN("pkgA", names[2], "")
urnD := p.NewProviderURN("pkgA", names[3], "")
urnE := p.NewURN(resType, names[4], "")
urnF := p.NewURN(resType, names[5], "")
urnG := p.NewURN(resType, names[6], "")
urnH := p.NewURN(resType, names[7], "")
urnI := p.NewURN(resType, names[8], "")
urnJ := p.NewURN(resType, names[9], "")
urnK := p.NewURN(resType, names[10], "")
urnL := p.NewURN(resType, names[11], "")
urns := []resource.URN{
urnA, urnB, urnC, urnD, urnE, urnF,
urnG, urnH, urnI, urnJ, urnK, urnL,
}
newResource := func(urn resource.URN, id resource.ID, provider string, dependencies []resource.URN,
propertyDeps propertyDependencies, outputs resource.PropertyMap) *resource.State {
inputs := resource.PropertyMap{}
for k := range propertyDeps {
inputs[k] = resource.NewStringProperty("foo")
}
return &resource.State{
Type: urn.Type(),
URN: urn,
Custom: true,
Delete: false,
ID: id,
Inputs: inputs,
Outputs: outputs,
Dependencies: dependencies,
Provider: provider,
PropertyDependencies: propertyDeps,
}
}
old := &deploy.Snapshot{
Resources: []*resource.State{
newResource(urnA, "0", "", nil, nil, resource.PropertyMap{"A": resource.NewStringProperty("foo")}),
newResource(urnB, "1", string(urnA)+"::0", nil, nil, nil),
newResource(urnC, "2", "",
[]resource.URN{urnA},
propertyDependencies{"A": []resource.URN{urnA}},
resource.PropertyMap{"A": resource.NewStringProperty("bar")}),
newResource(urnD, "3", "",
[]resource.URN{urnA},
propertyDependencies{"B": []resource.URN{urnA}}, nil),
newResource(urnE, "4", string(urnC)+"::2", nil, nil, nil),
newResource(urnF, "5", "",
[]resource.URN{urnC},
propertyDependencies{"A": []resource.URN{urnC}}, nil),
newResource(urnG, "6", "",
[]resource.URN{urnC},
propertyDependencies{"B": []resource.URN{urnC}}, nil),
newResource(urnH, "4", string(urnD)+"::3", nil, nil, nil),
newResource(urnI, "5", "",
[]resource.URN{urnD},
propertyDependencies{"A": []resource.URN{urnD}}, nil),
newResource(urnJ, "6", "",
[]resource.URN{urnD},
propertyDependencies{"B": []resource.URN{urnD}}, nil),
newResource(urnK, "7", "",
[]resource.URN{urnF, urnG},
propertyDependencies{"A": []resource.URN{urnF, urnG}}, nil),
newResource(urnL, "8", "",
[]resource.URN{urnF, urnG},
propertyDependencies{"B": []resource.URN{urnF, urnG}}, nil),
},
}
loaders := []*deploytest.ProviderLoader{
deploytest.NewProviderLoader("pkgA", semver.MustParse("1.0.0"), func() (plugin.Provider, error) {
return &deploytest.Provider{
DiffConfigF: func(urn resource.URN, olds, news resource.PropertyMap,
ignoreChanges []string) (plugin.DiffResult, error) {
if !olds["A"].DeepEquals(news["A"]) {
return plugin.DiffResult{
ReplaceKeys: []resource.PropertyKey{"A"},
DeleteBeforeReplace: true,
}, nil
}
return plugin.DiffResult{}, nil
},
DiffF: func(urn resource.URN, id resource.ID,
olds, news resource.PropertyMap, ignoreChanges []string) (plugin.DiffResult, error) {
if !olds["A"].DeepEquals(news["A"]) {
return plugin.DiffResult{ReplaceKeys: []resource.PropertyKey{"A"}}, nil
}
return plugin.DiffResult{}, nil
},
}, nil
}),
}
program := deploytest.NewLanguageRuntime(func(_ plugin.RunInfo, monitor *deploytest.ResourceMonitor) error {
register := func(urn resource.URN, provider string, inputs resource.PropertyMap) resource.ID {
_, id, _, err := monitor.RegisterResource(urn.Type(), string(urn.Name()), true, deploytest.ResourceOptions{
Provider: provider,
Inputs: inputs,
})
assert.NoError(t, err)
return id
}
idA := register(urnA, "", resource.PropertyMap{"A": resource.NewStringProperty("bar")})
register(urnB, string(urnA)+"::"+string(idA), nil)
idC := register(urnC, "", nil)
idD := register(urnD, "", nil)
register(urnE, string(urnC)+"::"+string(idC), nil)
register(urnF, "", nil)
register(urnG, "", nil)
register(urnH, string(urnD)+"::"+string(idD), nil)
register(urnI, "", nil)
register(urnJ, "", nil)
register(urnK, "", nil)
register(urnL, "", nil)
return nil
})
p.Options.host = deploytest.NewPluginHost(nil, nil, program, loaders...)
destroyTargets := []resource.URN{}
for _, target := range targets {
destroyTargets = append(destroyTargets, pickURN(t, urns, names, target))
}
p.Options.DestroyTargets = destroyTargets
t.Logf("Destroying targets: %v", destroyTargets)
p.Steps = []TestStep{{
Op: Destroy,
ExpectFailure: false,
Validate: func(project workspace.Project, target deploy.Target, j *Journal,
evts []Event, res result.Result) result.Result {
assert.Nil(t, res)
assert.True(t, len(j.Entries) > 0)
deleted := make(map[resource.URN]bool)
for _, entry := range j.Entries {
assert.Equal(t, deploy.OpDelete, entry.Step.Op())
deleted[entry.Step.URN()] = true
}
for _, target := range p.Options.DestroyTargets {
assert.Contains(t, deleted, target)
}
validate(urns, deleted)
return res
},
}}
p.Run(t, old)
}


@@ -182,6 +182,7 @@ func (planResult *planResult) Walk(cancelCtx *Context, events deploy.Events, pre
Refresh: planResult.Options.Refresh,
RefreshOnly: planResult.Options.isRefresh,
RefreshTargets: planResult.Options.RefreshTargets,
DestroyTargets: planResult.Options.DestroyTargets,
TrustDependencies: planResult.Options.trustDependencies,
UseLegacyDiff: planResult.Options.UseLegacyDiff,
}


@@ -65,6 +65,9 @@ type UpdateOptions struct {
// Specific resources to refresh during a refresh operation.
RefreshTargets []resource.URN
// Specific resources to destroy during a destroy operation.
DestroyTargets []resource.URN
// true if the engine should use legacy diffing behavior during an update.
UseLegacyDiff bool


@@ -52,6 +52,7 @@ type Options struct {
Refresh bool // whether or not to refresh before executing the plan.
RefreshOnly bool // whether or not to exit after refreshing.
RefreshTargets []resource.URN // The specific resources to refresh during a refresh op.
DestroyTargets []resource.URN // Specific resources to destroy.
TrustDependencies bool // whether or not to trust the resource dependency graph.
UseLegacyDiff bool // whether or not to use legacy diffing behavior.
}


@@ -153,25 +153,7 @@ func (pe *planExecutor) Execute(callerCtx context.Context, opts Options, preview
}
if event.Event == nil {
deleteSteps := pe.stepGen.GenerateDeletes()
deletes := pe.stepGen.ScheduleDeletes(deleteSteps)
// ScheduleDeletes gives us a list of lists of steps. Each list of steps can safely be executed in
// parallel, but each list must execute completely before the next list can safely begin executing.
//
// This is not "true" delete parallelism, since there may be resources that could safely begin
// deleting, but we won't delete them until the previous set of deletes fully completes. This
// approximation is conservative, but correct.
for _, antichain := range deletes {
logging.V(4).Infof("planExecutor.Execute(...): beginning delete antichain")
tok := pe.stepExec.ExecuteParallel(antichain)
tok.Wait(ctx)
logging.V(4).Infof("planExecutor.Execute(...): antichain complete")
}
// We're done here - signal completion so that the step executor knows to terminate.
pe.stepExec.SignalCompletion()
return false, nil
return false, pe.performDeletes(ctx, opts)
}
if res := pe.handleSingleEvent(event.Event); res != nil {
@@ -213,6 +195,55 @@ func (pe *planExecutor) Execute(callerCtx context.Context, opts Options, preview
return res
}
func (pe *planExecutor) performDeletes(ctx context.Context, opts Options) result.Result {
defer func() {
// We're done here - signal completion so that the step executor knows to terminate.
pe.stepExec.SignalCompletion()
}()
prev := pe.plan.prev
if prev == nil || len(prev.Resources) == 0 {
return nil
}
logging.V(7).Infof("performDeletes(...): beginning")
deleteSteps, res := pe.stepGen.GenerateDeletes(opts.DestroyTargets)
if res != nil {
logging.V(7).Infof("performDeletes(...): generating deletes produced error result")
return res
}
deletes := pe.stepGen.ScheduleDeletes(deleteSteps)
// ScheduleDeletes gives us a list of lists of steps. Each list of steps can safely be executed
// in parallel, but each list must execute completely before the next list can safely begin
// executing.
//
// This is not "true" delete parallelism, since there may be resources that could safely begin
// deleting, but we won't delete them until the previous set of deletes fully completes. This
// approximation is conservative, but correct.
for _, antichain := range deletes {
logging.V(4).Infof("planExecutor.Execute(...): beginning delete antichain")
tok := pe.stepExec.ExecuteParallel(antichain)
tok.Wait(ctx)
logging.V(4).Infof("planExecutor.Execute(...): antichain complete")
}
// After executing targeted deletes, we may now have resources that depend on the resources that
// were deleted. Go through and clean things up accordingly for them.
if len(opts.DestroyTargets) > 0 {
resourceToStep := make(map[*resource.State]Step)
for _, step := range deleteSteps {
resourceToStep[pe.plan.olds[step.URN()]] = step
}
pe.rebuildBaseState(resourceToStep, false /*refresh*/)
}
return nil
}
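
To make the antichain scheduling above concrete, here is a standalone sketch of the execution shape: each antichain runs in parallel, and the next antichain does not start until the previous one has fully finished. The `fakeStep` type and `deleteResource` function are hypothetical stand-ins, not the engine's real `Step` type or step executor:

```go
package main

import (
	"fmt"
	"sync"
)

type fakeStep struct{ urn string }

func deleteResource(s fakeStep) { fmt.Println("deleted", s.urn) }

// executeAntichains runs each inner slice in parallel, waiting for it to
// drain before starting the next slice, just like the loop in performDeletes.
func executeAntichains(antichains [][]fakeStep) {
	for _, chain := range antichains {
		var wg sync.WaitGroup
		for _, step := range chain {
			wg.Add(1)
			go func(s fakeStep) {
				defer wg.Done()
				deleteResource(s)
			}(step)
		}
		// The whole antichain must complete before the next one may begin.
		wg.Wait()
	}
}

func main() {
	// Leaves are deleted first; their dependencies only after they are gone.
	executeAntichains([][]fakeStep{
		{{urn: "K"}, {urn: "L"}},
		{{urn: "F"}, {urn: "G"}},
		{{urn: "C"}},
	})
}
```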
// handleSingleEvent handles a single source event. For all incoming events, it produces a chain that needs
// to be executed and schedules the chain for execution.
func (pe *planExecutor) handleSingleEvent(event SourceEvent) result.Result {
@@ -308,10 +339,8 @@ func (pe *planExecutor) refresh(callerCtx context.Context, opts Options, preview
// old snapshot. If they did provide --target's, then only create refresh steps for those
// specific targets.
steps := []Step{}
initialResources := []*resource.State{}
resourceToStep := map[*resource.State]Step{}
for _, res := range prev.Resources {
initialResources = append(initialResources, res)
if shouldRefresh(opts, res) {
step := NewRefreshStep(pe.plan, res, nil)
steps = append(steps, step)
@@ -326,6 +355,23 @@ func (pe *planExecutor) refresh(callerCtx context.Context, opts Options, preview
stepExec.SignalCompletion()
stepExec.WaitForCompletion()
pe.rebuildBaseState(resourceToStep, true /*refresh*/)
// NOTE: we use the presence of an error in the caller context in order to distinguish caller-initiated
// cancellation from internally-initiated cancellation.
canceled := callerCtx.Err() != nil
if stepExec.Errored() {
pe.reportExecResult("failed", preview)
return result.Bail()
} else if canceled {
pe.reportExecResult("canceled", preview)
return result.Bail()
}
return nil
}
func (pe *planExecutor) rebuildBaseState(resourceToStep map[*resource.State]Step, refresh bool) {
// Rebuild this plan's map of old resources and dependency graph, stripping out any deleted
// resources and repairing dependency lists as necessary. Note that this updates the base
// snapshot _in memory_, so it is critical that any components that use the snapshot refer to
@@ -357,7 +403,7 @@ func (pe *planExecutor) refresh(callerCtx context.Context, opts Options, preview
resources := []*resource.State{}
referenceable := make(map[resource.URN]bool)
olds := make(map[resource.URN]*resource.State)
for _, s := range initialResources {
for _, s := range pe.plan.prev.Resources {
var old, new *resource.State
if step, has := resourceToStep[s]; has {
// We produced a refresh step for this specific resource. Use the new information about
@@ -372,8 +418,10 @@ func (pe *planExecutor) refresh(callerCtx context.Context, opts Options, preview
}
if new == nil {
contract.Assert(old.Custom)
contract.Assert(!providers.IsProviderType(old.Type))
if refresh {
contract.Assert(old.Custom)
contract.Assert(!providers.IsProviderType(old.Type))
}
continue
}
@@ -400,19 +448,6 @@ func (pe *planExecutor) refresh(callerCtx context.Context, opts Options, preview
pe.plan.prev.Resources = resources
pe.plan.olds, pe.plan.depGraph = olds, graph.NewDependencyGraph(resources)
// NOTE: we use the presence of an error in the caller context in order to distinguish caller-initiated
// cancellation from internally-initiated cancellation.
canceled := callerCtx.Err() != nil
if stepExec.Errored() {
pe.reportExecResult("failed", preview)
return result.Bail()
} else if canceled {
pe.reportExecResult("canceled", preview)
return result.Bail()
}
return nil
}
func shouldRefresh(opts Options, res *resource.State) bool {


@@ -501,7 +501,7 @@ func (sg *stepGenerator) GenerateSteps(event RegisterResourceEvent) ([]Step, res
return []Step{NewCreateStep(sg.plan, event, new)}, nil
}
func (sg *stepGenerator) GenerateDeletes() []Step {
func (sg *stepGenerator) GenerateDeletes(targets []resource.URN) ([]Step, result.Result) {
// To compute the deletion list, we must walk the list of old resources *backwards*. This is because the list is
// stored in dependency order, and earlier elements are possibly leaf nodes for later elements. We must not delete
// dependencies prior to their dependent nodes.
@@ -555,7 +555,88 @@ func (sg *stepGenerator) GenerateDeletes() []Step {
}
}
}
return dels
if len(targets) > 0 {
logging.V(7).Infof("Planner was asked to only delete '%v'", targets)
resourcesToDelete := make(map[resource.URN]bool)
// Do an initial pass to ensure that all the targets mentioned are ones we know about.
hasUnknownTarget := false
for _, target := range targets {
if _, has := sg.plan.olds[target]; !has {
hasUnknownTarget = true
logging.V(7).Infof("Resource to delete (%v) could not be found in the stack.", target)
if strings.Contains(string(target), "$") {
sg.plan.Diag().Errorf(diag.GetResourceToDeleteCouldNotBeFoundError(), target)
} else {
sg.plan.Diag().Errorf(diag.GetResourceToDeleteCouldNotBeFoundDidYouForgetError(), target)
}
}
}
if hasUnknownTarget {
return nil, result.Bail()
}
// Now actually use all the requested targets to figure out the exact set to delete.
for _, target := range targets {
current := sg.plan.olds[target]
resourcesToDelete[target] = true
// the item the user is asking to destroy may cause downstream replacements. Clean those up
// as well. Use the standard delete-before-replace computation to determine the minimal
// set of downstream resources that are affected.
deps, res := sg.calculateDependentReplacements(current)
if res != nil {
return nil, res
}
for _, dep := range deps {
logging.V(7).Infof("GenerateDeletes(...): Adding dependent: %v", dep.res.URN)
resourcesToDelete[dep.res.URN] = true
}
}
// Also see if any resources have a resource we're deleting as a parent. If so, we'll block
// the delete. It's a little painful, but it can be worked around by explicitly deleting the
// children before the parents. Note: in almost all cases, people will want to delete the
// children as well, so this restriction should not be too onerous.
for _, res := range sg.plan.prev.Resources {
if res.Parent != "" {
if _, has := resourcesToDelete[res.URN]; has {
// already deleting this child
continue
}
if _, has := resourcesToDelete[res.Parent]; has {
sg.plan.Diag().Errorf(diag.GetCannotDeleteParentResourceWithoutAlsoDeletingChildError(),
res.Parent, res.URN)
return nil, result.Bail()
}
}
}
if logging.V(7) {
keys := []resource.URN{}
for k := range resourcesToDelete {
keys = append(keys, k)
}
logging.V(7).Infof("Planner will delete all of '%v'", keys)
}
filtered := []Step{}
for _, step := range dels {
if _, has := resourcesToDelete[step.URN()]; has {
filtered = append(filtered, step)
}
}
dels = filtered
}
return dels, nil
}
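
As a small illustration of the parent/child restriction described above, the following sketch (not the engine's actual code) returns the (parent, child) pairs that would cause `GenerateDeletes` to bail, i.e. a parent targeted for deletion whose child is not:

```go
package example // hypothetical illustration

import "github.com/pulumi/pulumi/pkg/resource"

// findBlockedParentDeletes returns (parent, child) pairs where the parent is
// in the targeted delete set but the child is not, which the targeted-destroy
// logic above rejects with GetCannotDeleteParentResourceWithoutAlsoDeletingChildError.
func findBlockedParentDeletes(
	resources []*resource.State, toDelete map[resource.URN]bool) [][2]resource.URN {

	blocked := [][2]resource.URN{}
	for _, res := range resources {
		if res.Parent == "" || toDelete[res.URN] {
			// No parent, or this child is being deleted as well: nothing to block.
			continue
		}
		if toDelete[res.Parent] {
			blocked = append(blocked, [2]resource.URN{res.Parent, res.URN})
		}
	}
	return blocked
}
```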
// GeneratePendingDeletes generates delete steps for all resources that are pending deletion. This function should be