pulumi/pkg/engine/update.go
joeduffy 8b5874dab5 General prep work for refresh
This change includes a bunch of refactorings I made in prep for
doing refresh (first, the command, see pulumi/pulumi#1081):

* The primary change is to alter the way the engine's core update
  functionality works with respect to deploy.Source.  This is how
  we plug in new sources of resource information during planning
  (and, soon, diffing).  The way I intend to model refresh is by
  having a new kind of source, deploy.RefreshSource, which will let
  us do virtually everything about an update/diff the same way with
  refreshes, avoiding otherwise duplicative effort.

  This includes changing the planOptions (née deployOptions) to
  take a new SourceFunc callback, which is responsible for creating
  a source specific to the kind of plan being requested.

  Preview, Update, and Destroy are now primarily differentiated by
  the kind of deploy.Source that they return, rather than by
  sprinkling things like `if Destroying` throughout.  This tidies
  up some logic and, more importantly, gives us precisely the
  refresh hook we need.  (See the sketch after this list.)

* Originally, we used the deploy.NullSource for Destroy operations.
  It simply returns no resources, which is exactly what Destroy
  needs.  At some point we stopped doing this, and instead had
  `if Destroying` cases sprinkled throughout the deploy.EvalSource.
  Judging by a comment, this was a vestige of an old way we did
  configuration that is apparently no longer relevant.

* Move diff and diff-printing logic within the engine into its own
  pkg/engine/diff.go file, to prepare for upcoming work.

* I keep noticing benign diffs anytime I regenerate protobufs.  I
  suspect this is because we're each on different versions of the
  tools.  I changed generate.sh to also dump the tool version into
  grpc_version.txt.  At least now we can see where the diffs are
  coming from, decide whether to take them (i.e., a newer version),
  and ensure that as a team we are monotonically increasing, and
  not going backwards.

* I also tidied up some tiny things I noticed while in there, like
  comments, incorrect types, lint suppressions, and so on.
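
To make the new hook concrete, here is a minimal, self-contained
sketch of the pattern (hypothetical, simplified types; the real
interfaces live in pkg/resource/deploy and pkg/engine):

  package main

  import "fmt"

  // Source abstracts where resource information comes from.
  type Source interface{ Kind() string }

  type evalSource struct{}    // runs the program (update/preview)
  type nullSource struct{}    // produces nothing (destroy)
  type refreshSource struct{} // reads live provider state (planned)

  func (evalSource) Kind() string    { return "eval" }
  func (nullSource) Kind() string    { return "null" }
  func (refreshSource) Kind() string { return "refresh" }

  // sourceFunc mirrors planOptions.SourceFunc: each operation
  // supplies its own way of producing a Source.
  type sourceFunc func() (Source, error)

  func newUpdateSource() (Source, error)  { return evalSource{}, nil }
  func newDestroySource() (Source, error) { return nullSource{}, nil }

  func plan(newSource sourceFunc) error {
      src, err := newSource()
      if err != nil {
          return err
      }
      // Everything past this point is identical, no matter where
      // the resources come from.
      fmt.Println("planning against source:", src.Kind())
      return nil
  }

  func main() {
      _ = plan(newUpdateSource)  // update/preview
      _ = plan(newDestroySource) // destroy: no `if Destroying` needed
  }
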
2018-03-28 07:45:23 -07:00

213 lines
6.7 KiB
Go

// Copyright 2018, Pulumi Corporation. All rights reserved.

package engine

import (
    "time"

    "github.com/pkg/errors"

    "github.com/pulumi/pulumi/pkg/diag"
    "github.com/pulumi/pulumi/pkg/resource"
    "github.com/pulumi/pulumi/pkg/resource/deploy"
    "github.com/pulumi/pulumi/pkg/resource/plugin"
    "github.com/pulumi/pulumi/pkg/util/contract"
    "github.com/pulumi/pulumi/pkg/workspace"
)

// UpdateOptions contains all the settings for customizing how an update (deploy, preview, or destroy) is performed.
type UpdateOptions struct {
    Analyzers []string // an optional set of analyzers to run as part of this deployment.
    DryRun    bool     // true if we should just print the plan without performing it.
    Parallel  int      // the degree of parallelism for resource operations (<=1 for serial).
    Debug     bool     // true if debugging output is enabled.
}

// ResourceChanges contains the aggregate resource changes by operation type.
type ResourceChanges map[deploy.StepOp]int
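
// A caller can summarize an update by ranging over the returned
// ResourceChanges; for example (hypothetical caller, not part of this
// package):
//
//     changes, err := Update(u, events, opts)
//     if err == nil {
//         for op, count := range changes {
//             fmt.Printf("%v: %d\n", op, count)
//         }
//     }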

// Update performs an update (or, when opts.DryRun is set, a preview), emitting
// engine events to the given channel and returning the aggregate resource
// changes that resulted.
func Update(u UpdateInfo, events chan<- Event, opts UpdateOptions) (ResourceChanges, error) {
    contract.Require(u != nil, "update")
    contract.Require(events != nil, "events")
    defer func() { events <- cancelEvent() }()

    ctx, err := newPlanContext(u)
    if err != nil {
        return nil, err
    }
    defer ctx.Close()

    emitter := makeEventEmitter(events, u)
    return update(ctx, planOptions{
        UpdateOptions: opts,
        SourceFunc:    newUpdateSourceFunc(),
        Events:        emitter,
        Diag:          newEventSink(emitter),
    })
}
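
// Callers are expected to drain the events channel concurrently; the
// deferred cancelEvent() above guarantees a final event is delivered even on
// error, which a consumer can use as its signal to stop. A minimal
// hypothetical caller (render and the stop condition are assumptions, not
// part of this file):
//
//     events := make(chan Event)
//     go func() {
//         for e := range events {
//             render(e) // stop once the cancel event arrives
//         }
//     }()
//     changes, err := Update(u, events, UpdateOptions{Parallel: 8})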

// newUpdateSourceFunc returns the planSourceFunc used for updates and
// previews: it loads the program's required plugins and then evaluates the
// program to produce the resources for planning.
func newUpdateSourceFunc() planSourceFunc {
    return func(opts planOptions, proj *workspace.Project, pwd, main string,
        target *deploy.Target, plugctx *plugin.Context) (deploy.Source, error) {
        // Figure out which plugins to load by inspecting the program contents.
        plugins, err := plugctx.Host.GetRequiredPlugins(plugin.ProgInfo{
            Proj:    proj,
            Pwd:     pwd,
            Program: main,
        })
        if err != nil {
            return nil, err
        }

        // Now ensure that we have loaded up any plugins that the program will need in advance.
        if err = plugctx.Host.EnsurePlugins(plugins); err != nil {
            return nil, err
        }

        // If that succeeded, create a new source that will perform interpretation of the compiled program.
        // TODO[pulumi/pulumi#88]: we are passing `nil` as the arguments map; we need to allow a way to pass these.
        return deploy.NewEvalSource(plugctx, &deploy.EvalRunInfo{
            Proj:    proj,
            Pwd:     pwd,
            Program: main,
            Target:  target,
        }, opts.DryRun), nil
    }
}
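
// By contrast (sketch only, not part of this file), Destroy can supply a
// source func that skips program evaluation entirely and returns the
// deploy.NullSource, which yields no resources and therefore causes every
// resource in the existing snapshot to be deleted; the planned
// deploy.RefreshSource would slot into this same hook. Constructor details
// are elided since they are not shown here:
//
//     func newDestroySourceFunc() planSourceFunc {
//         return func(opts planOptions, proj *workspace.Project, pwd, main string,
//             target *deploy.Target, plugctx *plugin.Context) (deploy.Source, error) {
//             // No plugins to load and no program to run: produce nothing.
//             return deploy.NewNullSource( /* ... */ ), nil
//         }
//     }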

// update performs the plan and, unless this is a dry run (preview), applies
// it, returning the aggregate resource changes.
func update(info *planContext, opts planOptions) (ResourceChanges, error) {
    result, err := plan(info, opts)
    if err != nil {
        return nil, err
    }

    var resourceChanges ResourceChanges
    if result != nil {
        defer contract.IgnoreClose(result)

        // Make the current working directory the same as the program's, and restore it upon exit.
        done, err := result.Chdir()
        if err != nil {
            return nil, err
        }
        defer done()

        if opts.DryRun {
            // If a dry run, just print the plan; don't actually carry out the deployment.
            resourceChanges, err = printPlan(result)
            if err != nil {
                return resourceChanges, err
            }
        } else {
            // Otherwise, we will actually deploy the latest bits.
            opts.Events.preludeEvent(opts.DryRun, result.Ctx.Update.GetTarget().Config)

            // Walk the plan, reporting progress and executing the actual operations as we go.
            start := time.Now()
            actions := newUpdateActions(info.Update, opts)
            summary, _, _, err := result.Walk(actions, false)
            if err != nil && summary == nil {
                // Something went wrong, and no changes were made.
                return resourceChanges, err
            }
            contract.Assert(summary != nil)

            // Print out the total number of steps performed (and their kinds), the duration, and any summary info.
            resourceChanges = ResourceChanges(actions.Ops)
            opts.Events.updateSummaryEvent(actions.MaybeCorrupt, time.Since(start), resourceChanges)

            if err != nil {
                return resourceChanges, err
            }
        }
    }

    if !opts.Diag.Success() {
        // If any errors were not printed above, be sure to make them evident in the output.
        return resourceChanges, errors.New("One or more errors occurred during this update")
    }
    return resourceChanges, nil
}

// updateActions pretty-prints the plan application process as it goes.
type updateActions struct {
    Steps        int
    Ops          map[deploy.StepOp]int
    Seen         map[resource.URN]deploy.Step
    MaybeCorrupt bool
    Update       UpdateInfo
    Opts         planOptions
}

func newUpdateActions(u UpdateInfo, opts planOptions) *updateActions {
    return &updateActions{
        Ops:    make(map[deploy.StepOp]int),
        Seen:   make(map[resource.URN]deploy.Step),
        Update: u,
        Opts:   opts,
    }
}

func (acts *updateActions) OnResourceStepPre(step deploy.Step) (interface{}, error) {
    // Ensure we've marked this step as observed.
    acts.Seen[step.URN()] = step

    indent := getIndent(step, acts.Seen)
    summary := getResourcePropertiesSummary(step, indent)
    details := getResourcePropertiesDetails(step, indent, false, acts.Opts.Debug)
    acts.Opts.Events.resourcePreEvent(step, indent, summary, details)

    // Inform the snapshot service that we are about to perform a step.
    return acts.Update.BeginMutation()
}

func (acts *updateActions) OnResourceStepPost(ctx interface{},
    step deploy.Step, status resource.Status, err error) error {
    assertSeen(acts.Seen, step)

    // Report the result of the step.
    stepop := step.Op()
    if err != nil {
        if status == resource.StatusUnknown {
            acts.MaybeCorrupt = true
        }

        // Issue a true, bona fide error.
        acts.Opts.Diag.Errorf(diag.ErrorPlanApplyFailed, err)
        acts.Opts.Events.resourceOperationFailedEvent(step, status, acts.Steps)
    } else {
        if step.Logical() {
            // Increment the counters.
            acts.Steps++
            acts.Ops[stepop]++
        }

        // Also show outputs here, since there might be some from the initial registration.
        indent := getIndent(step, acts.Seen)
        text := getResourceOutputsPropertiesString(step, indent, false, acts.Opts.Debug)
        acts.Opts.Events.resourceOutputsEvent(step, indent, text)
    }

    // Write out the current snapshot. Note that even if a failure has occurred, we should still
    // have a safe checkpoint, and that any error that occurs while writing the checkpoint trumps
    // the error reported above.
    return ctx.(SnapshotMutation).End(step.Iterator().Snap())
}

func (acts *updateActions) OnResourceOutputs(step deploy.Step) error {
    assertSeen(acts.Seen, step)

    indent := getIndent(step, acts.Seen)
    text := getResourceOutputsPropertiesString(step, indent, false, acts.Opts.Debug)
    acts.Opts.Events.resourceOutputsEvent(step, indent, text)

    // There's a chance there are new outputs that weren't written out last time.
    // We need to perform another snapshot write to ensure they get written out.
    mutation, err := acts.Update.BeginMutation()
    if err != nil {
        return err
    }
    return mutation.End(step.Iterator().Snap())
}
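
// Note on the pattern above: each step (OnResourceStepPre/Post) and each
// outputs registration (OnResourceOutputs) is bracketed by a
// BeginMutation/End pair, so the checkpoint is rewritten around every
// mutation. That is why, even when a step fails partway through an update,
// a safe checkpoint remains to resume from.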