pulumi/pkg/engine/plan.go
Sean Gillespie 9a5f4044fa
Serialize SourceEvents coming from the refresh source (#1725)
* Serialize SourceEvents coming from the refresh source

The engine requires that a source event coming from a source be "ready
to execute" at the moment that it is sent to the engine. Since the
refresh source sent all goal states eagerly through its source iterator,
the engine assumed that it was legal to execute them all in parallel and
did so. This is a problem for the snapshot, whose resources must appear
in a legal topological ordering of the dependency DAG.

This PR fixes the issue by sending refresh source events one at a time
through the refresh source iterator, unblocking to send the next event
only once the previous step completes, as sketched below.
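
A rough sketch of that handoff pattern (illustrative names only, not
the engine's actual refresh source types):

package main

import "fmt"

// event is a stand-in for a refresh SourceEvent; done is closed by the
// engine once the corresponding step has been committed.
type event struct {
	urn  string
	done chan struct{}
}

// iterate sends one event at a time, blocking until the engine signals
// completion before producing the next, so steps execute serially in
// snapshot order.
func iterate(goals []string, out chan<- *event) {
	for _, urn := range goals {
		ev := &event{urn: urn, done: make(chan struct{})}
		out <- ev
		<-ev.done // wait for the previous step to complete
	}
	close(out)
}

func main() {
	out := make(chan *event)
	go iterate([]string{"a", "b", "c"}, out)
	for ev := range out {
		fmt.Println("executing", ev.urn)
		close(ev.done) // commit the step, then unblock the source
	}
}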

* Fix deadlock in refresh test

* Fix an issue where the engine "completed" steps too early

By signalling that a step was done before committing the step's results
to the snapshot, the engine was left with a race in which dependent
resources could find themselves fully executed and committed before a
resource they depend on had been committed (see the sketch below).

Fixes pulumi/pulumi#1726
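
A schematic of the corrected per-step ordering (a hedged sketch; names
like snapshotManager are illustrative, not the engine's snapshot API):

package main

import "fmt"

type state struct{ urn string }

type step struct{ new state }

// Apply executes the step and produces the resource's new state.
func (s step) Apply() (state, error) { return s.new, nil }

type snapshotManager struct{ states []state }

// Write persists a state to the snapshot.
func (m *snapshotManager) Write(st state) error {
	m.states = append(m.states, st)
	return nil
}

// executeStep commits the step's results to the snapshot *before*
// signaling completion, so a dependent step can never begin against an
// uncommitted parent.
func executeStep(s step, snap *snapshotManager, done chan<- struct{}) error {
	result, err := s.Apply()
	if err != nil {
		return err
	}
	if err := snap.Write(result); err != nil { // commit first...
		return err
	}
	done <- struct{}{} // ...then signal, unblocking dependents
	return nil
}

func main() {
	done := make(chan struct{}, 1)
	snap := &snapshotManager{}
	if err := executeStep(step{new: state{urn: "parent"}}, snap, done); err == nil {
		<-done
		fmt.Println("states committed:", len(snap.states))
	}
}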

* Fix an issue with Replace steps at the end of a plan

If the last successfully executed step was a Replace, we could
unintentionally leave the snapshot in an invalid state (see the sketch
below for the invariant at stake).
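
One way to picture that invariant (a hedged sketch; the engine's real
state types differ): in a create-before-delete replacement, a snapshot
is only legal if at most one state per URN is live, with any superseded
state marked pending deletion.

package main

import "fmt"

type resourceState struct {
	URN    string
	Delete bool // true if this state is pending deletion
}

// valid reports whether a snapshot has at most one live
// (non-pending-delete) state per URN.
func valid(snap []resourceState) bool {
	live := map[string]bool{}
	for _, r := range snap {
		if r.Delete {
			continue
		}
		if live[r.URN] {
			return false
		}
		live[r.URN] = true
	}
	return true
}

func main() {
	// A trailing Replace that commits the new state without marking the
	// old one pending-delete leaves two live states for one URN.
	bad := []resourceState{
		{URN: "urn:pulumi:res"},
		{URN: "urn:pulumi:res"},
	}
	good := []resourceState{
		{URN: "urn:pulumi:res", Delete: true},
		{URN: "urn:pulumi:res"},
	}
	fmt.Println(valid(bad), valid(good)) // false true
}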

* Add a test

* CR: pass context.Context as first parameter to Iterate

* CR: null->nil
2018-08-08 13:45:48 -07:00


// Copyright 2016-2018, Pulumi Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package engine

import (
"bytes"
"context"
"fmt"
"os"
"sync"
"github.com/golang/glog"
"github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/pulumi/pulumi/pkg/diag"
"github.com/pulumi/pulumi/pkg/diag/colors"
"github.com/pulumi/pulumi/pkg/resource"
"github.com/pulumi/pulumi/pkg/resource/deploy"
"github.com/pulumi/pulumi/pkg/resource/plugin"
"github.com/pulumi/pulumi/pkg/tokens"
"github.com/pulumi/pulumi/pkg/util/contract"
"github.com/pulumi/pulumi/pkg/workspace"
)

// ProjectInfoContext returns information about the current project, including its pwd, main, and plugin context.
func ProjectInfoContext(projinfo *Projinfo, host plugin.Host, config plugin.ConfigSource, pluginEvents plugin.Events,
	diag diag.Sink, tracingSpan opentracing.Span) (string, string, *plugin.Context, error) {
	contract.Require(projinfo != nil, "projinfo")

	// If the package contains an override for the main entrypoint, use it.
	pwd, main, err := projinfo.GetPwdMain()
	if err != nil {
		return "", "", nil, err
	}

	// Create a context for plugins.
	ctx, err := plugin.NewContext(diag, host, config, pluginEvents, pwd, projinfo.Proj.RuntimeInfo.Options(),
		tracingSpan)
	if err != nil {
		return "", "", nil, err
	}

	return pwd, main, ctx, nil
}

// newPlanContext creates a context for a subsequent planning operation. Callers must call Close on the
// resulting context object once they have completed the associated planning operation.
func newPlanContext(u UpdateInfo, opName string, parentSpan opentracing.SpanContext) (*planContext, error) {
	contract.Require(u != nil, "u")

	// Create a root span for the operation
	opts := []opentracing.StartSpanOption{}
	if opName != "" {
		opts = append(opts, opentracing.Tag{Key: "operation", Value: opName})
	}
	if parentSpan != nil {
		opts = append(opts, opentracing.ChildOf(parentSpan))
	}
	tracingSpan := opentracing.StartSpan("pulumi-plan", opts...)

	return &planContext{
		Update:      u,
		TracingSpan: tracingSpan,
	}, nil
}

type planContext struct {
	Update      UpdateInfo       // The update being processed.
	TracingSpan opentracing.Span // An OpenTracing span to parent plan operations within.
}

func (ctx *planContext) Close() {
	ctx.TracingSpan.Finish()
}

// planOptions includes a full suite of options for performing a plan and/or deploy operation.
type planOptions struct {
	UpdateOptions

	// SourceFunc is a factory that returns an EvalSource to use during planning. This is the thing that
	// creates resources to compare against the current checkpoint state (e.g., by evaluating a program, etc).
	SourceFunc planSourceFunc

	SkipOutputs bool         // true if we should skip printing outputs separately.
	DOT         bool         // true if we should print the DOT file for this plan.
	Events      eventEmitter // the channel to write events from the engine to.
	Diag        diag.Sink    // the sink to use for diag'ing.
}

// planSourceFunc is a callback that will be used to prepare for, and evaluate, the "new" state for a stack.
type planSourceFunc func(
	opts planOptions, proj *workspace.Project, pwd, main string,
	target *deploy.Target, plugctx *plugin.Context, dryRun bool) (deploy.Source, error)

// plan just uses the standard logic to parse arguments, options, and to create a snapshot and plan.
func plan(ctx *Context, info *planContext, opts planOptions, dryRun bool) (*planResult, error) {
	contract.Assert(info != nil)
	contract.Assert(info.Update != nil)
	contract.Assert(opts.SourceFunc != nil)

	// If this isn't a dry run, we will need to record plugin events, so that we persist them in the checkpoint. If
	// we're just doing a dry run, we don't actually need to persist anything (and indeed trying to do so would fail).
	var pluginEvents plugin.Events
	if !dryRun {
		pluginEvents = &pluginActions{ctx}
	}

	// First, load the package metadata and the deployment target in preparation for executing the package's program
	// and creating resources. This includes fetching its pwd and main overrides.
	proj, target := info.Update.GetProject(), info.Update.GetTarget()
	contract.Assert(proj != nil)
	contract.Assert(target != nil)
	projinfo := &Projinfo{Proj: proj, Root: info.Update.GetRoot()}
	pwd, main, plugctx, err := ProjectInfoContext(projinfo, opts.host, target, pluginEvents, opts.Diag, info.TracingSpan)
	if err != nil {
		return nil, err
	}

	// Now create the state source. This may issue an error if it can't create the source. This entails,
	// for example, loading any plugins which will be required to execute a program, among other things.
	source, err := opts.SourceFunc(opts, proj, pwd, main, target, plugctx, dryRun)
	if err != nil {
		return nil, err
	}

	// If there are any analyzers in the project file, add them.
	var analyzers []tokens.QName
	if as := projinfo.Proj.Analyzers; as != nil {
		for _, a := range *as {
			analyzers = append(analyzers, a)
		}
	}

	// Append any analyzers from the command line.
	for _, a := range opts.Analyzers {
		analyzers = append(analyzers, tokens.QName(a))
	}

	// Generate a plan; this API handles all interesting cases (create, update, delete).
	plan, err := deploy.NewPlan(plugctx, target, target.Snapshot, source, analyzers, dryRun)
	if err != nil {
		return nil, err
	}

	return &planResult{
		Ctx:     info,
		Plugctx: plugctx,
		Plan:    plan,
		Options: opts,
	}, nil
}

type planResult struct {
	Ctx     *planContext    // plan context information.
	Plugctx *plugin.Context // the context containing plugins and their state.
	Plan    *deploy.Plan    // the plan created by this command.
	Options planOptions     // the options used during planning.
}

// Chdir changes the directory so that all operations from now on are relative to the project we are working with.
// It returns a function that, when run, restores the old working directory.
func (res *planResult) Chdir() (func(), error) {
	pwd := res.Plugctx.Pwd
	if pwd == "" {
		return func() {}, nil
	}
	oldpwd, err := os.Getwd()
	if err != nil {
		return nil, err
	}
	if err = os.Chdir(pwd); err != nil {
		return nil, errors.Wrapf(err, "could not change to the project working directory")
	}
	return func() {
		// Restore the working directory after planning completes.
		cderr := os.Chdir(oldpwd)
		contract.IgnoreError(cderr)
	}, nil
}

// Walk enumerates all steps in the plan, calling out to the provided action at each step. It returns the
// resulting plan summary (which may be available even when an error occurs partway through) and an error,
// if something went wrong.
func (res *planResult) Walk(cancelCtx *Context, events deploy.Events, preview bool) (deploy.PlanSummary, error) {
	ctx, cancelFunc := context.WithCancel(context.Background())

	opts := deploy.Options{
		Events:   events,
		Parallel: res.Options.Parallel,
	}

	src, err := res.Plan.Source().Iterate(ctx, opts, res.Plan)
	if err != nil {
		cancelFunc()
		return nil, err
	}

	done := make(chan bool)
	var summary deploy.PlanSummary

	// Execute the plan on a separate goroutine so that we can listen for cancellation concurrently.
	go func() {
		planExec := deploy.NewPlanExecutor(ctx, res.Plan, opts, preview, src)
		err = planExec.Execute()
		summary = planExec.Summary()
		close(done)
	}()

	// Asynchronously listen for cancellation, and deliver that signal to the plan.
	go func() {
		select {
		case <-cancelCtx.Cancel.Canceled():
			// Cancel the plan executor's context, so it begins to shut down.
			cancelFunc()
			cancelErr := res.Plan.SignalCancellation()
			if cancelErr != nil {
				glog.V(3).Infof("Attempted to signal cancellation to resource providers, but failed: %s",
					cancelErr.Error())
			}
		case <-done:
			return
		}
	}()

	select {
	case <-cancelCtx.Cancel.Terminated():
		return summary, cancelCtx.Cancel.TerminateErr()
	case <-done:
		return summary, err
	}
}

func (res *planResult) Close() error {
	return res.Plugctx.Close()
}

// printPlan prints the plan's result to the plan's Options.Events stream.
func printPlan(ctx *Context, result *planResult, dryRun bool) (ResourceChanges, error) {
	result.Options.Events.preludeEvent(dryRun, result.Ctx.Update.GetTarget().Config)

	// Walk the plan's steps and pretty-print them out.
	actions := newPlanActions(result.Options)
	_, err := result.Walk(ctx, actions, true)
	if err != nil {
		return nil, errors.New("an error occurred while advancing the preview")
	}

	// Emit an event with a summary of operation counts.
	changes := ResourceChanges(actions.Ops)
	result.Options.Events.previewSummaryEvent(changes)
	return changes, nil
}

type planActions struct {
	Refresh bool
	Ops     map[deploy.StepOp]int
	Opts    planOptions
	Seen    map[resource.URN]deploy.Step
	MapLock sync.Mutex
}

func newPlanActions(opts planOptions) *planActions {
	return &planActions{
		Ops:  make(map[deploy.StepOp]int),
		Opts: opts,
		Seen: make(map[resource.URN]deploy.Step),
	}
}

func (acts *planActions) OnResourceStepPre(step deploy.Step) (interface{}, error) {
	acts.MapLock.Lock()
	acts.Seen[step.URN()] = step
	acts.MapLock.Unlock()

	acts.Opts.Events.resourcePreEvent(step, true /*planning*/, acts.Opts.Debug)

	// Warn the user if they're not updating a resource whose initialization failed.
	if step.Op() == deploy.OpSame && len(step.Old().InitErrors) > 0 {
		indent := " "

		// TODO: Move indentation to the display logic, instead of doing it ourselves.
		var warning bytes.Buffer
		warning.WriteString("This resource failed to initialize in a previous deployment. It is recommended\n")
		warning.WriteString(indent + "to update it to fix these issues:\n")
		for i, err := range step.Old().InitErrors {
			warning.WriteString(colors.SpecImportant + indent + fmt.Sprintf(" - Problem #%d", i+1) +
				colors.Reset + " " + err + "\n")
		}
		acts.Opts.Diag.Warningf(diag.RawMessage(step.URN(), warning.String()))
	}

	return nil, nil
}

func (acts *planActions) OnResourceStepPost(ctx interface{},
	step deploy.Step, status resource.Status, err error) error {
	acts.MapLock.Lock()
	assertSeen(acts.Seen, step)
	acts.MapLock.Unlock()

	if err != nil {
		acts.Opts.Diag.Errorf(diag.GetPreviewFailedError(step.URN()), err)
	} else {
		// Track the operation if shown and/or if it is a logically meaningful operation.
		if step.Logical() {
			acts.MapLock.Lock()
			acts.Ops[step.Op()]++
			acts.MapLock.Unlock()
		}

		_ = acts.OnResourceOutputs(step)
	}

	return nil
}

func (acts *planActions) OnResourceOutputs(step deploy.Step) error {
	acts.MapLock.Lock()
	assertSeen(acts.Seen, step)
	acts.MapLock.Unlock()

	// Print the resource outputs separately, unless this is a refresh, in which case they are already printed.
	if !acts.Opts.SkipOutputs {
		acts.Opts.Events.resourceOutputsEvent(step, true /*planning*/, acts.Opts.Debug)
	}

	return nil
}

func assertSeen(seen map[resource.URN]deploy.Step, step deploy.Step) {
	_, has := seen[step.URN()]
	contract.Assertf(has, "URN '%v' had not been marked as seen", step.URN())
}