Improve error messages output by the CLI (#1011)

* Improve error messages output by the CLI

This fixes a couple known issues with the way that we present errors
from the Pulumi CLI:
    1. Any errors from RPC endpoints were bubbling up as they were to
    the top-level, which was unfortunate because they contained
    RPC-specific noise that we don't want to present to the user. This
    commit unwraps errors from resource providers.
    2. The "catastrophic error" message was often printed twice.
    3. Fatal errors were often printed twice, because our CLI top-level
    prints out the fatal error that it receives before exiting; much of
    the time, that error had already been printed.
    4. Errors were prefixed by PU####.

* Feedback: Omit the 'catastrophic' error message and use a less verbose error message as the final error

* Code review feedback: interpretRPCError -> resourceStateAndError

* Code review feedback: deleting some commented-out code, error capitalization

* Cleanup after rebase
This commit is contained in:
Sean Gillespie 2018-03-09 15:43:16 -08:00 committed by GitHub
parent 8906731315
commit 703a954839
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
6 changed files with 71 additions and 54 deletions

View file

@ -139,12 +139,6 @@ func displaySummaryEvent(out io.Writer, event engine.SummaryEventPayload, opts b
fmt.Fprint(out, opts.Color.Colorize(fmt.Sprintf("%vUpdate duration: %v%v\n",
colors.SpecUnimportant, event.Duration, colors.Reset)))
}
if event.MaybeCorrupt {
fmt.Fprint(out, opts.Color.Colorize(fmt.Sprintf(
"%vA catastrophic error occurred; resources states may be unknown%v\n",
colors.SpecAttention, colors.Reset)))
}
}
}
@ -175,19 +169,12 @@ func displayPreludeEvent(out io.Writer, event engine.PreludeEventPayload, opts b
func displayResourceOperationFailedEvent(out io.Writer,
event engine.ResourceOperationFailedPayload, opts backend.DisplayOptions) {
fmt.Fprintf(out, "Step #%v failed [%v]: ", event.Steps+1, event.Metadata.Op)
switch event.Status {
case resource.StatusOK:
fmt.Fprint(out, opts.Color.Colorize(
fmt.Sprintf("%vprovider successfully recovered from this failure%v", colors.SpecNote, colors.Reset)))
case resource.StatusUnknown:
fmt.Fprint(out, opts.Color.Colorize(
fmt.Sprintf("%vthis failure was catastrophic and the provider cannot guarantee recovery%v",
colors.SpecAttention, colors.Reset)))
default:
contract.Failf("Unrecognized resource state: %v", event.Status)
}
fmt.Fprint(out, "\n")
// It's not actually useful or interesting to print out any details about
// the resource state here, because we always assume that the resource state
// is unknown if an error occurs.
//
// In the future, once we get more fine-grained error messages from providers,
// we can provide useful diagnostics here.
}
// nolint: gas

View file

@ -7,7 +7,6 @@ import (
"fmt"
"io"
"io/ioutil"
"strconv"
"sync"
"github.com/golang/glog"
@ -212,13 +211,6 @@ func (d *defaultSink) Stringify(sev Severity, diag *Diag, args ...interface{}) s
}
buffer.WriteString(string(sev))
if diag.ID > 0 {
buffer.WriteString(" ")
buffer.WriteString(DefaultSinkIDPrefix)
buffer.WriteString(strconv.Itoa(int(diag.ID)))
}
buffer.WriteString(": ")
buffer.WriteString(colors.Reset)

View file

@ -3,7 +3,6 @@ package engine
import (
"bytes"
"fmt"
"strconv"
"sync"
"github.com/pulumi/pulumi/pkg/diag"
@ -20,8 +19,6 @@ func newEventSink(events eventEmitter) diag.Sink {
}
}
const eventSinkIDPrefix = "PU"
// eventSink is a sink which writes all events to a channel
type eventSink struct {
events eventEmitter // the channel to emit events into.
@ -131,13 +128,6 @@ func (s *eventSink) Stringify(sev diag.Severity, d *diag.Diag, args ...interface
}
buffer.WriteString(string(sev))
if d.ID > 0 {
buffer.WriteString(" ")
buffer.WriteString(eventSinkIDPrefix)
buffer.WriteString(strconv.Itoa(int(d.ID)))
}
buffer.WriteString(": ")
buffer.WriteString(colors.Reset)

View file

@ -190,13 +190,13 @@ func printPlan(result *planResult) (ResourceChanges, error) {
actions := newPreviewActions(result.Options)
_, _, _, err := result.Walk(actions, true)
if err != nil {
return nil, errors.Wrapf(err, "An error occurred while advancing the preview")
return nil, errors.New("an error occurred while advancing the preview")
}
if !result.Options.Diag.Success() {
// If any error occurred while walking the plan, be sure to let the developer know. Otherwise,
// although error messages may have spewed to the output, the final lines of the plan may look fine.
return nil, errors.New("One or more errors occurred during this preview")
return nil, errors.New("one or more errors occurred during this preview")
}
// Emit an event with a summary of operation counts.

View file

@ -142,7 +142,21 @@ func (iter *PlanIterator) Apply(step Step, preview bool) (resource.Status, error
}
}
return status, err
// At this point, if err is not nil, we've already issued an error message through our
// diag subsystem and we need to bail.
//
// This error message is ultimately what's going to be presented to the user at the top
// level, so the message here is intentionally vague; we should have already presented
// a more specific error message.
if err != nil {
if preview {
return status, errors.New("preview failed")
}
return status, errors.New("update failed")
}
return status, nil
}
// Close terminates the iteration of this plan.

View file

@ -10,6 +10,8 @@ import (
"github.com/golang/glog"
pbempty "github.com/golang/protobuf/ptypes/empty"
"github.com/pkg/errors"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/pulumi/pulumi/pkg/resource"
"github.com/pulumi/pulumi/pkg/resource/config"
@ -75,8 +77,9 @@ func (p *provider) Configure(vars map[config.Key]string) error {
}
_, err := p.client.Configure(p.ctx.Request(), &pulumirpc.ConfigureRequest{Variables: config})
if err != nil {
glog.V(7).Infof("%s failed: err=%v", label, err)
return err
rpcStatus, _ := status.FromError(err)
glog.V(7).Infof("%s failed: err=%v", label, rpcStatus.Message())
return errors.New(rpcStatus.Message())
}
return nil
}
@ -105,8 +108,9 @@ func (p *provider) Check(urn resource.URN,
News: mnews,
})
if err != nil {
glog.V(7).Infof("%s failed: err=%v", label, err)
return nil, nil, err
rpcStatus, _ := status.FromError(err)
glog.V(7).Infof("%s failed: err=%v", label, rpcStatus.Message())
return nil, nil, errors.New(rpcStatus.Message())
}
// Unmarshal the provider inputs.
@ -158,8 +162,9 @@ func (p *provider) Diff(urn resource.URN, id resource.ID,
News: mnews,
})
if err != nil {
glog.V(7).Infof("%s failed: %v", label, err)
return DiffResult{}, err
rpcStatus, _ := status.FromError(err)
glog.V(7).Infof("%s failed: %v", label, rpcStatus.Message())
return DiffResult{}, errors.New(rpcStatus.Message())
}
var replaces []resource.PropertyKey
@ -199,8 +204,9 @@ func (p *provider) Create(urn resource.URN, props resource.PropertyMap) (resourc
Properties: mprops,
})
if err != nil {
glog.V(7).Infof("%s failed: err=%v", label, err)
return "", nil, resource.StatusUnknown, err
resourceStatus, rpcErr := resourceStateAndError(err)
glog.V(7).Infof("%s failed: err=%v", label, rpcErr)
return "", nil, resourceStatus, rpcErr
}
id := resource.ID(resp.GetId())
@ -249,8 +255,9 @@ func (p *provider) Update(urn resource.URN, id resource.ID,
resp, err := p.client.Update(p.ctx.Request(), req)
if err != nil {
glog.V(7).Infof("%s failed: %v", label, err)
return nil, resource.StatusUnknown, err
resourceStatus, rpcErr := resourceStateAndError(err)
glog.V(7).Infof("%s failed: %v", label, rpcErr)
return nil, resourceStatus, rpcErr
}
outs, err := UnmarshalProperties(resp.GetProperties(), MarshalOptions{
@ -283,8 +290,9 @@ func (p *provider) Delete(urn resource.URN, id resource.ID, props resource.Prope
}
if _, err := p.client.Delete(p.ctx.Request(), req); err != nil {
glog.V(7).Infof("%s failed: %v", label, err)
return resource.StatusUnknown, err
resourceStatus, rpcErr := resourceStateAndError(err)
glog.V(7).Infof("%s failed: %v", label, rpcErr)
return resourceStatus, rpcErr
}
glog.V(7).Infof("%s success", label)
@ -333,8 +341,9 @@ func (p *provider) GetPluginInfo() (workspace.PluginInfo, error) {
glog.V(7).Infof("%s executing", label)
resp, err := p.client.GetPluginInfo(p.ctx.Request(), &pbempty.Empty{})
if err != nil {
glog.V(7).Infof("%s failed: err=%v", label, err)
return workspace.PluginInfo{}, err
rpcStatus, _ := status.FromError(err)
glog.V(7).Infof("%s failed: err=%v", label, rpcStatus.Message())
return workspace.PluginInfo{}, errors.New(rpcStatus.Message())
}
var version *semver.Version
@ -358,3 +367,28 @@ func (p *provider) GetPluginInfo() (workspace.PluginInfo, error) {
// Close shuts down this provider by closing its underlying plugin,
// returning any error the plugin reports while closing.
func (p *provider) Close() error {
	return p.plug.Close()
}
// resourceStateAndError interprets an error obtained from a gRPC endpoint.
//
// gRPC gives us a `status.Status` structure as an `error` whenever our
// gRPC servers serve up an error. Each `status.Status` contains a code
// and a message. Based on the error code given to us, we can understand
// the state of our system and if our resource status is truly unknown.
//
// In general, our resource state is only really unknown if the server
// had an internal error, in which case it will serve one of `codes.Internal`,
// `codes.DataLoss`, or `codes.Unknown` to us.
//
// It returns the inferred resource.Status alongside an error carrying only
// the RPC status message, stripped of gRPC transport noise.
func resourceStateAndError(err error) (resource.Status, error) {
	rpcStatus, _ := status.FromError(err)
	glog.V(8).Infof("provider received rpc error `%s`: `%s`", rpcStatus.Code(), rpcStatus.Message())
	switch rpcStatus.Code() {
	// BUG FIX: Go switch cases do not fall through. The previous code used three
	// separate empty `case` labels, so `codes.Internal` and `codes.DataLoss`
	// matched an empty body, fell out of the switch, and were wrongly reported
	// as StatusOK. A single combined case list handles all three codes.
	case codes.Internal, codes.DataLoss, codes.Unknown:
		glog.V(8).Infof("rpc error kind `%s` may not be recoverable", rpcStatus.Code())
		return resource.StatusUnknown, errors.New(rpcStatus.Message())
	}
	glog.V(8).Infof("rpc error kind `%s` is well-understood and recoverable", rpcStatus.Code())
	return resource.StatusOK, errors.New(rpcStatus.Message())
}