// Copyright 2016-2018, Pulumi Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package dotconv converts a resource graph into its DOT digraph equivalent. This is useful for integration with
// various visualization tools, like Graphviz. Please see http://www.graphviz.org/content/dot-language for a thorough
// specification of the DOT file format.
package dotconv
import (
"bufio"
"fmt"
"io"
"strconv"
"strings"
"github.com/pulumi/pulumi/pkg/v3/graph"
"github.com/pulumi/pulumi/sdk/v3/go/common/util/contract"
)
// Print prints a resource graph.
func Print(g graph.Graph, w io.Writer) error {
// Allocate a new writer. In general, we will ignore write errors throughout this function, for simplicity, opting
// instead to return the result of flushing the buffer at the end, which is generally latching.
b := bufio.NewWriter(w)
// Print the graph header.
2017-06-14 01:47:55 +02:00
if _, err := b.WriteString("strict digraph {\n"); err != nil {
return err
}
// Initialize the frontier with unvisited graph vertices.
queued := make(map[graph.Vertex]bool)
frontier := make([]graph.Vertex, 0, len(g.Roots()))
for _, root := range g.Roots() {
to := root.To()
queued[to] = true
frontier = append(frontier, to)
}
// For now, we auto-generate IDs.
// TODO[pulumi/pulumi#76]: use the object URNs instead, once we have them.
c := 0
ids := make(map[graph.Vertex]string)
getID := func(v graph.Vertex) string {
if id, has := ids[v]; has {
return id
}
id := "Resource" + strconv.Itoa(c)
c++
ids[v] = id
return id
}
// Now, until the frontier is empty, emit entries into the stream.
indent := " "
emitted := make(map[graph.Vertex]bool)
for len(frontier) > 0 {
// Dequeue the head of the frontier.
v := frontier[0]
frontier = frontier[1:]
contract.Assert(!emitted[v])
emitted[v] = true
// Get and lazily allocate the ID for this vertex.
id := getID(v)
// Print this vertex; first its "label" (type) and then its direct dependencies.
// IDEA: consider serializing properties on the node also.
2017-06-14 01:47:55 +02:00
if _, err := b.WriteString(fmt.Sprintf("%v%v", indent, id)); err != nil {
2017-06-08 20:44:16 +02:00
return err
}
Implement updates This change is a first whack at implementing updates. Creation and deletion plans are pretty straightforward; we just take a single graph, topologically sort it, and perform the operations in the right order. For creation, this is in dependency order (things that are depended upon must be created before dependents); for deletion, this is in reverse-dependency order (things that depend on others must be deleted before dependencies). These are just special cases of the more general idea of performing DAG operations in dependency order. Updates must work in terms of this more general notion. For example: * It is an error to delete a resource while another refers to it; thus, resources are deleted after deleting dependents, or after updating dependent properties that reference the resource to new values. * It is an error to depend on a create a resource before it is created; thus, resources must be created before dependents are created, and/or before updates to existing resource properties that would cause them to refer to the new resource. Of course, all of this is tangled up in a graph of dependencies. As a result, we must create a DAG of the dependencies between creates, updates, and deletes, and then topologically sort this DAG, in order to determine the proper order of update operations. To do this, we slightly generalize the existing graph infrastructure, while also specializing two kinds of graphs; the existing one becomes a heapstate.ObjectGraph, while this new one is resource.planGraph (internal).
2017-02-23 23:56:23 +01:00
if label := v.Label(); label != "" {
2017-06-14 01:47:55 +02:00
if _, err := b.WriteString(fmt.Sprintf(" [label=\"%v\"]", label)); err != nil {
2017-06-08 20:44:16 +02:00
return err
}
}
2017-06-14 01:47:55 +02:00
if _, err := b.WriteString(";\n"); err != nil {
2017-06-08 20:44:16 +02:00
return err
}
// Now print out all dependencies as "ID -> {A ... Z}".
outs := v.Outs()
if len(outs) > 0 {
base := fmt.Sprintf("%v%v", indent, id)
// Print the ID of each dependency and, for those we haven't seen, add them to the frontier.
for _, out := range outs {
to := out.To()
if _, err := b.WriteString(fmt.Sprintf("%s -> %s", base, getID(to))); err != nil {
return err
}
var attrs []string
if out.Color() != "" {
attrs = append(attrs, fmt.Sprintf("color = \"%s\"", out.Color()))
}
if out.Label() != "" {
attrs = append(attrs, fmt.Sprintf("label = \"%s\"", out.Label()))
}
if len(attrs) > 0 {
if _, err := b.WriteString(fmt.Sprintf(" [%s]", strings.Join(attrs, ", "))); err != nil {
2017-06-08 20:44:16 +02:00
return err
}
}
if _, err := b.WriteString(";\n"); err != nil {
2017-06-08 20:44:16 +02:00
return err
}
if _, q := queued[to]; !q {
queued[to] = true
frontier = append(frontier, to)
}
}
}
}
// Finish the graph.
2017-06-14 01:47:55 +02:00
if _, err := b.WriteString("}\n"); err != nil {
2017-06-08 20:44:16 +02:00
return err
}
return b.Flush()
}