Begin merging MuPackage/MuIL into the compiler

This is the first change of many to merge the MuPack/MuIL formats
into the heart of the "compiler".

In fact, the entire meaning of the compiler has changed: rather
than taking metadata and producing CloudFormation, it now takes
MuPack/MuIL as input and produces a MuGL graph as output.
Although this process is distinctly different, there are several
aspects we can reuse, such as workspace management, dependency
resolution, and some amount of name binding and symbol resolution.

An overview of the compilation process is available as a comment
inside the compiler.Compile function, although that function is
currently unimplemented.

The relationship between Workspace and Compiler has been semi-
inverted, such that all Compiler instances require a Workspace
object.  This is more natural anyway and moves some of the detection
logic "outside" of the Compiler.  Similarly, Options has moved to
a top-level package, so that Workspace and Compiler may share
access to it without causing package import cycles.
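
Roughly, the new pieces wire together like this (a sketch only:
the package name, path, and error handling are illustrative, and
Compile is still a stub that returns nil):

    package sketch

    import (
        "github.com/marapongo/mu/pkg/compiler"
        "github.com/marapongo/mu/pkg/graph"
        "github.com/marapongo/mu/pkg/options"
        "github.com/marapongo/mu/pkg/pack"
        "github.com/marapongo/mu/pkg/workspace"
    )

    // compileOne: options now live in a top-level package, a workspace is
    // created from them, and every compiler requires a workspace.
    func compileOne(path string, pkg *pack.Package) (graph.Graph, error) {
        opts := options.Default(path)
        w, err := workspace.New(opts)
        if err != nil {
            return nil, err
        }
        mup := compiler.New(w)
        return mup.Compile(pkg), nil // MuPack/MuIL in, MuGL graph out
    }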

Finally, all that templating crap is gone.  This alone is cause
for mass celebration!
joeduffy 2017-01-17 17:04:15 -08:00
parent bbb60799f8
commit 01658d04bb
32 changed files with 257 additions and 573 deletions

View file

@ -11,10 +11,9 @@ import (
"github.com/golang/glog"
"github.com/spf13/cobra"
"github.com/marapongo/mu/pkg/cmdutil"
"github.com/marapongo/mu/pkg/compiler"
"github.com/marapongo/mu/pkg/compiler/backends"
"github.com/marapongo/mu/pkg/compiler/backends/clouds"
"github.com/marapongo/mu/pkg/compiler/backends/schedulers"
"github.com/marapongo/mu/pkg/options"
)
// defaultIn is where the Mu compiler looks for inputs by default.
@ -27,7 +26,6 @@ func newBuildCmd() *cobra.Command {
var outp string
var cluster string
var targetArch string
var skipCodegen bool
var cmd = &cobra.Command{
Use: "build [source] -- [args]",
Short: "Compile a Mu Stack",
@ -52,19 +50,15 @@ func newBuildCmd() *cobra.Command {
glog.Fatal(err)
}
opts := compiler.DefaultOpts(abs)
if skipCodegen {
opts.SkipCodegen = true
}
opts := options.Default(abs)
// Set the cluster and architecture if specified.
opts.Cluster = cluster
setCloudArchOptions(targetArch, opts)
cmdutil.SetCloudArchOptions(targetArch, opts)
// See if there are any arguments and, if so, accumulate them.
if len(sargs) > 0 {
opts.Args = make(map[string]string)
opts.Args = make(map[string]interface{})
// TODO[marapongo/mu#7]: This is a very rudimentary parser. We can and should do better.
for i := 0; i < len(sargs); i++ {
sarg := sargs[i]
@ -94,8 +88,11 @@ func newBuildCmd() *cobra.Command {
}
// Now new up a compiler and actually perform the build.
mup := compiler.NewCompiler(opts)
mup.Build(abs, outp)
mup, err := compiler.NewDefault(abs, opts)
if err != nil {
fmt.Fprintf(os.Stderr, "fatal: %v", err)
}
mup.Compile(nil)
},
}
@ -108,45 +105,6 @@ func newBuildCmd() *cobra.Command {
cmd.PersistentFlags().StringVarP(
&targetArch, "target", "t", "",
"Generate output for the target cloud architecture (format: \"cloud[:scheduler]\")")
cmd.PersistentFlags().BoolVar(
&skipCodegen, "skip-codegen", false,
"Skip code-generation phases of the compiler")
return cmd
}
func setCloudArchOptions(arch string, opts *compiler.Options) {
// If an architecture was specified, parse the pieces and set the options. This isn't required because stacks
// and workspaces can have defaults. This simply overrides or provides one where none exists.
if arch != "" {
// The format is "cloud[:scheduler]"; parse out the pieces.
var cloud string
var scheduler string
if delim := strings.IndexRune(arch, ':'); delim != -1 {
cloud = arch[:delim]
scheduler = arch[delim+1:]
} else {
cloud = arch
}
cloudArch, ok := clouds.Values[cloud]
if !ok {
fmt.Fprintf(os.Stderr, "Unrecognized cloud arch '%v'\n", cloud)
os.Exit(-1)
}
var schedulerArch schedulers.Arch
if scheduler != "" {
schedulerArch, ok = schedulers.Values[scheduler]
if !ok {
fmt.Fprintf(os.Stderr, "Unrecognized cloud scheduler arch '%v'\n", scheduler)
os.Exit(-1)
}
}
opts.Arch = backends.Arch{
Cloud: cloudArch,
Scheduler: schedulerArch,
}
}
}

View file

@ -8,6 +8,7 @@ import (
"github.com/spf13/cobra"
"github.com/marapongo/mu/pkg/cmdutil"
"github.com/marapongo/mu/pkg/pack"
)
@ -38,7 +39,7 @@ func newCompileCmd() *cobra.Command {
var pkg *pack.Package
if len(args) > 0 {
// The user has specified a path (or requested Stdin).
pkg = readPackageFromArg(args[0])
pkg = cmdutil.ReadPackageFromArg(args[0])
} else {
// Otherwise, use default Mu package name.
fmt.Fprintf(os.Stderr, "error: Default package names NYI")

View file

@ -9,6 +9,7 @@ import (
"github.com/spf13/cobra"
"github.com/marapongo/mu/pkg/cmdutil"
"github.com/marapongo/mu/pkg/pack"
"github.com/marapongo/mu/pkg/pack/ast"
"github.com/marapongo/mu/pkg/symbols"
@ -34,7 +35,7 @@ func newDescribeCmd() *cobra.Command {
// Enumerate the list of packages, deserialize them, and print information.
for _, arg := range args {
pkg := readPackageFromArg(arg)
pkg := cmdutil.ReadPackageFromArg(arg)
if pkg == nil {
break
}
@ -212,7 +213,7 @@ func printModuleMember(name symbols.Token, member ast.ModuleMember, exportOnly b
case ast.ModuleMethodKind:
printModuleMethod(name, member.(*ast.ModuleMethod), indent)
default:
contract.FailMF("Unexpected ModuleMember kind: %v\n", member.GetKind())
contract.Failf("Unexpected ModuleMember kind: %v\n", member.GetKind())
}
}
}
@ -277,7 +278,7 @@ func printClassMember(name symbols.Token, member ast.ClassMember, exportOnly boo
case ast.ClassMethodKind:
printClassMethod(name, member.(*ast.ClassMethod), indent)
default:
contract.FailMF("Unexpected ClassMember kind: %v\n", member.GetKind())
contract.Failf("Unexpected ClassMember kind: %v\n", member.GetKind())
}
}
}

View file

@ -18,7 +18,7 @@ func newGetCmd() *cobra.Command {
"to download dependencies referenced by the current Stack. Otherwise, if one\n" +
"or more specific dependencies are provided, only those will be downloaded.",
Run: func(cmd *cobra.Command, args []string) {
contract.FailM("Get command is not yet implemented")
contract.Failf("Get command is not yet implemented")
},
}

View file

@ -161,7 +161,7 @@ func ToValue(l ast.Literal) interface{} {
}
return m
default:
contract.FailMF("Unexpected map key type: %v", keyt)
contract.Failf("Unexpected map key type: %v", keyt)
return nil
}
case ast.SchemaLiteral:
@ -171,7 +171,7 @@ func ToValue(l ast.Literal) interface{} {
}
return p
default:
contract.FailM("Unexpected literal type")
contract.Failf("Unexpected literal type")
return nil
}
}

View file

@ -23,7 +23,7 @@ func IsName(s string) bool {
// AsName converts a given string to a Name, asserting its validity.
func AsName(s string) Name {
contract.AssertMF(IsName(s), "Expected string '%v' to be a name (%v)", s, NameRegexps)
contract.Assertf(IsName(s), "Expected string '%v' to be a name (%v)", s, NameRegexps)
return Name(s)
}

View file

@ -59,7 +59,7 @@ func (r Ref) Parse() (RefParts, error) {
// MustParse parses the parts of a Ref into a RefParts, failing fast if parsing fails.
func (r Ref) MustParse() RefParts {
p, err := r.Parse()
contract.AssertMF(err == nil, "Expected a nil error from Ref.Parse; got %v", err)
contract.Assertf(err == nil, "Expected a nil error from Ref.Parse; got %v", err)
return p
}

View file

@ -89,7 +89,7 @@ func (ty *Type) Name() Ref {
return Ref(fmt.Sprintf(string(TypeDecorsMap), ty.Decors.KeyType.Name(), ty.Decors.ValueType.Name()))
}
} else {
contract.FailM("Expected this type to have one of primitive, stack, schema, unref, resref, or decors")
contract.Failf("Expected this type to have one of primitive, stack, schema, unref, resref, or decors")
return Ref("")
}
}

50
pkg/cmdutil/clouds.go Normal file
View file

@ -0,0 +1,50 @@
// Copyright 2016 Marapongo, Inc. All rights reserved.
package cmdutil
import (
"fmt"
"os"
"strings"
"github.com/marapongo/mu/pkg/compiler/backends"
"github.com/marapongo/mu/pkg/compiler/backends/clouds"
"github.com/marapongo/mu/pkg/compiler/backends/schedulers"
"github.com/marapongo/mu/pkg/options"
)
func SetCloudArchOptions(arch string, opts *options.Options) {
// If an architecture was specified, parse the pieces and set the options. This isn't required because stacks
// and workspaces can have defaults. This simply overrides or provides one where none exists.
if arch != "" {
// The format is "cloud[:scheduler]"; parse out the pieces.
var cloud string
var scheduler string
if delim := strings.IndexRune(arch, ':'); delim != -1 {
cloud = arch[:delim]
scheduler = arch[delim+1:]
} else {
cloud = arch
}
cloudArch, ok := clouds.Values[cloud]
if !ok {
fmt.Fprintf(os.Stderr, "Unrecognized cloud arch '%v'\n", cloud)
os.Exit(-1)
}
var schedulerArch schedulers.Arch
if scheduler != "" {
schedulerArch, ok = schedulers.Values[scheduler]
if !ok {
fmt.Fprintf(os.Stderr, "Unrecognized cloud scheduler arch '%v'\n", scheduler)
os.Exit(-1)
}
}
opts.Arch = backends.Arch{
Cloud: cloudArch,
Scheduler: schedulerArch,
}
}
}

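A minimal usage sketch of the relocated helper; the "aws:ecs" value is only an example and assumes those names are registered in clouds.Values and schedulers.Values:

    package sketch

    import (
        "github.com/marapongo/mu/pkg/cmdutil"
        "github.com/marapongo/mu/pkg/options"
    )

    func targetAWSECS() *options.Options {
        opts := options.Default(".")
        // Format is "cloud[:scheduler]"; unrecognized names print an error and exit.
        cmdutil.SetCloudArchOptions("aws:ecs", opts)
        return opts
    }
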
View file

@ -1,6 +1,6 @@
// Copyright 2016 Marapongo, Inc. All rights reserved.
package cmd
package cmdutil
import (
"fmt"
@ -12,9 +12,9 @@ import (
"github.com/marapongo/mu/pkg/pack"
)
// readPackage attempts to read a package from the given path; if an error occurs, it will be printed to Stderr, and
// ReadPackage attempts to read a package from the given path; if an error occurs, it will be printed to Stderr, and
// the returned value will be nil.
func readPackage(path string) *pack.Package {
func ReadPackage(path string) *pack.Package {
// Lookup the marshaler for this format.
ext := filepath.Ext(path)
m, has := encoding.Marshalers[ext]
@ -31,25 +31,25 @@ func readPackage(path string) *pack.Package {
return nil
}
return decodePackage(m, b, path)
return DecodePackage(m, b, path)
}
// readPackageFromArg reads a package from an argument value. It can be "-" to request reading from Stdin, and is
// ReadPackageFromArg reads a package from an argument value. It can be "-" to request reading from Stdin, and is
// interpreted as a path otherwise. If an error occurs, it is printed to Stderr, and the returned value will be nil.
func readPackageFromArg(arg string) *pack.Package {
func ReadPackageFromArg(arg string) *pack.Package {
if arg == "-" {
// Read the package from stdin.
return readPackageFromStdin()
return ReadPackageFromStdin()
} else {
// Read the package from a file.
return readPackage(arg)
return ReadPackage(arg)
}
}
// readPackageFromStdin attempts to read a package from Stdin; if an error occurs, it will be printed to Stderr, and
// ReadPackageFromStdin attempts to read a package from Stdin; if an error occurs, it will be printed to Stderr, and
// the returned value will be nil.
func readPackageFromStdin() *pack.Package {
func ReadPackageFromStdin() *pack.Package {
b, err := ioutil.ReadAll(os.Stdin)
if err != nil {
fmt.Fprintf(os.Stderr, "error: could not read from stdin\n")
@ -57,12 +57,12 @@ func readPackageFromStdin() *pack.Package {
return nil
}
return decodePackage(encoding.Marshalers[".json"], b, "stdin")
return DecodePackage(encoding.Marshalers[".json"], b, "stdin")
}
// decodePackage turns a byte array into a package using the given marshaler. If an error occurs, it is printed to
// DecodePackage turns a byte array into a package using the given marshaler. If an error occurs, it is printed to
// Stderr, and the returned package value will be nil.
func decodePackage(m encoding.Marshaler, b []byte, path string) *pack.Package {
func DecodePackage(m encoding.Marshaler, b []byte, path string) *pack.Package {
// Unmarshal the contents into a fresh package.
pkg, err := encoding.Decode(m, b)
if err != nil {

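For illustration, this is how the compile and describe commands now consume the exported helpers; "-" selects stdin, and a nil result means the error was already reported:

    package sketch

    import "github.com/marapongo/mu/pkg/cmdutil"

    func loadPackage(arg string) bool {
        pkg := cmdutil.ReadPackageFromArg(arg) // "-" reads from stdin; otherwise treated as a path
        return pkg != nil                      // errors have already gone to stderr
    }
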
View file

@ -117,7 +117,7 @@ func (c *awsCloud) genResourceDependsID(ref *ast.ServiceRef) cfLogicalID {
// TODO: this works "one-level deep"; however, we will need to figure out a scheme for logical dependencies;
// that is, dependencies on stacks that are merely a composition of many other stacks.
contract.AssertMF(len(sel.BoundType.Services.Public) == 1,
contract.Assertf(len(sel.BoundType.Services.Public) == 1,
"expected service type '%v' to export a single public service; got %v",
sel.BoundType.Name, len(sel.BoundType.Services.Public))
for _, s := range sel.BoundType.Services.Public {

View file

@ -30,7 +30,7 @@ type cfIntrinsic struct {
// AsCFIntrinsic converts a given service to a CloudFormationService, validating it as we go.
func asCFIntrinsic(svc *ast.Service) *cfIntrinsic {
contract.AssertM(svc.BoundType.Name == cfIntrinsicName, "asCFIntrinsic expects a bound CF service type")
contract.Assertf(svc.BoundType.Name == cfIntrinsicName, "asCFIntrinsic expects a bound CF service type")
res := &cfIntrinsic{
Service: svc,
@ -40,7 +40,7 @@ func asCFIntrinsic(svc *ast.Service) *cfIntrinsic {
res.Resource, ok = conv.ToString(r)
contract.Assert(ok)
} else {
contract.FailMF("Expected a required 'resource' property")
contract.Failf("Expected a required 'resource' property")
}
if do, ok := svc.BoundProperties[cfIntrinsicDependsOn]; ok {
res.DependsOn, ok = conv.ToServiceArray(do)

View file

@ -20,9 +20,9 @@ func New(arch Arch, d diag.Sink) core.Backend {
// TODO(joe): come up with a way to get options from CLI/workspace/etc. to here.
cloud = aws.New(d, aws.Options{})
case clouds.None:
contract.FailM("Expected a non-None cloud architecture")
contract.Failf("Expected a non-None cloud architecture")
default:
contract.FailMF("Cloud architecture '%v' not yet supported", clouds.Names[arch.Cloud])
contract.Failf("Cloud architecture '%v' not yet supported", clouds.Names[arch.Cloud])
}
contract.Assert(cloud != nil)
contract.Assert(cloud.Arch() == arch.Cloud)
@ -38,7 +38,7 @@ func New(arch Arch, d diag.Sink) core.Backend {
case schedulers.AWSECS:
scheduler = awsecs.New(d, cloud)
default:
contract.FailMF("Scheduler architecture '%v' not yet supported", schedulers.Names[arch.Scheduler])
contract.Failf("Scheduler architecture '%v' not yet supported", schedulers.Names[arch.Scheduler])
}
if scheduler != nil {
contract.Assert(scheduler.Arch() == arch.Scheduler)

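A hedged sketch of constructing a backend directly; clouds.AWS is assumed to be the constant handled by the aws.New case above, and the working directory is illustrative:

    package sketch

    import (
        "github.com/marapongo/mu/pkg/compiler/backends"
        "github.com/marapongo/mu/pkg/compiler/backends/clouds"
        "github.com/marapongo/mu/pkg/compiler/backends/schedulers"
        "github.com/marapongo/mu/pkg/compiler/core"
        "github.com/marapongo/mu/pkg/diag"
    )

    func newAWSECSBackend() core.Backend {
        arch := backends.Arch{Cloud: clouds.AWS, Scheduler: schedulers.AWSECS}
        return backends.New(arch, diag.DefaultSink("."))
    }
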
View file

@ -102,37 +102,37 @@ func (b *binder) ValidateStack(stack *ast.Stack) {
// LookupService binds a name to a Service type.
func (b *binder) LookupService(nm ast.Name) (*ast.Service, bool) {
contract.AssertM(b.scope != nil, "Unexpected empty binding scope during LookupService")
contract.Assertf(b.scope != nil, "Unexpected empty binding scope during LookupService")
return b.scope.LookupService(nm)
}
// LookupStack binds a name to a Stack type.
func (b *binder) LookupStack(nm ast.Name) (*ast.Stack, bool) {
contract.AssertM(b.scope != nil, "Unexpected empty binding scope during LookupStack")
contract.Assertf(b.scope != nil, "Unexpected empty binding scope during LookupStack")
return b.scope.LookupStack(nm)
}
// LookupUninstStack binds a name to a UninstStack type.
func (b *binder) LookupUninstStack(nm ast.Name) (*ast.UninstStack, bool) {
contract.AssertM(b.scope != nil, "Unexpected empty binding scope during LookupUninstStack")
contract.Assertf(b.scope != nil, "Unexpected empty binding scope during LookupUninstStack")
return b.scope.LookupUninstStack(nm)
}
// LookupSchema binds a name to a Schema type.
func (b *binder) LookupSchema(nm ast.Name) (*ast.Schema, bool) {
contract.AssertM(b.scope != nil, "Unexpected empty binding scope during LookupSchema")
contract.Assertf(b.scope != nil, "Unexpected empty binding scope during LookupSchema")
return b.scope.LookupSchema(nm)
}
// LookupSymbol binds a name to any kind of Symbol.
func (b *binder) LookupSymbol(nm ast.Name) (*Symbol, bool) {
contract.AssertM(b.scope != nil, "Unexpected empty binding scope during LookupSymbol")
contract.Assertf(b.scope != nil, "Unexpected empty binding scope during LookupSymbol")
return b.scope.LookupSymbol(nm)
}
// RegisterSymbol registers a symbol with the given name; if it already exists, the function returns false.
func (b *binder) RegisterSymbol(sym *Symbol) bool {
contract.AssertM(b.scope != nil, "Unexpected empty binding scope during RegisterSymbol")
contract.Assertf(b.scope != nil, "Unexpected empty binding scope during RegisterSymbol")
return b.scope.RegisterSymbol(sym)
}
@ -143,7 +143,7 @@ func (b *binder) PushScope() {
// PopScope replaces the current scope with its parent.
func (b *binder) PopScope() {
contract.AssertM(b.scope != nil, "Unexpected empty binding scope during pop")
contract.Assertf(b.scope != nil, "Unexpected empty binding scope during pop")
b.scope = b.scope.parent
}
@ -454,7 +454,7 @@ func (p *binderBindPhase) VisitService(pstack *ast.Stack, parent *ast.Services,
svc *ast.Service) {
// The service's type has been prepared in phase 1, and must now be bound to a symbol. All shorthand type
// expressions, intra stack references, cycles, and so forth, will have been taken care of by this earlier phase.
contract.AssertMF(svc.Type != "",
contract.Assertf(svc.Type != "",
"Expected all Services to have types in binding phase2; %v is missing one", svc.Name)
svc.BoundType = p.ensureStack(svc.Type, svc.Properties)
@ -505,7 +505,7 @@ func (p *binderBindPhase) ensureType(ref ast.Ref) *ast.Type {
if exists {
return ast.NewSchemaType(schema)
}
contract.FailMF("Expected 1st pass of binding to guarantee type %v exists (%v)", ref, nm)
contract.Failf("Expected 1st pass of binding to guarantee type %v exists (%v)", ref, nm)
return nil
}
@ -627,7 +627,7 @@ func (p *binderValidatePhase) bindValue(node *ast.Node, val interface{}, ty *ast
} else if ty.IsSchema() {
lit = p.bindSchemaValue(node, val, ty.Schema)
} else if ty.IsUnresolvedRef() {
contract.FailM("Expected all unresolved refs to be gone by this phase in binding")
contract.Failf("Expected all unresolved refs to be gone by this phase in binding")
}
if lit == nil {
@ -722,7 +722,7 @@ func (p *binderValidatePhase) bindPrimitiveValue(node *ast.Node, val interface{}
// table, and store a strong reference to the result. This lets the backend connect the dots.
return p.bindServiceValue(node, val, nil)
default:
contract.FailMF("Unrecognized primitive type: %v", prim)
contract.Failf("Unrecognized primitive type: %v", prim)
return nil
}
}
@ -884,7 +884,7 @@ func (p *binderValidatePhase) bindServiceRef(node *ast.Node, val string, ty *ast
var ref *ast.ServiceRef
if svc, ok := p.b.LookupService(ast.Name(nm)); ok {
svct := svc.BoundType
contract.AssertMF(svct != nil, "Expected service '%v' to have a type", svc.Name)
contract.Assertf(svct != nil, "Expected service '%v' to have a type", svc.Name)
var selsvc *ast.Service
if sel == "" {

View file

@ -3,6 +3,7 @@
package compiler
import (
"fmt"
"os"
"github.com/golang/glog"
@ -11,6 +12,9 @@ import (
"github.com/marapongo/mu/pkg/compiler/core"
"github.com/marapongo/mu/pkg/diag"
"github.com/marapongo/mu/pkg/errors"
"github.com/marapongo/mu/pkg/graph"
"github.com/marapongo/mu/pkg/options"
"github.com/marapongo/mu/pkg/pack"
"github.com/marapongo/mu/pkg/util/contract"
"github.com/marapongo/mu/pkg/workspace"
)
@ -19,109 +23,89 @@ import (
type Compiler interface {
core.Phase
// Context returns the current compiler context.
Context() *Context
Options() *options.Options // the options this compiler is using.
Workspace() workspace.W // the workspace that this compiler is using.
// Build detects and compiles inputs from the given location, storing build artifacts in the given destination.
Build(inp string, outp string)
// BuildFile uses the given Mufile directly, and stores build artifacts in the given destination.
BuildFile(mufile []byte, ext string, outp string)
// Compile takes a MuPackage as input and compiles it into a MuGL graph.
Compile(pkg *pack.Package) graph.Graph
}
// compiler is the canonical implementation of the Mu compiler.
type compiler struct {
opts *Options
ctx *Context
w workspace.W
deps map[ast.Ref]*diag.Document // a cache of mapping names to loaded dependencies.
}
// NewCompiler creates a new instance of the Mu compiler, with the given initialization settings.
func NewCompiler(opts *Options) Compiler {
// New creates a new instance of the Mu compiler with the given workspace and options.
func New(w workspace.W) Compiler {
contract.Requiref(w != nil, "w", "!= nil")
return &compiler{
opts: opts,
ctx: NewContext(opts),
w: w,
deps: make(map[ast.Ref]*diag.Document),
}
}
func (c *compiler) Context() *Context {
return c.ctx
// NewDefault creates a new instance of the Mu compiler, along with a new workspace, from the given path. If options
// is nil, the default compiler options will be used instead. If any IO errors occur, they will be output in the usual
// diagnostics ways, and the compiler return value will be nil while the error will be non-nil.
func NewDefault(path string, opts *options.Options) (Compiler, error) {
if opts == nil {
opts = options.Default(path)
} else {
opts.Pwd = path
}
w, err := workspace.New(opts)
if err != nil {
opts.Diag.Errorf(errors.ErrorIO.AtFile(path), err)
return nil, fmt.Errorf("cannot proceed without a workspace")
}
return New(w), nil
}
func (c *compiler) Diag() diag.Sink {
return c.opts.Diag
// NewDefaultwd creates a new instance of the Mu compiler, along with a new workspace, from the current working
// directory. If options is nil, the default compiler options will be used instead. If any IO errors occur, they will
// be output in the usual diagnostics ways, and the compiler return value will be nil while the error will be non-nil.
func NewDefaultwd(opts *options.Options) (Compiler, error) {
pwd, err := os.Getwd()
contract.Assertf(err == nil, "Unexpected os.Getwd error: %v", err)
return NewDefault(pwd, opts)
}
func (c *compiler) Build(inp string, outp string) {
glog.Infof("Building target '%v' (out='%v')", inp, outp)
func (c *compiler) Diag() diag.Sink { return c.Options().Diag }
func (c *compiler) Options() *options.Options { return c.w.Options() }
func (c *compiler) Workspace() workspace.W { return c.w }
// First find the root of the current package based on the location of its Mufile.
w, err := workspace.New(inp, c.Diag())
if err != nil {
c.Diag().Errorf(errors.ErrorIO.AtFile(inp), err)
return
}
// Now actually locate, load, and parse the Mufile.
mufile, err := w.DetectMufile()
if err != nil {
c.Diag().Errorf(errors.ErrorIO.AtFile(inp), err)
return
}
if mufile == "" {
c.Diag().Errorf(errors.ErrorMissingMufile, inp)
return
}
// Read in the contents of the document and make it available to subsequent stages.
doc, err := diag.ReadDocument(mufile)
if err != nil {
c.Diag().Errorf(errors.ErrorCouldNotReadMufile.AtFile(mufile), err)
return
}
c.buildDocument(w, doc, outp)
func (c *compiler) Compile(pkg *pack.Package) graph.Graph {
contract.Requiref(pkg != nil, "pkg", "!= nil")
return c.compilePackage(pkg)
}
func (c *compiler) BuildFile(mufile []byte, ext string, outp string) {
glog.Infof("Building in-memory %v file (bytes=%v out='%v')", ext, len(mufile), outp)
// Default to the current working directory for the workspace.
dir, err := os.Getwd()
if err != nil {
c.Diag().Errorf(errors.ErrorIO, err)
return
}
w, err := workspace.New(dir, c.Diag())
if err != nil {
c.Diag().Errorf(errors.ErrorIO, err)
return
}
doc := &diag.Document{File: workspace.Mufile + ext, Body: mufile}
c.buildDocument(w, doc, outp)
}
func (c *compiler) buildDocument(w workspace.W, doc *diag.Document, outp string) {
glog.Infof("Building doc '%v' (bytes=%v out='%v')", doc.File, len(doc.Body), outp)
func (c *compiler) compilePackage(pkg *pack.Package) graph.Graph {
glog.Infof("Compiling package '%v' (w=%v)", pkg.Name, c.w.Root())
if glog.V(2) {
defer glog.V(2).Infof("Building doc '%v' completed w/ %v warnings and %v errors",
doc.File, c.Diag().Warnings(), c.Diag().Errors())
defer glog.V(2).Infof("Building package '%v' completed w/ %v warnings and %v errors",
pkg.Name, c.Diag().Warnings(), c.Diag().Errors())
}
// Perform the front-end phases of the compiler.
stack := c.buildDocumentFE(w, doc)
if !c.Diag().Success() {
return
}
contract.Assert(stack != nil)
// To compile a package, we require a decoded MuPackage object; this has already been done, and is presented to us
// as an argument. Next, we must bind its contents. To bind its contents, we must:
//
// * Resolve all dependency packages and inject them into a package map (just a map of names to symbols).
// * Bind each dependency package, in order, by recursing into the present algorithm.
// * Enumerate all modules, and for each:
// + Inject a module symbol into an export map associated with the package symbol.
// + Enumerate all module members, and for each:
// - Inject a symbol of the appropriate kind into the module's associated member map.
// - Enumerate any class symbols resulting from this process, and for each:
// . Inject a symbol of the appropriate kind into the class's associated member map.
//
// Essentially, all we are doing is mapping names to concrete symbols. This ensures that as we compile a package,
// we are able to find all tokens in these maps. If we ever cannot find a token in a map, it means the MuPackage
// file is invalid. We require all MetaMu compilers to produce valid, verifiable MuIL, and this is a requirement.
//
// Afterwards, we can safely evaluate the MuIL entrypoint, using our MuIL AST interpreter.
// Next, perform the semantic analysis phases of the compiler.
c.buildDocumentSema(w, stack)
if !c.Diag().Success() {
return
}
// Finally, perform the back-end phases of the compiler.
c.buildDocumentBE(w, stack)
return nil
}

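The binding pass described in the comment above is left unimplemented by this change; purely to illustrate the name-to-symbol mapping it outlines, here is a hedged sketch in which every type and name is hypothetical:

    package sketch

    // Hypothetical inputs: a package with dependencies, modules, and members.
    type member struct {
        name         string
        isClass      bool
        classMembers []string // member names, if this member is a class
    }

    type module struct {
        name    string
        members []member
    }

    type pkg struct {
        name    string
        deps    []*pkg
        modules []module
    }

    // Hypothetical symbols: maps of names to bound entities.
    type classSym struct{ members map[string]bool }
    type moduleSym struct{ members map[string]interface{} }
    type pkgSym struct{ modules map[string]*moduleSym }

    // bind maps names to symbols: dependencies first (recursively), then each
    // module, its members, and any class members, mirroring the outline above.
    func bind(p *pkg, bound map[string]*pkgSym) *pkgSym {
        if s, done := bound[p.name]; done {
            return s // shared dependencies are bound only once
        }
        for _, d := range p.deps {
            bind(d, bound)
        }
        s := &pkgSym{modules: map[string]*moduleSym{}}
        for _, m := range p.modules {
            ms := &moduleSym{members: map[string]interface{}{}}
            for _, mm := range m.members {
                if mm.isClass {
                    cs := &classSym{members: map[string]bool{}}
                    for _, cm := range mm.classMembers {
                        cs.members[cm] = true // the class's member map
                    }
                    ms.members[mm.name] = cs
                } else {
                    ms.members[mm.name] = mm
                }
            }
            s.modules[m.name] = ms // the package symbol's export map
        }
        bound[p.name] = s
        return s
    }
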
View file

@ -1,25 +0,0 @@
// Copyright 2016 Marapongo, Inc. All rights reserved.
package compiler
import (
"github.com/golang/glog"
"github.com/marapongo/mu/pkg/ast"
"github.com/marapongo/mu/pkg/compiler/backends"
"github.com/marapongo/mu/pkg/compiler/core"
"github.com/marapongo/mu/pkg/workspace"
)
// buildDocumentBE runs the back-end phases of the compiler.
func (c *compiler) buildDocumentBE(w workspace.W, stack *ast.Stack) {
if c.opts.SkipCodegen {
glog.V(2).Infof("Skipping code-generation (opts.SkipCodegen=true)")
} else {
glog.V(2).Infof("Stack %v targets cluster=%v arch=%v", stack.Name, c.ctx.Cluster.Name, c.ctx.Arch)
// Now get the backend cloud provider to process the stack from here on out.
be := backends.New(c.ctx.Arch, c.Diag())
be.CodeGen(core.Compiland{c.ctx.Cluster, stack})
}
}

View file

@ -11,7 +11,6 @@ import (
"github.com/marapongo/mu/pkg/compiler/backends/schedulers"
"github.com/marapongo/mu/pkg/diag"
"github.com/marapongo/mu/pkg/errors"
"github.com/marapongo/mu/pkg/util/contract"
"github.com/marapongo/mu/pkg/workspace"
)
@ -33,18 +32,16 @@ func (c *compiler) buildDocumentFE(w workspace.W, doc *diag.Document) *ast.Stack
}
// Determine what cloud target we will be using; we need this to process the Mufile and imports.
cl, a := c.detectClusterArch(w)
c.detectClusterArch(w)
if !c.Diag().Success() {
return nil
}
c.ctx = c.ctx.WithClusterArch(cl, a)
contract.Assert(c.ctx.Cluster != nil)
// Now parse the stack, using whatever args may have been supplied as the properties.
// TODO[marapongo/mu#7]: we want to strongly type the properties; e.g., a stack expecting a number should
// get a number, etc. However, to know that we must first have parsed the metadata for the target stack!
props := make(ast.PropertyBag)
for arg, val := range c.opts.Args {
for arg, val := range c.Options().Args {
props[arg] = val
}
stack := p.ParseStack(doc, props)
@ -63,15 +60,16 @@ func (c *compiler) detectClusterArch(w workspace.W) (*ast.Cluster, backends.Arch
// Cluster and architectures settings may come from one of two places, in order of search preference:
// 1) command line arguments.
// 2) cluster-wide settings in a workspace.
arch := c.opts.Arch
var arch backends.Arch
var cluster *ast.Cluster
// If a cluster was specified, look it up and load up its options.
var cluster *ast.Cluster
if c.opts.Cluster != "" {
if cl, exists := w.Settings().Clusters[c.opts.Cluster]; exists {
clname := c.Options().Cluster
if clname != "" {
if cl, exists := w.Settings().Clusters[clname]; exists {
cluster = cl
} else {
c.Diag().Errorf(errors.ErrorClusterNotFound, c.opts.Cluster)
c.Diag().Errorf(errors.ErrorClusterNotFound, clname)
return nil, arch
}
}
@ -88,7 +86,7 @@ func (c *compiler) detectClusterArch(w workspace.W) (*ast.Cluster, backends.Arch
if cluster == nil {
// If no target was found, and we don't have an architecture, error out.
if arch.Cloud == clouds.None && !c.opts.SkipCodegen {
if arch.Cloud == clouds.None {
c.Diag().Errorf(errors.ErrorMissingTarget)
return nil, arch
}

View file

@ -1,49 +0,0 @@
// Copyright 2016 Marapongo, Inc. All rights reserved.
package compiler
import (
"github.com/marapongo/mu/pkg/ast"
"github.com/marapongo/mu/pkg/compiler/backends"
"github.com/marapongo/mu/pkg/util/contract"
)
// Context holds all state available to any templates or code evaluated at compile-time.
type Context struct {
Options *Options // compiler options supplied.
Cluster *ast.Cluster // the cluster that we will deploy to.
Arch backends.Arch // the target cloud architecture.
Properties ast.PropertyBag // properties supplied at stack construction time.
}
// NewContext returns a new, empty context.
func NewContext(opts *Options) *Context {
return &Context{
Options: opts,
Properties: make(ast.PropertyBag),
}
}
// WithClusterArch returns a clone of this Context with the given cluster and architecture attached to it.
func (c *Context) WithClusterArch(cl *ast.Cluster, a backends.Arch) *Context {
contract.Assert(cl != nil)
return &Context{
Cluster: cl,
Arch: a,
Options: c.Options,
Properties: c.Properties,
}
}
// WithProps returns a clone of this Context with the given properties attached to it.
func (c *Context) WithProps(props ast.PropertyBag) *Context {
if props == nil {
props = make(ast.PropertyBag)
}
return &Context{
Cluster: c.Cluster,
Arch: c.Arch,
Options: c.Options,
Properties: props,
}
}

View file

@ -1,24 +0,0 @@
// Copyright 2016 Marapongo, Inc. All rights reserved.
package compiler
import (
"github.com/marapongo/mu/pkg/compiler/backends"
"github.com/marapongo/mu/pkg/diag"
)
// Options contains all of the settings a user can use to control the compiler's behavior.
type Options struct {
Diag diag.Sink // a sink to use for all diagnostics.
SkipCodegen bool // if true, no code-generation phases run.
Arch backends.Arch // a target cloud architecture.
Cluster string // a named cluster with predefined settings to target.
Args map[string]string // optional arguments passed at the CLI.
}
// DefaultOpts returns the default set of compiler options.
func DefaultOpts(pwd string) *Options {
return &Options{
Diag: diag.DefaultSink(pwd),
}
}

View file

@ -47,7 +47,7 @@ func (p *parser) ParseWorkspace(doc *diag.Document) *ast.Workspace {
// We support many file formats. Detect the file extension and deserialize the contents.
var w ast.Workspace
marshaler, has := encoding.Marshalers[doc.Ext()]
contract.AssertMF(has, "No marshaler registered for this workspace extension: %v", doc.Ext())
contract.Assertf(has, "No marshaler registered for this workspace extension: %v", doc.Ext())
if err := marshaler.Unmarshal(doc.Body, &w); err != nil {
p.Diag().Errorf(errors.ErrorIllegalWorkspaceSyntax.At(doc), err)
// TODO[marapongo/mu#14]: issue an error per issue found in the file with line/col numbers.
@ -78,27 +78,10 @@ func (p *parser) ParseStack(doc *diag.Document, props ast.PropertyBag) *ast.Stac
doc.File, p.Diag().Warnings(), p.Diag().Errors())
}
// Expand templates in the document first and foremost.
// TODO[marapongo/mu#7]: the order of template expansion is not clear. The way we've done it right now (i.e.,
// performing it right here), we haven't yet type-checked the properties supplied to the stack. As a result,
// there is less compile-time safety. And furthermore, the properties are in a map rather than being stored
// in structured types. In other words, this is really just a fancy pre-processor, rather than being well-
// integrated into the type system. To do that, however, we'd need to delay processing of templates, which
// itself will mess with our ability to parse the document. This is an area of future thinking.
// TODO[marapongo/mu#7]: related to this, certain information (like cluster target) isn't even available yet!
// TODO[marapongo/mu#14]: when we produce precise line/column errors, we'll need to somehow trace back to pre-
// template expansion, otherwise the numbers may not make sense to the user.
rend, err := RenderTemplates(doc, p.c.Context().WithProps(props))
if err != nil {
p.Diag().Errorf(errors.ErrorBadTemplate.At(doc), err)
return nil
}
doc = rend
// We support many file formats. Detect the file extension and deserialize the contents.
var stack ast.Stack
marshaler, has := encoding.Marshalers[doc.Ext()]
contract.AssertMF(has, "No marshaler registered for this Mufile extension: %v", doc.Ext())
contract.Assertf(has, "No marshaler registered for this Mufile extension: %v", doc.Ext())
if err := marshaler.Unmarshal(doc.Body, &stack); err != nil {
p.Diag().Errorf(errors.ErrorIllegalMufileSyntax.At(doc), err)
// TODO[marapongo/mu#14]: issue an error per issue found in the file with line/col numbers.

View file

@ -1,207 +0,0 @@
// Copyright 2016 Marapongo, Inc. All rights reserved.
package compiler
import (
"bytes"
"fmt"
"io/ioutil"
"path/filepath"
"text/template"
"github.com/Masterminds/sprig"
"github.com/golang/glog"
"github.com/marapongo/mu/pkg/ast"
"github.com/marapongo/mu/pkg/compiler/backends/clouds"
"github.com/marapongo/mu/pkg/compiler/backends/schedulers"
"github.com/marapongo/mu/pkg/diag"
"github.com/marapongo/mu/pkg/encoding"
"github.com/marapongo/mu/pkg/util/contract"
)
// RenderTemplates performs standard template substitution on the given buffer using the given properties object.
// TODO[marapongo/mu#7]: render many templates at once so they can share code.
// TODO[marapongo/mu#7]: support configuration sections, etc., that can also contain templates.
func RenderTemplates(doc *diag.Document, ctx *Context) (*diag.Document, error) {
glog.V(2).Infof("Rendering template %v", doc.File)
r, err := newRenderer(doc, ctx)
if err != nil {
return nil, err
}
// Now actually render the template.
b, err := r.Render()
if err != nil {
return nil, err
}
glog.V(7).Infof("Rendered template %v:\n%v", doc.File, string(b))
return &diag.Document{
File: doc.File,
Body: b,
Parent: doc,
}, nil
}
type renderer struct {
T *template.Template
doc *diag.Document
ctx *renderContext
}
func newRenderer(doc *diag.Document, ctx *Context) (*renderer, error) {
// Create a new renderer; note that the template will be set last.
r := &renderer{doc: doc, ctx: newRenderContext(ctx)}
// Now create the template; this is a multi-step process.
t := template.New(doc.File)
// We will issue errors if the template tries to use a key that doesn't exist.
// TODO[marapongo/mu#7]: consider having an option to relax this.
t.Option("missingkey=error")
// Add a stock set of helper functions to the template.
t = t.Funcs(r.standardTemplateFuncs())
// Parse up the resulting template from the provided document.
var err error
t, err = t.Parse(string(doc.Body))
if err != nil {
return nil, err
}
r.T = t
return r, nil
}
// Render renders the root template and returns the result, or an error, whichever occurs.
func (r *renderer) Render() ([]byte, error) {
b := bytes.NewBuffer(nil)
if err := r.T.Execute(b, r.ctx); err != nil {
return nil, err
}
return b.Bytes(), nil
}
// standardTemplateFuncs returns a new FuncMap containing all of the functions available to templates. It is a
// member function of renderer because it closes over its state and may use it recursively.
func (r *renderer) standardTemplateFuncs() template.FuncMap {
// Use the Sprig library to seed our map with a lot of useful functions.
// TODO[marapongo/mu#7]: audit these and add them one-by-one, so any changes are intentional. There also may be
// some that we don't actually want to offer.
funcs := sprig.TxtFuncMap()
// Panic abruptly quits the template processing by injecting an ordinary error into it.
funcs["panic"] = func(msg string, args ...interface{}) (string, error) {
return "", fmt.Errorf(msg, args...)
}
// Require checks that a condition is true, and errors out if it does not. This is useful for validation tasks.
funcs["require"] = func(cond bool, msg string, args ...interface{}) (string, error) {
if cond {
return "", nil
} else {
return "", fmt.Errorf(msg, args...)
}
}
// Include textually includes the given document, also expanding templates.
funcs["include"] = func(name string) (string, error) {
glog.V(3).Infof("Recursive include of template file: %v", name)
// Attempt to load the target file so that we may expand templates within it.
dir := filepath.Dir(r.doc.File)
path := filepath.Join(dir, name)
raw, err := ioutil.ReadFile(path)
if err != nil {
return "", err
}
// Now perform the template expansion.
b := bytes.NewBuffer(nil)
u, err := r.T.Parse(string(raw))
if err != nil {
return "", err
}
if err := u.Execute(b, r.ctx); err != nil {
return "", err
}
s := b.String()
glog.V(7).Infof("Recursively included template file %v:\n%v", name, s)
return s, nil
}
// Add functions to unmarshal structures into their JSON/YAML textual equivalents.
funcs["json"] = func(v interface{}) (string, error) {
res, err := encoding.JSON.Marshal(v)
return string(res), err
}
funcs["yaml"] = func(v interface{}) (string, error) {
res, err := encoding.YAML.Marshal(v)
return string(res), err
}
// Functions for interacting with maps.
funcs["has"] = func(m map[string]interface{}, k string) bool {
_, has := m[k]
return has
}
funcs["orElse"] = func(m map[string]interface{}, k string, els interface{}) interface{} {
if v, has := m[k]; has {
return v
}
return els
}
funcs["orEmpty"] = func(m map[string]interface{}, k string) interface{} {
if v, has := m[k]; has {
return v
}
return ""
}
// Functions for interacting with the mutable set of template variables.
funcs["get"] = func(key string) interface{} {
return r.ctx.Vars[key]
}
funcs["set"] = func(key string, v interface{}) string {
r.ctx.Vars[key] = v
return ""
}
return funcs
}
// renderContext is a "template-friendly" version of the Context object. Namely, certain structured types are projected
// as strings for easier usage within markup templates.
type renderContext struct {
Arch renderArch // the cloud architecture to target.
Cluster ast.Cluster // the cluster we will deploy to.
Options Options // any compiler options supplied.
Properties ast.PropertyBag // a set of properties associated with the current stack.
Vars ast.PropertyBag // mutable variables used throughout this template's evaluation.
}
// renderArch is just like a normal Arch, except it has been expanded into strings for easier usage.
type renderArch struct {
Cloud string
Scheduler string
}
func newRenderContext(ctx *Context) *renderContext {
contract.Assert(ctx != nil)
contract.Assert(ctx.Cluster != nil)
contract.Assert(ctx.Properties != nil)
return &renderContext{
Arch: renderArch{
Cloud: clouds.Names[ctx.Arch.Cloud],
Scheduler: schedulers.Names[ctx.Arch.Scheduler],
},
Cluster: *ctx.Cluster,
Options: *ctx.Options,
Properties: ctx.Properties,
Vars: make(ast.PropertyBag),
}
}

View file

@ -27,7 +27,7 @@ func decodeModuleMember(m mapper.Mapper, tree mapper.Object) (ast.ModuleMember,
case ast.ModuleMethodKind:
return decodeModuleMethod(m, tree)
default:
contract.FailMF("Unrecognized ModuleMember kind: %v\n", kind)
contract.Failf("Unrecognized ModuleMember kind: %v\n", kind)
}
}
return nil, nil
@ -54,7 +54,7 @@ func decodeClassMember(m mapper.Mapper, tree mapper.Object) (ast.ClassMember, er
case ast.ClassMethodKind:
return decodeClassMethod(m, tree)
default:
contract.FailMF("Unrecognized ClassMember kind: %v\n", kind)
contract.Failf("Unrecognized ClassMember kind: %v\n", kind)
}
}
return nil, nil

View file

@ -67,7 +67,7 @@ func decodeExpression(m mapper.Mapper, tree mapper.Object) (ast.Expression, erro
return decodeSequenceExpression(m, tree)
default:
contract.FailMF("Unrecognized Expression kind: %v\n%v\n", kind, tree)
contract.Failf("Unrecognized Expression kind: %v\n%v\n", kind, tree)
}
}
return nil, nil

View file

@ -55,7 +55,7 @@ func decodeStatement(m mapper.Mapper, tree mapper.Object) (ast.Statement, error)
return decodeExpressionStatement(m, tree)
default:
contract.FailMF("Unrecognized Statement kind: %v\n", kind)
contract.Failf("Unrecognized Statement kind: %v\n", kind)
}
}
return nil, nil

View file

@ -18,7 +18,7 @@ func init() {
case ".yaml":
Marshalers[ext] = YAML
default:
contract.FailMF("No Marshaler available for MufileExt %v", ext)
contract.Failf("No Marshaler available for MufileExt %v", ext)
}
}
}

25
pkg/options/opts.go Normal file
View file

@ -0,0 +1,25 @@
// Copyright 2016 Marapongo, Inc. All rights reserved.
package options
import (
"github.com/marapongo/mu/pkg/compiler/backends"
"github.com/marapongo/mu/pkg/diag"
)
// Options contains all of the settings a user can use to control the compiler's behavior.
type Options struct {
Pwd string // the working directory for the compilation.
Diag diag.Sink // a sink to use for all diagnostics.
Arch backends.Arch // a target cloud architecture.
Cluster string // a named cluster with predefined settings to target.
Args map[string]interface{} // optional blueprint arguments passed at the CLI.
}
// Default returns the default set of compiler options.
func Default(pwd string) *Options {
return &Options{
Pwd: pwd,
Diag: diag.DefaultSink(pwd),
}
}

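For illustration, the defaults can then be overridden field by field; the cluster name and argument values below are made up:

    package sketch

    import "github.com/marapongo/mu/pkg/options"

    func exampleOptions(pwd string) *options.Options {
        opts := options.Default(pwd) // fills in Pwd and a default diagnostics sink
        opts.Cluster = "production"  // a named cluster (hypothetical)
        opts.Args = map[string]interface{}{"replicas": 3} // blueprint args, now loosely typed
        return opts
    }
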
View file

@ -15,15 +15,8 @@ func Assert(cond bool) {
}
}
// AssertM checks a condition and FailsMs if it is false, logging the given message.
func AssertM(cond bool, msg string) {
if !cond {
failfast(fmt.Sprintf("%v: %v", assertMsg, msg))
}
}
// AssertMF checks a condition and FailsMFs if it is false, formatting and logging the given message.
func AssertMF(cond bool, msg string, args ...interface{}) {
// Assertf checks a condition and Failfs if it is false, formatting and logging the given message.
func Assertf(cond bool, msg string, args ...interface{}) {
if !cond {
failfast(fmt.Sprintf("%v: %v", assertMsg, fmt.Sprintf(msg, args...)))
}

View file

@ -4,8 +4,6 @@ package contract
import (
"fmt"
"github.com/golang/glog"
)
const failMsg = "A failure has occurred"
@ -15,17 +13,7 @@ func Fail() {
failfast(failMsg)
}
// FailM unconditionally abandons the process, logging the given message.
func FailM(msg string) {
failfast(fmt.Sprintf("%v: %v", failMsg, msg))
}
// FailMF unconditionally abandons the process, formatting and logging the given message.
func FailMF(msg string, args ...interface{}) {
// Failf unconditionally abandons the process, formatting and logging the given message.
func Failf(msg string, args ...interface{}) {
failfast(fmt.Sprintf("%v: %v", failMsg, fmt.Sprintf(msg, args...)))
}
// failfast logs and panics the process in a way that is friendly to debugging.
func failfast(msg string) {
glog.Fatal(msg)
}

View file

@ -0,0 +1,12 @@
// Copyright 2016 Marapongo, Inc. All rights reserved.
package contract
import (
"github.com/golang/glog"
)
// failfast logs and panics the process in a way that is friendly to debugging.
func failfast(msg string) {
glog.Fatal(msg)
}

View file

@ -15,15 +15,8 @@ func Require(cond bool, param string) {
}
}
// RequireM checks a precondition condition pertaining to a function parameter, and FailMs if it is false.
func RequireM(cond bool, param string, msg string) {
if !cond {
failfast(fmt.Sprintf("%v: %v", fmt.Sprintf(requireMsg, param), msg))
}
}
// RequireMF checks a precondition condition pertaining to a function parameter, and FailMFs if it is false.
func RequireMF(cond bool, param string, msg string, args ...interface{}) {
// Requiref checks a precondition pertaining to a function parameter, and Failfs if it is false.
func Requiref(cond bool, param string, msg string, args ...interface{}) {
if !cond {
failfast(fmt.Sprintf("%v: %v", fmt.Sprintf(requireMsg, param), fmt.Sprintf(msg, args...)))
}

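A short sketch of the renamed helpers in use (the table lookup is illustrative); the old M/MF variants collapse into a single formatted form:

    package sketch

    import "github.com/marapongo/mu/pkg/util/contract"

    func mustLookup(table map[string]int, key string) int {
        contract.Requiref(table != nil, "table", "!= nil")           // was RequireM/RequireMF
        v, ok := table[key]
        contract.Assertf(ok, "Expected key '%v' to be present", key) // was AssertM/AssertMF
        return v
    }
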
View file

@ -32,10 +32,10 @@ type Decoders map[reflect.Type]Decoder
// Decode decodes an entire map into a target object, using tag-directed mappings.
func (md *mapper) Decode(tree Object, target interface{}) error {
vdst := reflect.ValueOf(target)
contract.AssertMF(vdst.Kind() == reflect.Ptr && !vdst.IsNil() && vdst.Elem().CanSet(),
contract.Assertf(vdst.Kind() == reflect.Ptr && !vdst.IsNil() && vdst.Elem().CanSet(),
"Target %v must be a non-nil, settable pointer", vdst.Type())
vdstType := vdst.Type().Elem()
contract.AssertMF(vdstType.Kind() == reflect.Struct && !vdst.IsNil(),
contract.Assertf(vdstType.Kind() == reflect.Struct && !vdst.IsNil(),
"Target %v must be a struct type with `json:\"x\"` tags to direct decoding", vdstType)
// For each field in the struct that has a `json:"name"`, look it up in the map by that `name`, issuing an error if
@ -70,7 +70,7 @@ func (md *mapper) Decode(tree Object, target interface{}) error {
// Decode the tag.
tagparts := strings.Split(tag, ",")
contract.AssertMF(len(tagparts) > 0,
contract.Assertf(len(tagparts) > 0,
"Expected >0 tagparts on field %v.%v; got %v", vdstType.Name(), fldinfo.Name, len(tagparts))
key = tagparts[0]
for i := 1; i < len(tagparts); i++ {
@ -80,7 +80,7 @@ func (md *mapper) Decode(tree Object, target interface{}) error {
case "skip":
skip = true
default:
contract.FailMF("Unrecognized tagpart on field %v.%v: %v", vdstType.Name(), fldinfo.Name, tagparts[i])
contract.Failf("Unrecognized tagpart on field %v.%v: %v", vdstType.Name(), fldinfo.Name, tagparts[i])
}
}
@ -110,7 +110,7 @@ func (md *mapper) Decode(tree Object, target interface{}) error {
// decodeField decodes primitive fields. For fields of complex types, we use custom deserialization.
func (md *mapper) DecodeField(tree Object, ty reflect.Type, key string, target interface{}, optional bool) error {
vdst := reflect.ValueOf(target)
contract.AssertMF(vdst.Kind() == reflect.Ptr && !vdst.IsNil() && vdst.Elem().CanSet(),
contract.Assertf(vdst.Kind() == reflect.Ptr && !vdst.IsNil() && vdst.Elem().CanSet(),
"Target %v must be a non-nil, settable pointer", vdst.Type())
if v, has := tree[key]; has {
// The field exists; okay, try to map it to the right type.

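Illustrative only: a decode target shaped the way Decode expects. The type and field names are invented, and "skip" is the only tagpart spelled out in this hunk; a Decode(tree, &spec) call on a mapper then fills the tagged fields, asserting that the target is a non-nil, settable struct pointer:

    package sketch

    type serviceSpec struct {
        Name    string                 `json:"name"`         // looked up in the tree by key "name"
        Props   map[string]interface{} `json:"properties"`   // complex fields use custom decoders
        Scratch []byte                 `json:"scratch,skip"` // never read from the tree
    }
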
View file

@ -14,14 +14,20 @@ import (
"github.com/marapongo/mu/pkg/ast"
"github.com/marapongo/mu/pkg/diag"
"github.com/marapongo/mu/pkg/encoding"
"github.com/marapongo/mu/pkg/options"
"github.com/marapongo/mu/pkg/util/contract"
)
// W offers functionality for interacting with Mu workspaces.
// W offers functionality for interacting with Mu workspaces. A workspace influences Mu compilation; for example, it
// can specify default versions of dependencies, easing the process of working with multiple projects.
type W interface {
// Root returns the base path of the current workspace.
Root() string
// Options represents the current options governing the compilation.
Options() *options.Options
// Settings returns a mutable pointer to the optional workspace settings info.
Settings() *ast.Workspace
// ReadSettings reads in the settings file and returns it, returning nil if there is none.
ReadSettings() (*diag.Document, error)
@ -32,10 +38,11 @@ type W interface {
}
// New creates a new workspace from the given starting path.
func New(path string, d diag.Sink) (W, error) {
func New(options *options.Options) (W, error) {
contract.Requiref(options != nil, "options", "!= nil")
// First normalize the path to an absolute one.
var err error
path, err = filepath.Abs(path)
path, err := filepath.Abs(options.Pwd)
if err != nil {
return nil, err
}
@ -46,9 +53,9 @@ func New(path string, d diag.Sink) (W, error) {
}
ws := workspace{
path: path,
home: home,
d: d,
path: path,
home: home,
options: options,
}
// Memoize the root directory before returning.
@ -60,12 +67,12 @@ func New(path string, d diag.Sink) (W, error) {
}
type workspace struct {
path string // the path at which the workspace was constructed.
home string // the home directory to use for this workspace.
root string // the root of the workspace.
muspace string // a path to the Muspace file, if any.
settings ast.Workspace // an optional bag of workspace-wide settings.
d diag.Sink // a diagnostics sink to use for workspace operations.
path string // the path at which the workspace was constructed.
home string // the home directory to use for this workspace.
root string // the root of the workspace.
muspace string // a path to the Muspace file, if any.
options *options.Options // the options governing the current compilation.
settings ast.Workspace // an optional bag of workspace-wide settings.
}
// initRootInfo finds the root of the workspace, caches it for fast lookups, and loads up any workspace settings.
@ -82,7 +89,7 @@ func (w *workspace) initRootInfo() (string, error) {
for _, file := range files {
// A muspace file delimits the root of the workspace.
muspace := filepath.Join(root, file.Name())
if IsMuspace(muspace, w.d) {
if IsMuspace(muspace, w.options.Diag) {
glog.V(3).Infof("Mu workspace detected; setting root to %v", w.root)
w.root = root
w.muspace = muspace
@ -104,13 +111,9 @@ func (w *workspace) initRootInfo() (string, error) {
return w.root, nil
}
func (w *workspace) Root() string {
return w.root
}
func (w *workspace) Settings() *ast.Workspace {
return &w.settings
}
func (w *workspace) Root() string { return w.root }
func (w *workspace) Options() *options.Options { return w.options }
func (w *workspace) Settings() *ast.Workspace { return &w.settings }
func (w *workspace) ReadSettings() (*diag.Document, error) {
if w.muspace == "" {
@ -122,7 +125,7 @@ func (w *workspace) ReadSettings() (*diag.Document, error) {
}
func (w *workspace) DetectMufile() (string, error) {
return DetectMufile(w.path, w.d)
return DetectMufile(w.path, w.options.Diag)
}
func (w *workspace) DepCandidates(dep ast.RefParts) []string {