2017-02-25 16:25:33 +01:00
|
|
|
// Copyright 2016 Pulumi, Inc. All rights reserved.
|
2016-11-20 17:20:19 +01:00
|
|
|
|
|
|
|
package workspace
|
|
|
|
|
|
|
|
import (
|
|
|
|
"io/ioutil"
|
|
|
|
"os"
|
|
|
|
"path/filepath"
|
|
|
|
"strings"
|
|
|
|
|
2017-02-25 16:25:33 +01:00
|
|
|
"github.com/pulumi/coconut/pkg/compiler/errors"
|
|
|
|
"github.com/pulumi/coconut/pkg/diag"
|
|
|
|
"github.com/pulumi/coconut/pkg/encoding"
|
Add basic targeting capability
This change partially implements pulumi/coconut#94, by adding the
ability to name targets during creation and reuse those names during
deletion and update. This simplifies the management of deployment
records, checkpoints, and snapshots.
I've opted to call these things "husks" (perhaps going overboard with
joy after our recent renaming). The basic idea is that for any
executable Nut that will be deployed, you have a nutpack/ directory
whose layout looks roughly as follows:
nutpack/
bin/
Nutpack.json
... any other compiled artifacts ...
husks/
... one snapshot per husk ...
For example, if we had a stage and prod husk, we would have:
nutpack/
bin/...
husks/
prod.json
stage.json
In the prod.json and stage.json files, we'd have the most recent
deployment record for that environment. These would presumably get
checked in and versioned along with the overall Nut, so that we
can use Git history for rollbacks, etc.
The create, update, and delete commands look in the right place for
these files automatically, so you don't need to manually supply them.
2017-02-25 18:24:52 +01:00
|
|
|
"github.com/pulumi/coconut/pkg/tokens"
|
2016-11-20 17:20:19 +01:00
|
|
|
)
|
|
|
|
|
Add basic targeting capability
This change partially implements pulumi/coconut#94, by adding the
ability to name targets during creation and reuse those names during
deletion and update. This simplifies the management of deployment
records, checkpoints, and snapshots.
I've opted to call these things "husks" (perhaps going overboard with
joy after our recent renaming). The basic idea is that for any
executable Nut that will be deployed, you have a nutpack/ directory
whose layout looks roughly as follows:
nutpack/
bin/
Nutpack.json
... any other compiled artifacts ...
husks/
... one snapshot per husk ...
For example, if we had a stage and prod husk, we would have:
nutpack/
bin/...
husks/
prod.json
stage.json
In the prod.json and stage.json files, we'd have the most recent
deployment record for that environment. These would presumably get
checked in and versioned along with the overall Nut, so that we
can use Git history for rollbacks, etc.
The create, update, and delete commands look in the right place for
these files automatically, so you don't need to manually supply them.
2017-02-25 18:24:52 +01:00
|
|
|
// File and directory names that make up the on-disk layout of a Coconut workspace.
const (
	Nutfile         = "Nut"     // the base name of a Nutfile.
	Nutpack         = "Nutpack" // the base name of a compiled NutPack.
	NutpackOutDir   = "nutpack" // the default name of the NutPack output directory.
	NutpackBinDir   = "bin"     // the default name of the NutPack binary output directory.
	NutpackHusksDir = "husks"   // the default name of the NutPack husks directory.
	Nutspace        = "Coconut" // the base name of a markup file for shared settings in a workspace.
	Nutdeps         = ".Nuts"   // the directory in which dependencies exist, either local or global.
)
|
2016-11-20 17:20:19 +01:00
|
|
|
|
Add basic targeting capability
This change partially implements pulumi/coconut#94, by adding the
ability to name targets during creation and reuse those names during
deletion and update. This simplifies the management of deployment
records, checkpoints, and snapshots.
I've opted to call these things "husks" (perhaps going overboard with
joy after our recent renaming). The basic idea is that for any
executable Nut that will be deployed, you have a nutpack/ directory
whose layout looks roughly as follows:
nutpack/
bin/
Nutpack.json
... any other compiled artifacts ...
husks/
... one snapshot per husk ...
For example, if we had a stage and prod husk, we would have:
nutpack/
bin/...
husks/
prod.json
stage.json
In the prod.json and stage.json files, we'd have the most recent
deployment record for that environment. These would presumably get
checked in and versioned along with the overall Nut, so that we
can use Git history for rollbacks, etc.
The create, update, and delete commands look in the right place for
these files automatically, so you don't need to manually supply them.
2017-02-25 18:24:52 +01:00
|
|
|
// Installation-related locations and overrides.
const (
	InstallRootEnvvar  = "COCOROOT"           // the envvar describing where Coconut has been installed.
	InstallRootLibdir  = "lib"                // the directory in which the Coconut standard library exists.
	DefaultInstallRoot = "/usr/local/coconut" // where Coconut is installed by default.
)
|
2016-11-21 18:23:39 +01:00
|
|
|
|
2017-02-25 16:25:33 +01:00
|
|
|
// InstallRoot returns Coconut's installation location. This is controlled my the COCOROOT envvar.
|
2016-11-21 18:23:39 +01:00
|
|
|
func InstallRoot() string {
|
Add basic targeting capability
This change partially implements pulumi/coconut#94, by adding the
ability to name targets during creation and reuse those names during
deletion and update. This simplifies the management of deployment
records, checkpoints, and snapshots.
I've opted to call these things "husks" (perhaps going overboard with
joy after our recent renaming). The basic idea is that for any
executable Nut that will be deployed, you have a nutpack/ directory
whose layout looks roughly as follows:
nutpack/
bin/
Nutpack.json
... any other compiled artifacts ...
husks/
... one snapshot per husk ...
For example, if we had a stage and prod husk, we would have:
nutpack/
bin/...
husks/
prod.json
stage.json
In the prod.json and stage.json files, we'd have the most recent
deployment record for that environment. These would presumably get
checked in and versioned along with the overall Nut, so that we
can use Git history for rollbacks, etc.
The create, update, and delete commands look in the right place for
these files automatically, so you don't need to manually supply them.
2017-02-25 18:24:52 +01:00
|
|
|
// TODO: support Windows.
|
2016-11-21 18:23:39 +01:00
|
|
|
root := os.Getenv(InstallRootEnvvar)
|
|
|
|
if root == "" {
|
|
|
|
return DefaultInstallRoot
|
|
|
|
}
|
|
|
|
return root
|
|
|
|
}
|
|
|
|
|
Add basic targeting capability
This change partially implements pulumi/coconut#94, by adding the
ability to name targets during creation and reuse those names during
deletion and update. This simplifies the management of deployment
records, checkpoints, and snapshots.
I've opted to call these things "husks" (perhaps going overboard with
joy after our recent renaming). The basic idea is that for any
executable Nut that will be deployed, you have a nutpack/ directory
whose layout looks roughly as follows:
nutpack/
bin/
Nutpack.json
... any other compiled artifacts ...
husks/
... one snapshot per husk ...
For example, if we had a stage and prod husk, we would have:
nutpack/
bin/...
husks/
prod.json
stage.json
In the prod.json and stage.json files, we'd have the most recent
deployment record for that environment. These would presumably get
checked in and versioned along with the overall Nut, so that we
can use Git history for rollbacks, etc.
The create, update, and delete commands look in the right place for
these files automatically, so you don't need to manually supply them.
2017-02-25 18:24:52 +01:00
|
|
|
// HuskPath returns a path to the given husk's default location.
|
|
|
|
func HuskPath(husk tokens.QName) string {
|
2017-02-26 22:06:33 +01:00
|
|
|
path := filepath.Join(NutpackOutDir, NutpackHusksDir)
|
|
|
|
if husk != "" {
|
|
|
|
path = filepath.Join(path, qnamePath(husk)+encoding.Exts[0])
|
|
|
|
}
|
|
|
|
return path
|
Add basic targeting capability
This change partially implements pulumi/coconut#94, by adding the
ability to name targets during creation and reuse those names during
deletion and update. This simplifies the management of deployment
records, checkpoints, and snapshots.
I've opted to call these things "husks" (perhaps going overboard with
joy after our recent renaming). The basic idea is that for any
executable Nut that will be deployed, you have a nutpack/ directory
whose layout looks roughly as follows:
nutpack/
bin/
Nutpack.json
... any other compiled artifacts ...
husks/
... one snapshot per husk ...
For example, if we had a stage and prod husk, we would have:
nutpack/
bin/...
husks/
prod.json
stage.json
In the prod.json and stage.json files, we'd have the most recent
deployment record for that environment. These would presumably get
checked in and versioned along with the overall Nut, so that we
can use Git history for rollbacks, etc.
The create, update, and delete commands look in the right place for
these files automatically, so you don't need to manually supply them.
2017-02-25 18:24:52 +01:00
|
|
|
}
|
|
|
|
|
2016-11-21 18:23:39 +01:00
|
|
|
// isTop returns true if the path represents the top of the filesystem (i.e., it ends in a path
// separator). An empty path is never the top.
func isTop(path string) bool {
	// Guard the index below: the empty string has no final character to inspect, and the
	// original code would panic on it.
	if path == "" {
		return false
	}
	return os.IsPathSeparator(path[len(path)-1])
}
|
|
|
|
|
Implement dependency resolution
This change includes logic to resolve dependencies declared by stacks. The design
is described in https://github.com/marapongo/mu/blob/master/docs/deps.md.
In summary, each stack may declare dependencies, which are name/semver pairs. A
new structure has been introduced, ast.Ref, to distinguish between ast.Names and
dependency names. An ast.Ref includes a protocol, base part, and a name part (the
latter being an ast.Name); for example, in "https://hub.mu.com/mu/container/",
"https://" is the protocol, "hub.mu.com/" is the base, and "mu/container" is the
name. This is used to resolve URL-like names to package manager-like artifacts.
The dependency resolution phase happens after parsing, but before semantic analysis.
This is because dependencies are "source-like" in that we must load and parse all
dependency metadata files. We stick the full transitive closure of dependencies
into a map attached to the compiler to avoid loading dependencies multiple times.
Note that, although dependencies prohibit cycles, this forms a DAG, meaning multiple
inbound edges to a single stack may come from multiple places.
From there, we rely on ordinary visitation to deal with dependencies further.
This includes inserting symbol entries into the symbol table, mapping names to the
loaded stacks, during the first phase of binding so that they may be found
subsequently when typechecking during the second phase and beyond.
2016-11-21 20:19:25 +01:00
|
|
|
// pathDir returns the nearest directory to the given path (identity if a directory; parent otherwise).
func pathDir(path string) string {
	// It's possible that the path is a file (e.g., a Nut.yaml file); if so, we want its parent.
	fi, err := os.Stat(path)
	if err == nil && !fi.IsDir() {
		return filepath.Dir(path)
	}
	// Either stat failed or the path already names a directory; hand it back untouched.
	return path
}
|
|
|
|
|
Begin overhauling semantic phases
This change further merges the new AST and MuPack/MuIL formats and
abstractions into the core of the compiler. A good amount of the old
code is gone now; I decided against ripping it all out in one fell
swoop so that I can methodically check that we are preserving all
relevant decisions and/or functionality we had in the old model.
The changes are too numerous to outline in this commit message,
however, here are the noteworthy ones:
* Split up the notion of symbols and tokens, resulting in:
- pkg/symbols for true compiler symbols (bound nodes)
- pkg/tokens for name-based tokens, identifiers, constants
* Several packages move underneath pkg/compiler:
- pkg/ast becomes pkg/compiler/ast
- pkg/errors becomes pkg/compiler/errors
- pkg/symbols becomes pkg/compiler/symbols
* pkg/ast/... becomes pkg/compiler/legacy/ast/...
* pkg/pack/ast becomes pkg/compiler/ast.
* pkg/options goes away, merged back into pkg/compiler.
* All binding functionality moves underneath a dedicated
package, pkg/compiler/binder. The legacy.go file contains
cruft that will eventually go away, while the other files
represent a halfway point between new and old, but are
expected to stay roughly in the current shape.
* All parsing functionality is moved underneath a new
pkg/compiler/metadata namespace, and we adopt new terminology
"metadata reading" since real parsing happens in the MetaMu
compilers. Hence, Parser has become metadata.Reader.
* In general phases of the compiler no longer share access to
the actual compiler.Compiler object. Instead, shared state is
moved to the core.Context object underneath pkg/compiler/core.
* Dependency resolution during binding has been rewritten to
the new model, including stashing bound package symbols in the
context object, and detecting import cycles.
* Compiler construction does not take a workspace object. Instead,
creation of a workspace is entirely hidden inside of the compiler's
constructor logic.
* There are three Compile* functions on the Compiler interface, to
support different styles of invoking compilation: Compile() auto-
detects a Mu package, based on the workspace; CompilePath(string)
loads the target as a Mu package and compiles it, regardless of
the workspace settings; and, CompilePackage(*pack.Package) will
compile a pre-loaded package AST, again regardless of workspace.
* Delete the _fe, _sema, and parsetree phases. They are no longer
relevant and the functionality is largely subsumed by the above.
...and so very much more. I'm surprised I ever got this to compile again!
2017-01-18 21:18:37 +01:00
|
|
|
// DetectPackage locates the closest package from the given path, searching "upwards" in the directory hierarchy. If no
|
2017-02-25 16:25:33 +01:00
|
|
|
// Nutfile is found, an empty path is returned. If problems are detected, they are logged to the diag.Sink.
|
Begin overhauling semantic phases
This change further merges the new AST and MuPack/MuIL formats and
abstractions into the core of the compiler. A good amount of the old
code is gone now; I decided against ripping it all out in one fell
swoop so that I can methodically check that we are preserving all
relevant decisions and/or functionality we had in the old model.
The changes are too numerous to outline in this commit message,
however, here are the noteworthy ones:
* Split up the notion of symbols and tokens, resulting in:
- pkg/symbols for true compiler symbols (bound nodes)
- pkg/tokens for name-based tokens, identifiers, constants
* Several packages move underneath pkg/compiler:
- pkg/ast becomes pkg/compiler/ast
- pkg/errors becomes pkg/compiler/errors
- pkg/symbols becomes pkg/compiler/symbols
* pkg/ast/... becomes pkg/compiler/legacy/ast/...
* pkg/pack/ast becomes pkg/compiler/ast.
* pkg/options goes away, merged back into pkg/compiler.
* All binding functionality moves underneath a dedicated
package, pkg/compiler/binder. The legacy.go file contains
cruft that will eventually go away, while the other files
represent a halfway point between new and old, but are
expected to stay roughly in the current shape.
* All parsing functionality is moved underneath a new
pkg/compiler/metadata namespace, and we adopt new terminology
"metadata reading" since real parsing happens in the MetaMu
compilers. Hence, Parser has become metadata.Reader.
* In general phases of the compiler no longer share access to
the actual compiler.Compiler object. Instead, shared state is
moved to the core.Context object underneath pkg/compiler/core.
* Dependency resolution during binding has been rewritten to
the new model, including stashing bound package symbols in the
context object, and detecting import cycles.
* Compiler construction does not take a workspace object. Instead,
creation of a workspace is entirely hidden inside of the compiler's
constructor logic.
* There are three Compile* functions on the Compiler interface, to
support different styles of invoking compilation: Compile() auto-
detects a Mu package, based on the workspace; CompilePath(string)
loads the target as a Mu package and compiles it, regardless of
the workspace settings; and, CompilePackage(*pack.Package) will
compile a pre-loaded package AST, again regardless of workspace.
* Delete the _fe, _sema, and parsetree phases. They are no longer
relevant and the functionality is largely subsumed by the above.
...and so very much more. I'm surprised I ever got this to compile again!
2017-01-18 21:18:37 +01:00
|
|
|
func DetectPackage(path string, d diag.Sink) (string, error) {
|
2016-11-20 17:20:19 +01:00
|
|
|
// It's possible the target is already the file we seek; if so, return right away.
|
2017-02-25 16:25:33 +01:00
|
|
|
if IsNutfile(path, d) {
|
Implement dependency resolution
This change includes logic to resolve dependencies declared by stacks. The design
is described in https://github.com/marapongo/mu/blob/master/docs/deps.md.
In summary, each stack may declare dependencies, which are name/semver pairs. A
new structure has been introduced, ast.Ref, to distinguish between ast.Names and
dependency names. An ast.Ref includes a protocol, base part, and a name part (the
latter being an ast.Name); for example, in "https://hub.mu.com/mu/container/",
"https://" is the protocol, "hub.mu.com/" is the base, and "mu/container" is the
name. This is used to resolve URL-like names to package manager-like artifacts.
The dependency resolution phase happens after parsing, but before semantic analysis.
This is because dependencies are "source-like" in that we must load and parse all
dependency metadata files. We stick the full transitive closure of dependencies
into a map attached to the compiler to avoid loading dependencies multiple times.
Note that, although dependencies prohibit cycles, this forms a DAG, meaning multiple
inbound edges to a single stack may come from multiple places.
From there, we rely on ordinary visitation to deal with dependencies further.
This includes inserting symbol entries into the symbol table, mapping names to the
loaded stacks, during the first phase of binding so that they may be found
subsequently when typechecking during the second phase and beyond.
2016-11-21 20:19:25 +01:00
|
|
|
return path, nil
|
2016-11-20 17:20:19 +01:00
|
|
|
}
|
|
|
|
|
Implement dependency resolution
This change includes logic to resolve dependencies declared by stacks. The design
is described in https://github.com/marapongo/mu/blob/master/docs/deps.md.
In summary, each stack may declare dependencies, which are name/semver pairs. A
new structure has been introduced, ast.Ref, to distinguish between ast.Names and
dependency names. An ast.Ref includes a protocol, base part, and a name part (the
latter being an ast.Name); for example, in "https://hub.mu.com/mu/container/",
"https://" is the protocol, "hub.mu.com/" is the base, and "mu/container" is the
name. This is used to resolve URL-like names to package manager-like artifacts.
The dependency resolution phase happens after parsing, but before semantic analysis.
This is because dependencies are "source-like" in that we must load and parse all
dependency metadata files. We stick the full transitive closure of dependencies
into a map attached to the compiler to avoid loading dependencies multiple times.
Note that, although dependencies prohibit cycles, this forms a DAG, meaning multiple
inbound edges to a single stack may come from multiple places.
From there, we rely on ordinary visitation to deal with dependencies further.
This includes inserting symbol entries into the symbol table, mapping names to the
loaded stacks, during the first phase of binding so that they may be found
subsequently when typechecking during the second phase and beyond.
2016-11-21 20:19:25 +01:00
|
|
|
curr := pathDir(path)
|
2016-11-20 17:20:19 +01:00
|
|
|
for {
|
|
|
|
stop := false
|
|
|
|
|
2017-02-25 16:25:33 +01:00
|
|
|
// Enumerate the current path's files, checking each to see if it's a Nutfile.
|
2016-11-20 17:20:19 +01:00
|
|
|
files, err := ioutil.ReadDir(curr)
|
2016-11-21 18:23:39 +01:00
|
|
|
if err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
Add basic targeting capability
This change partially implements pulumi/coconut#94, by adding the
ability to name targets during creation and reuse those names during
deletion and update. This simplifies the management of deployment
records, checkpoints, and snapshots.
I've opted to call these things "husks" (perhaps going overboard with
joy after our recent renaming). The basic idea is that for any
executable Nut that will be deployed, you have a nutpack/ directory
whose layout looks roughly as follows:
nutpack/
bin/
Nutpack.json
... any other compiled artifacts ...
husks/
... one snapshot per husk ...
For example, if we had a stage and prod husk, we would have:
nutpack/
bin/...
husks/
prod.json
stage.json
In the prod.json and stage.json files, we'd have the most recent
deployment record for that environment. These would presumably get
checked in and versioned along with the overall Nut, so that we
can use Git history for rollbacks, etc.
The create, update, and delete commands look in the right place for
these files automatically, so you don't need to manually supply them.
2017-02-25 18:24:52 +01:00
|
|
|
|
|
|
|
// See if there's a compiled Nutpack in the expected location.
|
|
|
|
pack := filepath.Join(NutpackOutDir, NutpackBinDir, Nutpack)
|
|
|
|
for _, ext := range encoding.Exts {
|
|
|
|
packfile := pack + ext
|
|
|
|
if IsNutpack(packfile, d) {
|
|
|
|
return packfile, nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Now look for individual Nutfiles.
|
2016-11-20 17:20:19 +01:00
|
|
|
for _, file := range files {
|
|
|
|
name := file.Name()
|
|
|
|
path := filepath.Join(curr, name)
|
2017-02-25 16:25:33 +01:00
|
|
|
if IsNutfile(path, d) {
|
2016-11-21 18:23:39 +01:00
|
|
|
return path, nil
|
2017-02-25 16:25:33 +01:00
|
|
|
} else if IsNutspace(path, d) {
|
|
|
|
// If we hit a Nutspace file, stop looking.
|
2016-11-20 17:20:19 +01:00
|
|
|
stop = true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// If we encountered a stop condition, break out of the loop.
|
|
|
|
if stop {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
|
|
|
|
// If neither succeeded, keep looking in our parent directory.
|
|
|
|
curr = filepath.Dir(curr)
|
2016-11-21 18:23:39 +01:00
|
|
|
if isTop(curr) {
|
2016-11-20 17:20:19 +01:00
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-11-21 18:23:39 +01:00
|
|
|
return "", nil
|
|
|
|
}
|
|
|
|
|
2017-02-25 16:25:33 +01:00
|
|
|
// IsNutfile returns true if the path references what appears to be a valid Nutfile. If problems are detected -- like
|
2016-11-20 17:20:19 +01:00
|
|
|
// an incorrect extension -- they are logged to the provided diag.Sink (if non-nil).
|
2017-02-25 16:25:33 +01:00
|
|
|
func IsNutfile(path string, d diag.Sink) bool {
|
|
|
|
return isMarkupFile(path, Nutfile, d)
|
2016-11-30 05:07:27 +01:00
|
|
|
}
|
|
|
|
|
2017-02-25 16:25:33 +01:00
|
|
|
// IsNutpack returns true if the path references what appears to be a valid Nutpack. If problems are detected -- like
|
2017-02-09 20:23:27 +01:00
|
|
|
// an incorrect extension -- they are logged to the provided diag.Sink (if non-nil).
|
2017-02-25 16:25:33 +01:00
|
|
|
func IsNutpack(path string, d diag.Sink) bool {
|
|
|
|
return isMarkupFile(path, Nutpack, d)
|
2017-02-09 20:23:27 +01:00
|
|
|
}
|
|
|
|
|
2017-02-25 16:25:33 +01:00
|
|
|
// IsNutspace returns true if the path references what appears to be a valid Nutspace file. If problems are detected --
|
2016-11-30 05:07:27 +01:00
|
|
|
// like an incorrect extension -- they are logged to the provided diag.Sink (if non-nil).
|
2017-02-25 16:25:33 +01:00
|
|
|
func IsNutspace(path string, d diag.Sink) bool {
|
|
|
|
return isMarkupFile(path, Nutspace, d)
|
2016-11-30 05:07:27 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
func isMarkupFile(path string, expect string, d diag.Sink) bool {
|
2016-11-20 17:20:19 +01:00
|
|
|
info, err := os.Stat(path)
|
Implement dependency resolution
This change includes logic to resolve dependencies declared by stacks. The design
is described in https://github.com/marapongo/mu/blob/master/docs/deps.md.
In summary, each stack may declare dependencies, which are name/semver pairs. A
new structure has been introduced, ast.Ref, to distinguish between ast.Names and
dependency names. An ast.Ref includes a protocol, base part, and a name part (the
latter being an ast.Name); for example, in "https://hub.mu.com/mu/container/",
"https://" is the protocol, "hub.mu.com/" is the base, and "mu/container" is the
name. This is used to resolve URL-like names to package manager-like artifacts.
The dependency resolution phase happens after parsing, but before semantic analysis.
This is because dependencies are "source-like" in that we must load and parse all
dependency metadata files. We stick the full transitive closure of dependencies
into a map attached to the compiler to avoid loading dependencies multiple times.
Note that, although dependencies prohibit cycles, this forms a DAG, meaning multiple
inbound edges to a single stack may come from multiple places.
From there, we rely on ordinary visitation to deal with dependencies further.
This includes inserting symbol entries into the symbol table, mapping names to the
loaded stacks, during the first phase of binding so that they may be found
subsequently when typechecking during the second phase and beyond.
2016-11-21 20:19:25 +01:00
|
|
|
if err != nil || info.IsDir() {
|
2016-11-30 05:07:27 +01:00
|
|
|
// Missing files and directories can't be markup files.
|
2016-11-20 17:20:19 +01:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the base name is expected.
|
|
|
|
name := info.Name()
|
|
|
|
ext := filepath.Ext(name)
|
|
|
|
base := strings.TrimSuffix(name, ext)
|
2016-11-30 05:07:27 +01:00
|
|
|
if base != expect {
|
|
|
|
if d != nil && strings.EqualFold(base, expect) {
|
2016-11-20 17:20:19 +01:00
|
|
|
// If the strings aren't equal, but case-insensitively match, issue a warning.
|
2016-11-30 05:07:27 +01:00
|
|
|
d.Warningf(errors.WarningIllegalMarkupFileCasing.AtFile(name), expect)
|
2016-11-20 17:20:19 +01:00
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check all supported extensions.
|
2016-11-30 05:07:27 +01:00
|
|
|
for _, mext := range encoding.Exts {
|
|
|
|
if name == expect+mext {
|
2016-11-20 17:20:19 +01:00
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// If we got here, it means the base name matched, but not the extension. Warn and return.
|
|
|
|
if d != nil {
|
2016-11-30 05:07:27 +01:00
|
|
|
d.Warningf(errors.WarningIllegalMarkupFileExt.AtFile(name), expect, ext)
|
2016-11-20 17:20:19 +01:00
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|