pulumi/sdk/proto/go/provider.pb.go

// Code generated by protoc-gen-go.
// source: provider.proto
// DO NOT EDIT!
package pulumirpc
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import google_protobuf1 "github.com/golang/protobuf/ptypes/empty"
import google_protobuf "github.com/golang/protobuf/ptypes/struct"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
type ConfigureRequest struct {
Variables map[string]string `protobuf:"bytes,1,rep,name=variables" json:"variables,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
}
func (m *ConfigureRequest) Reset() { *m = ConfigureRequest{} }
func (m *ConfigureRequest) String() string { return proto.CompactTextString(m) }
func (*ConfigureRequest) ProtoMessage() {}
func (*ConfigureRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} }
func (m *ConfigureRequest) GetVariables() map[string]string {
if m != nil {
return m.Variables
}
return nil
}
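// Illustrative sketch (not part of the generated code): a caller that has
// flattened provider settings into string key/value pairs could build a
// ConfigureRequest roughly as follows. The names conn and cfg, and the
// example key, are assumptions for this sketch only.
//
//	cfg := map[string]string{"example:config:region": "us-west-2"}
//	client := NewResourceProviderClient(conn)
//	if _, err := client.Configure(context.Background(), &ConfigureRequest{Variables: cfg}); err != nil {
//		// handle configuration failure
//	}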
type CheckRequest struct {
Urn string `protobuf:"bytes,1,opt,name=urn" json:"urn,omitempty"`
Properties *google_protobuf.Struct `protobuf:"bytes,2,opt,name=properties" json:"properties,omitempty"`
}
func (m *CheckRequest) Reset() { *m = CheckRequest{} }
func (m *CheckRequest) String() string { return proto.CompactTextString(m) }
func (*CheckRequest) ProtoMessage() {}
func (*CheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{1} }
func (m *CheckRequest) GetUrn() string {
if m != nil {
return m.Urn
}
return ""
}
func (m *CheckRequest) GetProperties() *google_protobuf.Struct {
if m != nil {
return m.Properties
}
return nil
}
type CheckResponse struct {
Defaults *google_protobuf.Struct `protobuf:"bytes,1,opt,name=defaults" json:"defaults,omitempty"`
Failures []*CheckFailure `protobuf:"bytes,2,rep,name=failures" json:"failures,omitempty"`
}
func (m *CheckResponse) Reset() { *m = CheckResponse{} }
func (m *CheckResponse) String() string { return proto.CompactTextString(m) }
func (*CheckResponse) ProtoMessage() {}
func (*CheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{2} }
func (m *CheckResponse) GetDefaults() *google_protobuf.Struct {
if m != nil {
return m.Defaults
}
return nil
}
func (m *CheckResponse) GetFailures() []*CheckFailure {
if m != nil {
return m.Failures
}
return nil
}
type CheckFailure struct {
Property string `protobuf:"bytes,1,opt,name=property" json:"property,omitempty"`
Reason string `protobuf:"bytes,2,opt,name=reason" json:"reason,omitempty"`
}
func (m *CheckFailure) Reset() { *m = CheckFailure{} }
func (m *CheckFailure) String() string { return proto.CompactTextString(m) }
func (*CheckFailure) ProtoMessage() {}
func (*CheckFailure) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{3} }
func (m *CheckFailure) GetProperty() string {
if m != nil {
return m.Property
}
return ""
}
func (m *CheckFailure) GetReason() string {
if m != nil {
return m.Reason
}
return ""
}
type DiffRequest struct {
Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
Urn string `protobuf:"bytes,2,opt,name=urn" json:"urn,omitempty"`
Olds *google_protobuf.Struct `protobuf:"bytes,3,opt,name=olds" json:"olds,omitempty"`
News *google_protobuf.Struct `protobuf:"bytes,4,opt,name=news" json:"news,omitempty"`
}
func (m *DiffRequest) Reset() { *m = DiffRequest{} }
func (m *DiffRequest) String() string { return proto.CompactTextString(m) }
func (*DiffRequest) ProtoMessage() {}
func (*DiffRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{4} }
func (m *DiffRequest) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func (m *DiffRequest) GetUrn() string {
if m != nil {
return m.Urn
}
return ""
}
func (m *DiffRequest) GetOlds() *google_protobuf.Struct {
if m != nil {
return m.Olds
}
return nil
}
func (m *DiffRequest) GetNews() *google_protobuf.Struct {
if m != nil {
return m.News
}
return nil
}
type DiffResponse struct {
Replaces []string `protobuf:"bytes,1,rep,name=replaces" json:"replaces,omitempty"`
}
func (m *DiffResponse) Reset() { *m = DiffResponse{} }
func (m *DiffResponse) String() string { return proto.CompactTextString(m) }
func (*DiffResponse) ProtoMessage() {}
func (*DiffResponse) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{5} }
func (m *DiffResponse) GetReplaces() []string {
if m != nil {
return m.Replaces
}
return nil
}
type CreateRequest struct {
Urn string `protobuf:"bytes,1,opt,name=urn" json:"urn,omitempty"`
Properties *google_protobuf.Struct `protobuf:"bytes,2,opt,name=properties" json:"properties,omitempty"`
}
func (m *CreateRequest) Reset() { *m = CreateRequest{} }
func (m *CreateRequest) String() string { return proto.CompactTextString(m) }
func (*CreateRequest) ProtoMessage() {}
func (*CreateRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{6} }
func (m *CreateRequest) GetUrn() string {
if m != nil {
return m.Urn
}
return ""
}
func (m *CreateRequest) GetProperties() *google_protobuf.Struct {
if m != nil {
return m.Properties
}
return nil
}
type CreateResponse struct {
Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
Properties *google_protobuf.Struct `protobuf:"bytes,2,opt,name=properties" json:"properties,omitempty"`
}
func (m *CreateResponse) Reset() { *m = CreateResponse{} }
func (m *CreateResponse) String() string { return proto.CompactTextString(m) }
func (*CreateResponse) ProtoMessage() {}
func (*CreateResponse) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{7} }
func (m *CreateResponse) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func (m *CreateResponse) GetProperties() *google_protobuf.Struct {
if m != nil {
return m.Properties
}
return nil
}
type UpdateRequest struct {
Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
Urn string `protobuf:"bytes,2,opt,name=urn" json:"urn,omitempty"`
Olds *google_protobuf.Struct `protobuf:"bytes,3,opt,name=olds" json:"olds,omitempty"`
News *google_protobuf.Struct `protobuf:"bytes,4,opt,name=news" json:"news,omitempty"`
}
func (m *UpdateRequest) Reset() { *m = UpdateRequest{} }
func (m *UpdateRequest) String() string { return proto.CompactTextString(m) }
func (*UpdateRequest) ProtoMessage() {}
func (*UpdateRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{8} }
func (m *UpdateRequest) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func (m *UpdateRequest) GetUrn() string {
if m != nil {
return m.Urn
}
return ""
}
func (m *UpdateRequest) GetOlds() *google_protobuf.Struct {
if m != nil {
return m.Olds
}
return nil
}
func (m *UpdateRequest) GetNews() *google_protobuf.Struct {
if m != nil {
return m.News
}
return nil
}
type UpdateResponse struct {
Properties *google_protobuf.Struct `protobuf:"bytes,1,opt,name=properties" json:"properties,omitempty"`
}
func (m *UpdateResponse) Reset() { *m = UpdateResponse{} }
func (m *UpdateResponse) String() string { return proto.CompactTextString(m) }
func (*UpdateResponse) ProtoMessage() {}
func (*UpdateResponse) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{9} }
func (m *UpdateResponse) GetProperties() *google_protobuf.Struct {
if m != nil {
return m.Properties
}
return nil
}
type DeleteRequest struct {
Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
Urn string `protobuf:"bytes,2,opt,name=urn" json:"urn,omitempty"`
Properties *google_protobuf.Struct `protobuf:"bytes,3,opt,name=properties" json:"properties,omitempty"`
}
func (m *DeleteRequest) Reset() { *m = DeleteRequest{} }
func (m *DeleteRequest) String() string { return proto.CompactTextString(m) }
func (*DeleteRequest) ProtoMessage() {}
func (*DeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{10} }
func (m *DeleteRequest) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func (m *DeleteRequest) GetUrn() string {
if m != nil {
return m.Urn
}
return ""
}
func (m *DeleteRequest) GetProperties() *google_protobuf.Struct {
if m != nil {
return m.Properties
}
return nil
}
type InvokeRequest struct {
Tok string `protobuf:"bytes,1,opt,name=tok" json:"tok,omitempty"`
Args *google_protobuf.Struct `protobuf:"bytes,2,opt,name=args" json:"args,omitempty"`
}
func (m *InvokeRequest) Reset() { *m = InvokeRequest{} }
func (m *InvokeRequest) String() string { return proto.CompactTextString(m) }
func (*InvokeRequest) ProtoMessage() {}
func (*InvokeRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{11} }
func (m *InvokeRequest) GetTok() string {
if m != nil {
return m.Tok
}
return ""
}
func (m *InvokeRequest) GetArgs() *google_protobuf.Struct {
if m != nil {
return m.Args
}
return nil
}
type InvokeResponse struct {
Return *google_protobuf.Struct `protobuf:"bytes,1,opt,name=return" json:"return,omitempty"`
Failures []*CheckFailure `protobuf:"bytes,2,rep,name=failures" json:"failures,omitempty"`
}
func (m *InvokeResponse) Reset() { *m = InvokeResponse{} }
func (m *InvokeResponse) String() string { return proto.CompactTextString(m) }
func (*InvokeResponse) ProtoMessage() {}
func (*InvokeResponse) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{12} }
func (m *InvokeResponse) GetReturn() *google_protobuf.Struct {
if m != nil {
return m.Return
}
return nil
}
func (m *InvokeResponse) GetFailures() []*CheckFailure {
if m != nil {
return m.Failures
}
return nil
}
func init() {
proto.RegisterType((*ConfigureRequest)(nil), "pulumirpc.ConfigureRequest")
proto.RegisterType((*CheckRequest)(nil), "pulumirpc.CheckRequest")
proto.RegisterType((*CheckResponse)(nil), "pulumirpc.CheckResponse")
proto.RegisterType((*CheckFailure)(nil), "pulumirpc.CheckFailure")
proto.RegisterType((*DiffRequest)(nil), "pulumirpc.DiffRequest")
proto.RegisterType((*DiffResponse)(nil), "pulumirpc.DiffResponse")
proto.RegisterType((*CreateRequest)(nil), "pulumirpc.CreateRequest")
proto.RegisterType((*CreateResponse)(nil), "pulumirpc.CreateResponse")
proto.RegisterType((*UpdateRequest)(nil), "pulumirpc.UpdateRequest")
proto.RegisterType((*UpdateResponse)(nil), "pulumirpc.UpdateResponse")
proto.RegisterType((*DeleteRequest)(nil), "pulumirpc.DeleteRequest")
proto.RegisterType((*InvokeRequest)(nil), "pulumirpc.InvokeRequest")
proto.RegisterType((*InvokeResponse)(nil), "pulumirpc.InvokeResponse")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for ResourceProvider service
type ResourceProviderClient interface {
// Configure configures the resource provider with "globals" that control its behavior.
Configure(ctx context.Context, in *ConfigureRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error)
// Check validates that the given property bag is valid for a resource of the given type.
Check(ctx context.Context, in *CheckRequest, opts ...grpc.CallOption) (*CheckResponse, error)
// Diff checks what impacts a hypothetical update will have on the resource's properties.
Diff(ctx context.Context, in *DiffRequest, opts ...grpc.CallOption) (*DiffResponse, error)
// Create allocates a new instance of the provided resource and returns its unique ID afterwards. (The input ID
// must be blank.) If this call fails, the resource must not have been created (i.e., it is "transactional").
Create(ctx context.Context, in *CreateRequest, opts ...grpc.CallOption) (*CreateResponse, error)
// Update updates an existing resource with new values.
Update(ctx context.Context, in *UpdateRequest, opts ...grpc.CallOption) (*UpdateResponse, error)
// Delete tears down an existing resource with the given ID. If it fails, the resource is assumed to still exist.
Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error)
// Invoke dynamically executes a built-in function in the provider.
Invoke(ctx context.Context, in *InvokeRequest, opts ...grpc.CallOption) (*InvokeResponse, error)
}
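// Illustrative usage sketch (not part of the generated code): given an
// established *grpc.ClientConn (here called conn, an assumption for the
// example), a caller might validate a resource's proposed inputs via Check
// and then inspect any failures. The urn and props values are placeholders.
//
//	client := NewResourceProviderClient(conn)
//	resp, err := client.Check(context.Background(), &CheckRequest{
//		Urn:        urn,   // the resource's URN
//		Properties: props, // *google_protobuf.Struct of the proposed inputs
//	})
//	if err != nil {
//		return err
//	}
//	for _, f := range resp.GetFailures() {
//		fmt.Printf("property %q failed validation: %s\n", f.GetProperty(), f.GetReason())
//	}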
type resourceProviderClient struct {
cc *grpc.ClientConn
}
func NewResourceProviderClient(cc *grpc.ClientConn) ResourceProviderClient {
return &resourceProviderClient{cc}
}
func (c *resourceProviderClient) Configure(ctx context.Context, in *ConfigureRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) {
out := new(google_protobuf1.Empty)
err := grpc.Invoke(ctx, "/pulumirpc.ResourceProvider/Configure", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *resourceProviderClient) Check(ctx context.Context, in *CheckRequest, opts ...grpc.CallOption) (*CheckResponse, error) {
out := new(CheckResponse)
err := grpc.Invoke(ctx, "/pulumirpc.ResourceProvider/Check", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *resourceProviderClient) Diff(ctx context.Context, in *DiffRequest, opts ...grpc.CallOption) (*DiffResponse, error) {
out := new(DiffResponse)
err := grpc.Invoke(ctx, "/pulumirpc.ResourceProvider/Diff", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *resourceProviderClient) Create(ctx context.Context, in *CreateRequest, opts ...grpc.CallOption) (*CreateResponse, error) {
out := new(CreateResponse)
err := grpc.Invoke(ctx, "/pulumirpc.ResourceProvider/Create", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *resourceProviderClient) Update(ctx context.Context, in *UpdateRequest, opts ...grpc.CallOption) (*UpdateResponse, error) {
out := new(UpdateResponse)
err := grpc.Invoke(ctx, "/pulumirpc.ResourceProvider/Update", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *resourceProviderClient) Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) {
out := new(google_protobuf1.Empty)
err := grpc.Invoke(ctx, "/pulumirpc.ResourceProvider/Delete", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *resourceProviderClient) Invoke(ctx context.Context, in *InvokeRequest, opts ...grpc.CallOption) (*InvokeResponse, error) {
out := new(InvokeResponse)
err := grpc.Invoke(ctx, "/pulumirpc.ResourceProvider/Invoke", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for ResourceProvider service
type ResourceProviderServer interface {
// Configure configures the resource provider with "globals" that control its behavior.
Configure(context.Context, *ConfigureRequest) (*google_protobuf1.Empty, error)
// Check validates that the given property bag is valid for a resource of the given type.
Check(context.Context, *CheckRequest) (*CheckResponse, error)
// Diff checks what impacts a hypothetical update will have on the resource's properties.
Diff(context.Context, *DiffRequest) (*DiffResponse, error)
// Create allocates a new instance of the provided resource and returns its unique ID afterwards. (The input ID
// must be blank.) If this call fails, the resource must not have been created (i.e., it is "transactional").
Create(context.Context, *CreateRequest) (*CreateResponse, error)
// Update updates an existing resource with new values.
Update(context.Context, *UpdateRequest) (*UpdateResponse, error)
// Delete tears down an existing resource with the given ID. If it fails, the resource is assumed to still exist.
Delete(context.Context, *DeleteRequest) (*google_protobuf1.Empty, error)
// Invoke dynamically executes a built-in function in the provider.
Invoke(context.Context, *InvokeRequest) (*InvokeResponse, error)
}
func RegisterResourceProviderServer(s *grpc.Server, srv ResourceProviderServer) {
s.RegisterService(&_ResourceProvider_serviceDesc, srv)
}
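// Illustrative sketch (not part of the generated code): a provider
// implementation holds its own state, implements ResourceProviderServer, and
// registers itself on a grpc.Server. The myProvider type and its methods are
// assumptions for the example; only Check is shown.
//
//	type myProvider struct{ /* provider state */ }
//
//	func (p *myProvider) Check(ctx context.Context, req *CheckRequest) (*CheckResponse, error) {
//		// validate req.GetProperties() and report problems as CheckFailures
//		return &CheckResponse{}, nil
//	}
//
//	// ... the remaining ResourceProviderServer methods ...
//
//	srv := grpc.NewServer()
//	RegisterResourceProviderServer(srv, &myProvider{})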
func _ResourceProvider_Configure_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ConfigureRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ResourceProviderServer).Configure(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pulumirpc.ResourceProvider/Configure",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ResourceProviderServer).Configure(ctx, req.(*ConfigureRequest))
}
return interceptor(ctx, in, info, handler)
}
func _ResourceProvider_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(CheckRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ResourceProviderServer).Check(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pulumirpc.ResourceProvider/Check",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ResourceProviderServer).Check(ctx, req.(*CheckRequest))
}
return interceptor(ctx, in, info, handler)
}
func _ResourceProvider_Diff_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DiffRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ResourceProviderServer).Diff(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pulumirpc.ResourceProvider/Diff",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ResourceProviderServer).Diff(ctx, req.(*DiffRequest))
}
return interceptor(ctx, in, info, handler)
}
func _ResourceProvider_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(CreateRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ResourceProviderServer).Create(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pulumirpc.ResourceProvider/Create",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ResourceProviderServer).Create(ctx, req.(*CreateRequest))
}
return interceptor(ctx, in, info, handler)
}
func _ResourceProvider_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(UpdateRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ResourceProviderServer).Update(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pulumirpc.ResourceProvider/Update",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ResourceProviderServer).Update(ctx, req.(*UpdateRequest))
}
return interceptor(ctx, in, info, handler)
}
func _ResourceProvider_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DeleteRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ResourceProviderServer).Delete(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pulumirpc.ResourceProvider/Delete",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ResourceProviderServer).Delete(ctx, req.(*DeleteRequest))
}
return interceptor(ctx, in, info, handler)
}
func _ResourceProvider_Invoke_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(InvokeRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ResourceProviderServer).Invoke(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pulumirpc.ResourceProvider/Invoke",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ResourceProviderServer).Invoke(ctx, req.(*InvokeRequest))
}
return interceptor(ctx, in, info, handler)
}
var _ResourceProvider_serviceDesc = grpc.ServiceDesc{
ServiceName: "pulumirpc.ResourceProvider",
HandlerType: (*ResourceProviderServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Configure",
Handler: _ResourceProvider_Configure_Handler,
},
{
MethodName: "Check",
Handler: _ResourceProvider_Check_Handler,
},
{
MethodName: "Diff",
Handler: _ResourceProvider_Diff_Handler,
},
{
MethodName: "Create",
Handler: _ResourceProvider_Create_Handler,
},
{
MethodName: "Update",
Handler: _ResourceProvider_Update_Handler,
},
{
MethodName: "Delete",
Handler: _ResourceProvider_Delete_Handler,
},
{
MethodName: "Invoke",
Handler: _ResourceProvider_Invoke_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "provider.proto",
}
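// NOTE: a minimal sketch (not part of the generated code) of wiring the
// service descriptor above to a gRPC server; the provider type (myProvider)
// and the listener address are hypothetical, and myProvider is assumed to
// implement ResourceProviderServer.
//
//	srv := grpc.NewServer()
//	srv.RegisterService(&_ResourceProvider_serviceDesc, &myProvider{})
//	lis, _ := net.Listen("tcp", "127.0.0.1:0")
//	_ = srv.Serve(lis)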
func init() { proto.RegisterFile("provider.proto", fileDescriptor3) }
var fileDescriptor3 = []byte{
// 613 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xc4, 0x54, 0xdd, 0x6a, 0xdb, 0x4c,
0x10, 0x8d, 0xac, 0xc4, 0xc4, 0x93, 0x58, 0x98, 0xe5, 0x23, 0xd1, 0xa7, 0xf4, 0x22, 0xe8, 0x2a,
0xa4, 0xa0, 0x80, 0x73, 0xd1, 0x1f, 0x02, 0x81, 0x38, 0x29, 0xcd, 0x4d, 0x29, 0x2a, 0x2d, 0xa4,
0x77, 0xb2, 0x35, 0x72, 0x55, 0x2b, 0x5a, 0x75, 0xb5, 0xeb, 0xe2, 0x47, 0x28, 0x7d, 0x83, 0x3e,
0x55, 0x1f, 0xa9, 0x68, 0xb5, 0xab, 0x68, 0xed, 0xe2, 0xa4, 0x81, 0xd2, 0x3b, 0xad, 0xe6, 0xcc,
0x99, 0x39, 0x67, 0x67, 0x16, 0x9c, 0x82, 0xd1, 0x79, 0x1a, 0x23, 0x0b, 0x0a, 0x46, 0x39, 0x25,
0xbd, 0x42, 0x64, 0xe2, 0x36, 0x65, 0xc5, 0xc4, 0x3b, 0x98, 0x52, 0x3a, 0xcd, 0xf0, 0x44, 0x06,
0xc6, 0x22, 0x39, 0xc1, 0xdb, 0x82, 0x2f, 0x6a, 0x9c, 0xf7, 0x64, 0x39, 0x58, 0x72, 0x26, 0x26,
0xbc, 0x8e, 0xfa, 0x3f, 0x2c, 0x18, 0x8c, 0x68, 0x9e, 0xa4, 0x53, 0xc1, 0x30, 0xc4, 0x2f, 0x02,
0x4b, 0x4e, 0x5e, 0x43, 0x6f, 0x1e, 0xb1, 0x34, 0x1a, 0x67, 0x58, 0xba, 0xd6, 0xa1, 0x7d, 0xb4,
0x33, 0x3c, 0x0e, 0x9a, 0x72, 0xc1, 0x32, 0x3e, 0xf8, 0xa0, 0xc1, 0x57, 0x39, 0x67, 0x8b, 0xf0,
0x2e, 0xd9, 0x3b, 0x03, 0xc7, 0x0c, 0x92, 0x01, 0xd8, 0x33, 0x5c, 0xb8, 0xd6, 0xa1, 0x75, 0xd4,
0x0b, 0xab, 0x4f, 0xf2, 0x1f, 0x6c, 0xcd, 0xa3, 0x4c, 0xa0, 0xdb, 0x91, 0xff, 0xea, 0xc3, 0xcb,
0xce, 0x73, 0xcb, 0xbf, 0x81, 0xdd, 0xd1, 0x27, 0x9c, 0xcc, 0x74, 0x5f, 0x03, 0xb0, 0x05, 0xcb,
0x75, 0xae, 0x60, 0x39, 0x79, 0x06, 0x50, 0x30, 0x5a, 0x20, 0xe3, 0x29, 0x96, 0x92, 0x60, 0x67,
0xb8, 0x1f, 0xd4, 0x8a, 0x03, 0xad, 0x38, 0x78, 0x27, 0x15, 0x87, 0x2d, 0xa8, 0xbf, 0x80, 0xbe,
0xa2, 0x2e, 0x0b, 0x9a, 0x97, 0x48, 0x4e, 0x61, 0x3b, 0xc6, 0x24, 0x12, 0x19, 0x2f, 0x65, 0x81,
0x35, 0x3c, 0x0d, 0xb0, 0x4a, 0x4a, 0xa2, 0x34, 0x13, 0x4c, 0x16, 0xb7, 0x65, 0x52, 0xcb, 0xa7,
0xaa, 0xc0, 0xab, 0x3a, 0x1e, 0x36, 0x40, 0xff, 0x42, 0xa9, 0x52, 0x11, 0xe2, 0xc1, 0xb6, 0x6a,
0x4c, 0xdb, 0xd2, 0x9c, 0xc9, 0x1e, 0x74, 0x19, 0x46, 0x25, 0xcd, 0x95, 0x39, 0xea, 0xe4, 0x7f,
0xb3, 0x60, 0xe7, 0x32, 0x4d, 0x12, 0xed, 0x8c, 0x03, 0x9d, 0x34, 0x56, 0xd9, 0x9d, 0x34, 0xd6,
0x4e, 0x75, 0xee, 0x9c, 0x7a, 0x0a, 0x9b, 0x34, 0x8b, 0x4b, 0xd7, 0x5e, 0xaf, 0x4d, 0x82, 0x2a,
0x70, 0x8e, 0x5f, 0x4b, 0x77, 0xf3, 0x1e, 0x70, 0x05, 0xf2, 0x8f, 0x61, 0xb7, 0x6e, 0x45, 0x39,
0xe9, 0xc1, 0x36, 0xc3, 0x22, 0x8b, 0x26, 0x6a, 0x78, 0x7a, 0x61, 0x73, 0xf6, 0x3f, 0x42, 0x7f,
0xc4, 0x30, 0xe2, 0xf8, 0x17, 0xae, 0xf4, 0x06, 0x1c, 0xcd, 0xad, 0x3a, 0x59, 0x76, 0xe5, 0xd1,
0xd4, 0xdf, 0x2d, 0xe8, 0xbf, 0x2f, 0xe2, 0x56, 0xdf, 0xff, 0xd2, 0xf0, 0x6b, 0x70, 0x74, 0x33,
0x4a, 0xa8, 0x29, 0xcc, 0x7a, 0xb8, 0xb0, 0xcf, 0xd0, 0xbf, 0xc4, 0x0c, 0xff, 0x44, 0x97, 0x59,
0xcb, 0x7e, 0x78, 0xad, 0x37, 0xd0, 0xbf, 0xce, 0xe7, 0x74, 0xd6, 0xbe, 0x7b, 0x4e, 0x67, 0xfa,
0xee, 0x39, 0x9d, 0x55, 0x36, 0x44, 0x6c, 0x7a, 0xef, 0xd5, 0x48, 0x90, 0x3f, 0x07, 0x47, 0xf3,
0x29, 0x1b, 0x4e, 0xaa, 0x6d, 0xe1, 0x7a, 0x9e, 0xd6, 0x10, 0x28, 0xd8, 0xa3, 0xf6, 0x77, 0xf8,
0xd3, 0x86, 0x41, 0x88, 0x25, 0x15, 0x6c, 0x82, 0x6f, 0xd5, 0x9b, 0x4c, 0x2e, 0xa0, 0xd7, 0x3c,
0x8b, 0xe4, 0x60, 0xcd, 0x63, 0xe9, 0xed, 0xad, 0x34, 0x75, 0x55, 0xbd, 0xd6, 0xfe, 0x06, 0x39,
0x83, 0x2d, 0x59, 0x92, 0xac, 0x34, 0xa1, 0x73, 0xdd, 0xd5, 0x40, 0x2d, 0xdd, 0xdf, 0x20, 0x2f,
0x60, 0xb3, 0x5a, 0x43, 0xb2, 0xd7, 0xc2, 0xb4, 0x9e, 0x08, 0x6f, 0x7f, 0xe5, 0x7f, 0x93, 0x7a,
0x0e, 0xdd, 0x7a, 0x73, 0x88, 0x51, 0xa0, 0xbd, 0xa8, 0xde, 0xff, 0xbf, 0x89, 0xb4, 0x09, 0xea,
0x89, 0x34, 0x08, 0x8c, 0x8d, 0x31, 0x08, 0xcc, 0xf1, 0x95, 0xd2, 0xbb, 0xf5, 0x1c, 0x1a, 0x04,
0xc6, 0x68, 0xae, 0x31, 0xee, 0x1c, 0xba, 0xf5, 0x24, 0x18, 0xd9, 0xc6, 0xb0, 0x19, 0xe5, 0xcd,
0xb1, 0xf1, 0x37, 0xc6, 0x5d, 0x49, 0x79, 0xfa, 0x2b, 0x00, 0x00, 0xff, 0xff, 0xa0, 0x42, 0x75,
0x27, 0x64, 0x07, 0x00, 0x00,
}