Fix review comments and new changes in config (#8515)

- Migrate and save only settings that are enabled
- Rename `logger_http` to `logger_webhook` and
  `logger_http_audit` to `audit_webhook`
- Comments are no longer pretty printed; a comment
  is now a regular key=value pair
- Avoid quoting values that contain no spaces
- `state="on"` is implicit for all SetConfigKV calls unless
  specified explicitly as `state="off"` (see the sketch below)
- Disabled IAM users always remain disabled
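
The following is a minimal, standalone sketch of the two KV-formatting behaviors described above, condensed from the `madmin` changes in this commit; it is not the actual `cmd/config` or `pkg/madmin` code, and the types and helper names are simplified for illustration.

```
package main

import (
	"fmt"
	"strings"
	"unicode"
)

// KVS mirrors the per-target key=value map used by the config sub-systems.
type KVS map[string]string

// hasSpace reports whether a value needs double quotes when printed.
func hasSpace(s string) bool {
	for _, r := range s {
		if unicode.IsSpace(r) {
			return true
		}
	}
	return false
}

// String skips the implicit state=on and quotes only values containing spaces.
func (kvs KVS) String() string {
	var b strings.Builder
	for k, v := range kvs {
		if k == "state" && v == "on" {
			continue // implicit, so it is not printed
		}
		b.WriteString(k + "=")
		if hasSpace(v) {
			b.WriteString(`"` + v + `"`)
		} else {
			b.WriteString(v)
		}
		b.WriteString(" ")
	}
	return strings.TrimSpace(b.String())
}

func main() {
	kvs := KVS{
		"endpoint": "http://localhost:8080/minio/logs",
		"comment":  "webhook target", // comment is just another key=value pair
	}
	// state="on" is implied when the caller does not set it explicitly.
	if _, ok := kvs["state"]; !ok {
		kvs["state"] = "on"
	}
	fmt.Println(kvs.String())
	// Example output (map iteration order may vary):
	// endpoint=http://localhost:8080/minio/logs comment="webhook target"
}
```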
Harshavardhana 2019-11-13 17:38:05 -08:00 committed by GitHub
parent 60690a7e1d
commit 26a866a202
37 changed files with 363 additions and 466 deletions

View file

@ -3,9 +3,7 @@
[![MinIO](https://raw.githubusercontent.com/minio/minio/master/.github/logo.svg?sanitize=true)](https://min.io)
MinIO is an object storage server released under Apache License v2.0. It is compatible with Amazon S3 cloud storage service. It is best suited for storing unstructured data such as photos, videos, log files, backups and container / VM images. Size of an object can range from a few KBs to a maximum of 5TB.
MinIO server is light enough to be bundled with the application stack, similar to NodeJS, Redis and MySQL.
MinIO is High Performance Object Storage released under Apache License v2.0. It is API compatible with Amazon S3 cloud storage service. Use MinIO to build high performance infrastructure for machine learning, analytics and application data workloads.
## Docker Container
### Stable

View file

@ -24,6 +24,7 @@ import (
"io"
"net/http"
"strconv"
"strings"
"github.com/gorilla/mux"
"github.com/minio/minio/cmd/config"
@ -147,15 +148,14 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
}
}
defaultKVS := configDefaultKVS()
oldCfg := cfg.Clone()
scanner := bufio.NewScanner(bytes.NewReader(kvBytes))
for scanner.Scan() {
// Skip any empty lines
if scanner.Text() == "" {
// Skip any empty lines, or comment like characters
if scanner.Text() == "" || strings.HasPrefix(scanner.Text(), config.KvComment) {
continue
}
if err = cfg.SetKVS(scanner.Text(), defaultKVS); err != nil {
if err = cfg.SetKVS(scanner.Text()); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@ -307,7 +307,6 @@ func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r
}
}
defaultKVS := configDefaultKVS()
oldCfg := cfg.Clone()
scanner := bufio.NewScanner(bytes.NewReader(kvBytes))
for scanner.Scan() {
@ -315,7 +314,7 @@ func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r
if scanner.Text() == "" {
continue
}
if err = cfg.SetKVS(scanner.Text(), defaultKVS); err != nil {
if err = cfg.SetKVS(scanner.Text()); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

View file

@ -204,10 +204,8 @@ func handleCommonEnvVars() {
// in-place update is off.
globalInplaceUpdateDisabled = strings.EqualFold(env.Get(config.EnvUpdate, config.StateOn), config.StateOff)
accessKey := env.Get(config.EnvAccessKey, "")
secretKey := env.Get(config.EnvSecretKey, "")
if accessKey != "" && secretKey != "" {
cred, err := auth.CreateCredentials(accessKey, secretKey)
if env.IsSet(config.EnvAccessKey) || env.IsSet(config.EnvSecretKey) {
cred, err := auth.CreateCredentials(env.Get(config.EnvAccessKey, ""), env.Get(config.EnvSecretKey, ""))
if err != nil {
logger.Fatal(config.ErrInvalidCredentials(err),
"Unable to validate credentials inherited from the shell environment")

View file

@ -261,28 +261,28 @@ func lookupConfigs(s config.Config) (err error) {
}
var helpMap = map[string]config.HelpKV{
config.RegionSubSys: config.RegionHelp,
config.WormSubSys: config.WormHelp,
config.EtcdSubSys: etcd.Help,
config.CacheSubSys: cache.Help,
config.CompressionSubSys: compress.Help,
config.StorageClassSubSys: storageclass.Help,
config.IdentityOpenIDSubSys: openid.Help,
config.IdentityLDAPSubSys: xldap.Help,
config.PolicyOPASubSys: opa.Help,
config.KmsVaultSubSys: crypto.Help,
config.LoggerHTTPSubSys: logger.Help,
config.LoggerHTTPAuditSubSys: logger.HelpAudit,
config.NotifyAMQPSubSys: notify.HelpAMQP,
config.NotifyKafkaSubSys: notify.HelpKafka,
config.NotifyMQTTSubSys: notify.HelpMQTT,
config.NotifyNATSSubSys: notify.HelpNATS,
config.NotifyNSQSubSys: notify.HelpNSQ,
config.NotifyMySQLSubSys: notify.HelpMySQL,
config.NotifyPostgresSubSys: notify.HelpPostgres,
config.NotifyRedisSubSys: notify.HelpRedis,
config.NotifyWebhookSubSys: notify.HelpWebhook,
config.NotifyESSubSys: notify.HelpES,
config.RegionSubSys: config.RegionHelp,
config.WormSubSys: config.WormHelp,
config.EtcdSubSys: etcd.Help,
config.CacheSubSys: cache.Help,
config.CompressionSubSys: compress.Help,
config.StorageClassSubSys: storageclass.Help,
config.IdentityOpenIDSubSys: openid.Help,
config.IdentityLDAPSubSys: xldap.Help,
config.PolicyOPASubSys: opa.Help,
config.KmsVaultSubSys: crypto.Help,
config.LoggerWebhookSubSys: logger.Help,
config.AuditWebhookSubSys: logger.HelpAudit,
config.NotifyAMQPSubSys: notify.HelpAMQP,
config.NotifyKafkaSubSys: notify.HelpKafka,
config.NotifyMQTTSubSys: notify.HelpMQTT,
config.NotifyNATSSubSys: notify.HelpNATS,
config.NotifyNSQSubSys: notify.HelpNSQ,
config.NotifyMySQLSubSys: notify.HelpMySQL,
config.NotifyPostgresSubSys: notify.HelpPostgres,
config.NotifyRedisSubSys: notify.HelpRedis,
config.NotifyWebhookSubSys: notify.HelpWebhook,
config.NotifyESSubSys: notify.HelpES,
}
// GetHelp - returns help for sub-sys, a key for a sub-system or all the help.
@ -324,51 +324,8 @@ func GetHelp(subSys, key string, envOnly bool) (config.HelpKV, error) {
return help, nil
}
func configDefaultKVS() map[string]config.KVS {
m := make(map[string]config.KVS)
for k, tgt := range newServerConfig() {
m[k] = tgt[config.Default]
}
return m
}
func newServerConfig() config.Config {
srvCfg := config.New()
for k := range srvCfg {
// Initialize with default KVS
switch k {
case config.EtcdSubSys:
srvCfg[k][config.Default] = etcd.DefaultKVS
case config.CacheSubSys:
srvCfg[k][config.Default] = cache.DefaultKVS
case config.CompressionSubSys:
srvCfg[k][config.Default] = compress.DefaultKVS
case config.StorageClassSubSys:
srvCfg[k][config.Default] = storageclass.DefaultKVS
case config.IdentityLDAPSubSys:
srvCfg[k][config.Default] = xldap.DefaultKVS
case config.IdentityOpenIDSubSys:
srvCfg[k][config.Default] = openid.DefaultKVS
case config.PolicyOPASubSys:
srvCfg[k][config.Default] = opa.DefaultKVS
case config.WormSubSys:
srvCfg[k][config.Default] = config.DefaultWormKVS
case config.RegionSubSys:
srvCfg[k][config.Default] = config.DefaultRegionKVS
case config.CredentialsSubSys:
srvCfg[k][config.Default] = config.DefaultCredentialKVS
case config.KmsVaultSubSys:
srvCfg[k][config.Default] = crypto.DefaultKVS
case config.LoggerHTTPSubSys:
srvCfg[k][config.Default] = logger.DefaultKVS
case config.LoggerHTTPAuditSubSys:
srvCfg[k][config.Default] = logger.DefaultAuditKVS
}
}
for k, v := range notify.DefaultNotificationKVS {
srvCfg[k][config.Default] = v
}
return srvCfg
return config.New()
}
// newSrvConfig - initialize a new server config, saves env parameters if
@ -392,20 +349,7 @@ func newSrvConfig(objAPI ObjectLayer) error {
}
func getValidConfig(objAPI ObjectLayer) (config.Config, error) {
srvCfg, err := readServerConfig(context.Background(), objAPI)
if err != nil {
return nil, err
}
defaultKVS := configDefaultKVS()
for _, k := range config.SubSystems.ToSlice() {
_, ok := srvCfg[k][config.Default]
if !ok {
// Populate default configs for any new
// sub-systems added automatically.
srvCfg[k][config.Default] = defaultKVS[k]
}
}
return srvCfg, nil
return readServerConfig(context.Background(), objAPI)
}
// loadConfig - loads a new config from disk, overrides params

View file

@ -25,16 +25,15 @@ import (
// SetCacheConfig - One time migration code needed, for migrating from older config to new for Cache.
func SetCacheConfig(s config.Config, cfg Config) {
if len(cfg.Drives) == 0 {
// Do not save cache if no settings available.
return
}
s[config.CacheSubSys][config.Default] = DefaultKVS
s[config.CacheSubSys][config.Default][Drives] = strings.Join(cfg.Drives, cacheDelimiter)
s[config.CacheSubSys][config.Default][Exclude] = strings.Join(cfg.Exclude, cacheDelimiter)
s[config.CacheSubSys][config.Default][Expiry] = fmt.Sprintf("%d", cfg.Expiry)
s[config.CacheSubSys][config.Default][Quota] = fmt.Sprintf("%d", cfg.MaxUse)
s[config.CacheSubSys][config.Default][config.State] = func() string {
if len(cfg.Drives) > 0 {
return config.StateOn
}
return config.StateOff
}()
s[config.CacheSubSys][config.Default][config.State] = config.StateOn
s[config.CacheSubSys][config.Default][config.Comment] = "Settings for Cache, after migrating config"
}

View file

@ -73,6 +73,10 @@ func LookupConfig(kvs config.KVS) (Config, error) {
// Check if cache is explicitly disabled
stateBool, err := config.ParseBool(env.Get(EnvCacheState, kvs.Get(config.State)))
if err != nil {
// Parsing failures happen due to empty KVS, ignore it.
if kvs.Empty() {
return cfg, nil
}
return cfg, err
}

View file

@ -83,6 +83,10 @@ func LookupConfig(kvs config.KVS) (Config, error) {
}
cfg.Enabled, err = config.ParseBool(compress)
if err != nil {
// Parsing failures happen due to empty KVS, ignore it.
if kvs.Empty() {
return cfg, nil
}
return cfg, err
}
if !cfg.Enabled {

View file

@ -30,13 +30,12 @@ const (
// SetCompressionConfig - One time migration code needed, for migrating from older config to new for Compression.
func SetCompressionConfig(s config.Config, cfg Config) {
if !cfg.Enabled {
// No need to save disabled settings in new config.
return
}
s[config.CompressionSubSys][config.Default] = config.KVS{
config.State: func() string {
if cfg.Enabled {
return config.StateOn
}
return config.StateOff
}(),
config.State: config.StateOn,
config.Comment: "Settings for Compression, after migrating config",
Extensions: strings.Join(cfg.Extensions, config.ValueSeparator),
MimeTypes: strings.Join(cfg.MimeTypes, config.ValueSeparator),

View file

@ -57,19 +57,19 @@ const (
// Top level config constants.
const (
CredentialsSubSys = "credentials"
PolicyOPASubSys = "policy_opa"
IdentityOpenIDSubSys = "identity_openid"
IdentityLDAPSubSys = "identity_ldap"
WormSubSys = "worm"
CacheSubSys = "cache"
RegionSubSys = "region"
EtcdSubSys = "etcd"
StorageClassSubSys = "storageclass"
CompressionSubSys = "compression"
KmsVaultSubSys = "kms_vault"
LoggerHTTPSubSys = "logger_http"
LoggerHTTPAuditSubSys = "logger_http_audit"
CredentialsSubSys = "credentials"
PolicyOPASubSys = "policy_opa"
IdentityOpenIDSubSys = "identity_openid"
IdentityLDAPSubSys = "identity_ldap"
WormSubSys = "worm"
CacheSubSys = "cache"
RegionSubSys = "region"
EtcdSubSys = "etcd"
StorageClassSubSys = "storageclass"
CompressionSubSys = "compression"
KmsVaultSubSys = "kms_vault"
LoggerWebhookSubSys = "logger_webhook"
AuditWebhookSubSys = "audit_webhook"
// Add new constants here if you add new fields to config.
)
@ -100,8 +100,8 @@ var SubSystems = set.CreateStringSet([]string{
StorageClassSubSys,
CompressionSubSys,
KmsVaultSubSys,
LoggerHTTPSubSys,
LoggerHTTPAuditSubSys,
LoggerWebhookSubSys,
AuditWebhookSubSys,
PolicyOPASubSys,
IdentityLDAPSubSys,
IdentityOpenIDSubSys,
@ -137,7 +137,7 @@ const (
SubSystemSeparator = madmin.SubSystemSeparator
KvSeparator = madmin.KvSeparator
KvSpaceSeparator = madmin.KvSpaceSeparator
KvComment = madmin.KvComment
KvComment = `#`
KvNewline = madmin.KvNewline
KvDoubleQuote = madmin.KvDoubleQuote
KvSingleQuote = madmin.KvSingleQuote
@ -151,9 +151,18 @@ const (
// to operate on list of key values.
type KVS map[string]string
// Empty - return if kv is empty
func (kvs KVS) Empty() bool {
return len(kvs) == 0
}
func (kvs KVS) String() string {
var s strings.Builder
for k, v := range kvs {
// Do not need to print if state is on
if k == State && v == StateOn {
continue
}
s.WriteString(k)
s.WriteString(KvSeparator)
s.WriteString(KvDoubleQuote)
@ -215,8 +224,13 @@ func LookupCreds(kv KVS) (auth.Credentials, error) {
if err := CheckValidKeys(CredentialsSubSys, kv, DefaultCredentialKVS); err != nil {
return auth.Credentials{}, err
}
return auth.CreateCredentials(env.Get(EnvAccessKey, kv.Get(AccessKey)),
env.Get(EnvSecretKey, kv.Get(SecretKey)))
accessKey := env.Get(EnvAccessKey, kv.Get(AccessKey))
secretKey := env.Get(EnvSecretKey, kv.Get(SecretKey))
if accessKey == "" && secretKey == "" {
accessKey = auth.DefaultAccessKey
secretKey = auth.DefaultSecretKey
}
return auth.CreateCredentials(accessKey, secretKey)
}
// LookupRegion - get current region.
@ -348,7 +362,8 @@ func (c Config) DelKVS(s string) error {
delete(c[subSystemValue[0]], subSystemValue[1])
return nil
}
return Error(fmt.Sprintf("default config for '%s' sub-system cannot be removed", s))
delete(c[subSystemValue[0]], Default)
return nil
}
// This function is needed, to trim off single or double quotes, creeping into the values.
@ -373,7 +388,7 @@ func (c Config) Clone() Config {
}
// SetKVS - set specific key values per sub-system.
func (c Config) SetKVS(s string, defaultKVS map[string]KVS) error {
func (c Config) SetKVS(s string) error {
if len(s) == 0 {
return Error("input arguments cannot be empty")
}
@ -418,9 +433,7 @@ func (c Config) SetKVS(s string, defaultKVS map[string]KVS) error {
}
_, ok := c[subSystemValue[0]][tgt]
if !ok {
c[subSystemValue[0]][tgt] = defaultKVS[subSystemValue[0]]
comment := fmt.Sprintf("Settings for sub-system target %s:%s", subSystemValue[0], tgt)
c[subSystemValue[0]][tgt][Comment] = comment
c[subSystemValue[0]][tgt] = KVS{}
}
for k, v := range kvs {

View file

@ -129,9 +129,9 @@ func lookupLegacyConfig(rootCAs *x509.CertPool) (Config, error) {
}
// LookupConfig - Initialize new etcd config.
func LookupConfig(kv config.KVS, rootCAs *x509.CertPool) (Config, error) {
func LookupConfig(kvs config.KVS, rootCAs *x509.CertPool) (Config, error) {
cfg := Config{}
if err := config.CheckValidKeys(config.EtcdSubSys, kv, DefaultKVS); err != nil {
if err := config.CheckValidKeys(config.EtcdSubSys, kvs, DefaultKVS); err != nil {
return cfg, err
}
@ -152,8 +152,11 @@ func LookupConfig(kv config.KVS, rootCAs *x509.CertPool) (Config, error) {
}
}
stateBool, err = config.ParseBool(env.Get(EnvEtcdState, kv.Get(config.State)))
stateBool, err = config.ParseBool(env.Get(EnvEtcdState, kvs.Get(config.State)))
if err != nil {
if kvs.Empty() {
return cfg, nil
}
return cfg, err
}
@ -161,7 +164,7 @@ func LookupConfig(kv config.KVS, rootCAs *x509.CertPool) (Config, error) {
return cfg, nil
}
endpoints := env.Get(EnvEtcdEndpoints, kv.Get(Endpoints))
endpoints := env.Get(EnvEtcdEndpoints, kvs.Get(Endpoints))
if endpoints == "" {
return cfg, config.Error("'endpoints' key cannot be empty to enable etcd")
}
@ -175,15 +178,15 @@ func LookupConfig(kv config.KVS, rootCAs *x509.CertPool) (Config, error) {
cfg.DialTimeout = defaultDialTimeout
cfg.DialKeepAliveTime = defaultDialKeepAlive
cfg.Endpoints = etcdEndpoints
cfg.CoreDNSPath = env.Get(EnvEtcdCoreDNSPath, kv.Get(CoreDNSPath))
cfg.CoreDNSPath = env.Get(EnvEtcdCoreDNSPath, kvs.Get(CoreDNSPath))
if etcdSecure {
cfg.TLS = &tls.Config{
RootCAs: rootCAs,
}
// This is only to support client side certificate authentication
// https://coreos.com/etcd/docs/latest/op-guide/security.html
etcdClientCertFile := env.Get(EnvEtcdClientCert, kv.Get(ClientCert))
etcdClientCertKey := env.Get(EnvEtcdClientCertKey, kv.Get(ClientCertKey))
etcdClientCertFile := env.Get(EnvEtcdClientCert, kvs.Get(ClientCert))
etcdClientCertKey := env.Get(EnvEtcdClientCertKey, kvs.Get(ClientCertKey))
if etcdClientCertFile != "" && etcdClientCertKey != "" {
cfg.TLS.GetClientCertificate = func(unused *tls.CertificateRequestInfo) (*tls.Certificate, error) {
cert, err := tls.LoadX509KeyPair(etcdClientCertFile, etcdClientCertKey)

View file

@ -115,6 +115,9 @@ func Lookup(kvs config.KVS, rootCAs *x509.CertPool) (l Config, err error) {
}
stateBool, err := config.ParseBool(env.Get(EnvLDAPState, kvs.Get(config.State)))
if err != nil {
if kvs.Empty() {
return l, nil
}
return l, err
}
ldapServer := env.Get(EnvServerAddr, kvs.Get(ServerAddr))

View file

@ -20,13 +20,12 @@ import "github.com/minio/minio/cmd/config"
// SetIdentityLDAP - One time migration code needed, for migrating from older config to new for LDAPConfig.
func SetIdentityLDAP(s config.Config, ldapArgs Config) {
if !ldapArgs.Enabled {
// ldap not enabled no need to preserve it in new settings.
return
}
s[config.IdentityLDAPSubSys][config.Default] = config.KVS{
config.State: func() string {
if !ldapArgs.Enabled {
return config.StateOff
}
return config.StateOn
}(),
config.State: config.StateOn,
config.Comment: "Settings for LDAP, after migrating config",
ServerAddr: ldapArgs.ServerAddr,
STSExpiry: ldapArgs.STSExpiryDuration,

View file

@ -267,29 +267,32 @@ var (
)
// LookupConfig lookup jwks from config, override with any ENVs.
func LookupConfig(kv config.KVS, transport *http.Transport, closeRespFn func(io.ReadCloser)) (c Config, err error) {
if err = config.CheckValidKeys(config.IdentityOpenIDSubSys, kv, DefaultKVS); err != nil {
func LookupConfig(kvs config.KVS, transport *http.Transport, closeRespFn func(io.ReadCloser)) (c Config, err error) {
if err = config.CheckValidKeys(config.IdentityOpenIDSubSys, kvs, DefaultKVS); err != nil {
return c, err
}
stateBool, err := config.ParseBool(env.Get(EnvIdentityOpenIDState, kv.Get(config.State)))
stateBool, err := config.ParseBool(env.Get(EnvIdentityOpenIDState, kvs.Get(config.State)))
if err != nil {
if kvs.Empty() {
return c, nil
}
return c, err
}
jwksURL := env.Get(EnvIamJwksURL, "") // Legacy
if jwksURL == "" {
jwksURL = env.Get(EnvIdentityOpenIDJWKSURL, kv.Get(JwksURL))
jwksURL = env.Get(EnvIdentityOpenIDJWKSURL, kvs.Get(JwksURL))
}
c = Config{
ClaimPrefix: env.Get(EnvIdentityOpenIDClaimPrefix, kv.Get(ClaimPrefix)),
ClaimPrefix: env.Get(EnvIdentityOpenIDClaimPrefix, kvs.Get(ClaimPrefix)),
publicKeys: make(map[string]crypto.PublicKey),
transport: transport,
closeRespFn: closeRespFn,
}
configURL := env.Get(EnvIdentityOpenIDURL, kv.Get(ConfigURL))
configURL := env.Get(EnvIdentityOpenIDURL, kvs.Get(ConfigURL))
if configURL != "" {
c.URL, err = xnet.ParseHTTPURL(configURL)
if err != nil {

View file

@ -25,24 +25,15 @@ const (
// SetIdentityOpenID - One time migration code needed, for migrating from older config to new for OpenIDConfig.
func SetIdentityOpenID(s config.Config, cfg Config) {
if cfg.JWKS.URL == nil || cfg.JWKS.URL.String() == "" {
// No need to save not-enabled settings in new config.
return
}
s[config.IdentityOpenIDSubSys][config.Default] = config.KVS{
config.State: func() string {
if cfg.JWKS.URL == nil {
return config.StateOff
}
if cfg.JWKS.URL.String() == "" {
return config.StateOff
}
return config.StateOn
}(),
config.State: config.StateOn,
config.Comment: "Settings for OpenID, after migrating config",
JwksURL: func() string {
if cfg.JWKS.URL != nil {
return cfg.JWKS.URL.String()
}
return ""
}(),
ConfigURL: "",
ClaimPrefix: "",
JwksURL: cfg.JWKS.URL.String(),
ConfigURL: "",
ClaimPrefix: "",
}
}

View file

@ -42,14 +42,13 @@ func SetRegion(c Config, name string) {
// SetWorm - One time migration code needed, for migrating from older config to new for Worm mode.
func SetWorm(c Config, b bool) {
if !b {
// We don't save disabled configs
return
}
// Set the new value.
c[WormSubSys][Default] = KVS{
State: func() string {
if b {
return StateOn
}
return StateOff
}(),
State: StateOn,
Comment: "Settings for WORM, after migrating config",
}
}

View file

@ -48,8 +48,8 @@ var (
target.KafkaSASLUsername: "Username for SASL/PLAIN or SASL/SCRAM authentication",
target.KafkaSASLPassword: "Password for SASL/PLAIN or SASL/SCRAM authentication",
target.KafkaTLSClientAuth: "ClientAuth determines the Kafka server's policy for TLS client auth",
target.KafkaSASLEnable: "Set this to 'on' to enable SASL authentication",
target.KafkaTLSEnable: "Set this to 'on' to enable TLS",
target.KafkaSASL: "Set this to 'on' to enable SASL authentication",
target.KafkaTLS: "Set this to 'on' to enable TLS",
target.KafkaTLSSkipVerify: "Set this to 'on' to disable client verification of server certificate chain",
target.KafkaQueueLimit: "Enable persistent event store queue limit, defaults to '10000'",
target.KafkaQueueDir: "Local directory where events are stored eg: '/home/events'",
@ -139,7 +139,7 @@ var (
target.NATSToken: "Token to be used when connecting to a server",
target.NATSSecure: "Set this to 'on', enables TLS secure connections that skip server verification (not recommended)",
target.NATSPingInterval: "Client ping commands interval to the server, disabled by default",
target.NATSStreamingEnable: "Set this to 'on', to use streaming NATS server",
target.NATSStreaming: "Set this to 'on', to use streaming NATS server",
target.NATSStreamingAsync: "Set this to 'on', to enable asynchronous publish, process the ACK or error state",
target.NATSStreamingMaxPubAcksInFlight: "Specifies how many messages can be published without getting ACKs back from NATS streaming server",
target.NATSStreamingClusterID: "Unique ID for the NATS streaming cluster",
@ -152,7 +152,7 @@ var (
config.Comment: "A comment to describe the NSQ target setting",
target.NSQAddress: "NSQ server address eg: '127.0.0.1:4150'",
target.NSQTopic: "NSQ topic unique per target",
target.NSQTLSEnable: "Set this to 'on', to enable TLS negotiation",
target.NSQTLS: "Set this to 'on', to enable TLS negotiation",
target.NSQTLSSkipVerify: "Set this to 'on', to disable client verification of server certificates",
target.NSQQueueLimit: "Enable persistent event store queue limit, defaults to '10000'",
target.NSQQueueDir: "Local directory where events are stored eg: '/home/events'",

View file

@ -11,17 +11,16 @@ import (
// SetNotifyKafka - helper for config migration from older config.
func SetNotifyKafka(s config.Config, kName string, cfg target.KafkaArgs) error {
if !cfg.Enable {
return nil
}
if err := cfg.Validate(); err != nil {
return err
}
s[config.NotifyKafkaSubSys][kName] = config.KVS{
config.State: func() string {
if cfg.Enable {
return config.StateOn
}
return config.StateOff
}(),
config.State: config.StateOn,
target.KafkaBrokers: func() string {
var brokers []string
for _, broker := range cfg.Brokers {
@ -33,10 +32,10 @@ func SetNotifyKafka(s config.Config, kName string, cfg target.KafkaArgs) error {
target.KafkaTopic: cfg.Topic,
target.KafkaQueueDir: cfg.QueueDir,
target.KafkaQueueLimit: strconv.Itoa(int(cfg.QueueLimit)),
target.KafkaTLSEnable: config.FormatBool(cfg.TLS.Enable),
target.KafkaTLS: config.FormatBool(cfg.TLS.Enable),
target.KafkaTLSSkipVerify: config.FormatBool(cfg.TLS.SkipVerify),
target.KafkaTLSClientAuth: strconv.Itoa(int(cfg.TLS.ClientAuth)),
target.KafkaSASLEnable: config.FormatBool(cfg.SASL.Enable),
target.KafkaSASL: config.FormatBool(cfg.SASL.Enable),
target.KafkaSASLUsername: cfg.SASL.User,
target.KafkaSASLPassword: cfg.SASL.Password,
}
@ -45,17 +44,16 @@ func SetNotifyKafka(s config.Config, kName string, cfg target.KafkaArgs) error {
// SetNotifyAMQP - helper for config migration from older config.
func SetNotifyAMQP(s config.Config, amqpName string, cfg target.AMQPArgs) error {
if !cfg.Enable {
return nil
}
if err := cfg.Validate(); err != nil {
return err
}
s[config.NotifyAMQPSubSys][amqpName] = config.KVS{
config.State: func() string {
if cfg.Enable {
return config.StateOn
}
return config.StateOff
}(),
config.State: config.StateOn,
config.Comment: "Settings for AMQP notification, after migrating config",
target.AmqpURL: cfg.URL.String(),
target.AmqpExchange: cfg.Exchange,
@ -76,17 +74,16 @@ func SetNotifyAMQP(s config.Config, amqpName string, cfg target.AMQPArgs) error
// SetNotifyES - helper for config migration from older config.
func SetNotifyES(s config.Config, esName string, cfg target.ElasticsearchArgs) error {
if !cfg.Enable {
return nil
}
if err := cfg.Validate(); err != nil {
return err
}
s[config.NotifyESSubSys][esName] = config.KVS{
config.State: func() string {
if cfg.Enable {
return config.StateOn
}
return config.StateOff
}(),
config.State: config.StateOn,
config.Comment: "Settings for Elasticsearch notification, after migrating config",
target.ElasticFormat: cfg.Format,
target.ElasticURL: cfg.URL.String(),
@ -100,17 +97,16 @@ func SetNotifyES(s config.Config, esName string, cfg target.ElasticsearchArgs) e
// SetNotifyRedis - helper for config migration from older config.
func SetNotifyRedis(s config.Config, redisName string, cfg target.RedisArgs) error {
if !cfg.Enable {
return nil
}
if err := cfg.Validate(); err != nil {
return err
}
s[config.NotifyRedisSubSys][redisName] = config.KVS{
config.State: func() string {
if cfg.Enable {
return config.StateOn
}
return config.StateOff
}(),
config.State: config.StateOn,
config.Comment: "Settings for Redis notification, after migrating config",
target.RedisFormat: cfg.Format,
target.RedisAddress: cfg.Addr.String(),
@ -125,17 +121,16 @@ func SetNotifyRedis(s config.Config, redisName string, cfg target.RedisArgs) err
// SetNotifyWebhook - helper for config migration from older config.
func SetNotifyWebhook(s config.Config, whName string, cfg target.WebhookArgs) error {
if !cfg.Enable {
return nil
}
if err := cfg.Validate(); err != nil {
return err
}
s[config.NotifyWebhookSubSys][whName] = config.KVS{
config.State: func() string {
if cfg.Enable {
return config.StateOn
}
return config.StateOff
}(),
config.State: config.StateOn,
config.Comment: "Settings for Webhook notification, after migrating config",
target.WebhookEndpoint: cfg.Endpoint.String(),
target.WebhookAuthToken: cfg.AuthToken,
@ -148,17 +143,16 @@ func SetNotifyWebhook(s config.Config, whName string, cfg target.WebhookArgs) er
// SetNotifyPostgres - helper for config migration from older config.
func SetNotifyPostgres(s config.Config, psqName string, cfg target.PostgreSQLArgs) error {
if !cfg.Enable {
return nil
}
if err := cfg.Validate(); err != nil {
return err
}
s[config.NotifyPostgresSubSys][psqName] = config.KVS{
config.State: func() string {
if cfg.Enable {
return config.StateOn
}
return config.StateOff
}(),
config.State: config.StateOn,
config.Comment: "Settings for Postgres notification, after migrating config",
target.PostgresFormat: cfg.Format,
target.PostgresConnectionString: cfg.ConnectionString,
@ -177,21 +171,20 @@ func SetNotifyPostgres(s config.Config, psqName string, cfg target.PostgreSQLArg
// SetNotifyNSQ - helper for config migration from older config.
func SetNotifyNSQ(s config.Config, nsqName string, cfg target.NSQArgs) error {
if !cfg.Enable {
return nil
}
if err := cfg.Validate(); err != nil {
return err
}
s[config.NotifyNSQSubSys][nsqName] = config.KVS{
config.State: func() string {
if cfg.Enable {
return config.StateOn
}
return config.StateOff
}(),
config.State: config.StateOn,
config.Comment: "Settings for NSQ notification, after migrating config",
target.NSQAddress: cfg.NSQDAddress.String(),
target.NSQTopic: cfg.Topic,
target.NSQTLSEnable: config.FormatBool(cfg.TLS.Enable),
target.NSQTLS: config.FormatBool(cfg.TLS.Enable),
target.NSQTLSSkipVerify: config.FormatBool(cfg.TLS.SkipVerify),
target.NSQQueueDir: cfg.QueueDir,
target.NSQQueueLimit: strconv.Itoa(int(cfg.QueueLimit)),
@ -202,17 +195,16 @@ func SetNotifyNSQ(s config.Config, nsqName string, cfg target.NSQArgs) error {
// SetNotifyNATS - helper for config migration from older config.
func SetNotifyNATS(s config.Config, natsName string, cfg target.NATSArgs) error {
if !cfg.Enable {
return nil
}
if err := cfg.Validate(); err != nil {
return err
}
s[config.NotifyNATSSubSys][natsName] = config.KVS{
config.State: func() string {
if cfg.Enable {
return config.StateOn
}
return config.StateOff
}(),
config.State: config.StateOn,
config.Comment: "Settings for NATS notification, after migrating config",
target.NATSAddress: cfg.Address.String(),
target.NATSSubject: cfg.Subject,
@ -223,7 +215,7 @@ func SetNotifyNATS(s config.Config, natsName string, cfg target.NATSArgs) error
target.NATSPingInterval: strconv.FormatInt(cfg.PingInterval, 10),
target.NATSQueueDir: cfg.QueueDir,
target.NATSQueueLimit: strconv.Itoa(int(cfg.QueueLimit)),
target.NATSStreamingEnable: func() string {
target.NATSStreaming: func() string {
if cfg.Streaming.Enable {
return config.StateOn
}
@ -239,17 +231,16 @@ func SetNotifyNATS(s config.Config, natsName string, cfg target.NATSArgs) error
// SetNotifyMySQL - helper for config migration from older config.
func SetNotifyMySQL(s config.Config, sqlName string, cfg target.MySQLArgs) error {
if !cfg.Enable {
return nil
}
if err := cfg.Validate(); err != nil {
return err
}
s[config.NotifyMySQLSubSys][sqlName] = config.KVS{
config.State: func() string {
if cfg.Enable {
return config.StateOn
}
return config.StateOff
}(),
config.State: config.StateOn,
config.Comment: "Settings for MySQL notification, after migrating config",
target.MySQLFormat: cfg.Format,
target.MySQLDSNString: cfg.DSN,
@ -268,17 +259,16 @@ func SetNotifyMySQL(s config.Config, sqlName string, cfg target.MySQLArgs) error
// SetNotifyMQTT - helper for config migration from older config.
func SetNotifyMQTT(s config.Config, mqttName string, cfg target.MQTTArgs) error {
if !cfg.Enable {
return nil
}
if err := cfg.Validate(); err != nil {
return err
}
s[config.NotifyMQTTSubSys][mqttName] = config.KVS{
config.State: func() string {
if cfg.Enable {
return config.StateOn
}
return config.StateOff
}(),
config.State: config.StateOn,
config.Comment: "Settings for MQTT notification, after migrating config",
target.MqttBroker: cfg.Broker.String(),
target.MqttTopic: cfg.Topic,

View file

@ -347,8 +347,8 @@ var (
target.KafkaSASLUsername: "",
target.KafkaSASLPassword: "",
target.KafkaTLSClientAuth: "0",
target.KafkaSASLEnable: config.StateOff,
target.KafkaTLSEnable: config.StateOff,
target.KafkaSASL: config.StateOff,
target.KafkaTLS: config.StateOff,
target.KafkaTLSSkipVerify: config.StateOff,
target.KafkaQueueLimit: "0",
target.KafkaQueueDir: "",
@ -427,7 +427,7 @@ func GetNotifyKafka(kafkaKVS map[string]config.KVS) (map[string]target.KafkaArgs
QueueLimit: queueLimit,
}
tlsEnableEnv := target.EnvKafkaTLSEnable
tlsEnableEnv := target.EnvKafkaTLS
if k != config.Default {
tlsEnableEnv = tlsEnableEnv + config.Default + k
}
@ -435,7 +435,7 @@ func GetNotifyKafka(kafkaKVS map[string]config.KVS) (map[string]target.KafkaArgs
if k != config.Default {
tlsSkipVerifyEnv = tlsSkipVerifyEnv + config.Default + k
}
kafkaArgs.TLS.Enable = env.Get(tlsEnableEnv, kv.Get(target.KafkaTLSEnable)) == config.StateOn
kafkaArgs.TLS.Enable = env.Get(tlsEnableEnv, kv.Get(target.KafkaTLS)) == config.StateOn
kafkaArgs.TLS.SkipVerify = env.Get(tlsSkipVerifyEnv, kv.Get(target.KafkaTLSSkipVerify)) == config.StateOn
kafkaArgs.TLS.ClientAuth = tls.ClientAuthType(clientAuth)
@ -451,7 +451,7 @@ func GetNotifyKafka(kafkaKVS map[string]config.KVS) (map[string]target.KafkaArgs
if k != config.Default {
saslPasswordEnv = saslPasswordEnv + config.Default + k
}
kafkaArgs.SASL.Enable = env.Get(saslEnableEnv, kv.Get(target.KafkaSASLEnable)) == config.StateOn
kafkaArgs.SASL.Enable = env.Get(saslEnableEnv, kv.Get(target.KafkaSASL)) == config.StateOn
kafkaArgs.SASL.User = env.Get(saslUsernameEnv, kv.Get(target.KafkaSASLUsername))
kafkaArgs.SASL.Password = env.Get(saslPasswordEnv, kv.Get(target.KafkaSASLPassword))
@ -711,7 +711,7 @@ var (
target.NATSPingInterval: "0",
target.NATSQueueLimit: "0",
target.NATSQueueDir: "",
target.NATSStreamingEnable: config.StateOff,
target.NATSStreaming: config.StateOff,
target.NATSStreamingAsync: config.StateOff,
target.NATSStreamingMaxPubAcksInFlight: "0",
target.NATSStreamingClusterID: "",
@ -808,12 +808,12 @@ func GetNotifyNATS(natsKVS map[string]config.KVS) (map[string]target.NATSArgs, e
QueueLimit: queueLimit,
}
streamingEnableEnv := target.EnvNATSStreamingEnable
streamingEnableEnv := target.EnvNATSStreaming
if k != config.Default {
streamingEnableEnv = streamingEnableEnv + config.Default + k
}
streamingEnabled := env.Get(streamingEnableEnv, kv.Get(target.NATSStreamingEnable)) == config.StateOn
streamingEnabled := env.Get(streamingEnableEnv, kv.Get(target.NATSStreaming)) == config.StateOn
if streamingEnabled {
asyncEnv := target.EnvNATSStreamingAsync
if k != config.Default {
@ -854,7 +854,7 @@ var (
config.Comment: "Default settings for NSQ notification",
target.NSQAddress: "",
target.NSQTopic: "",
target.NSQTLSEnable: config.StateOff,
target.NSQTLS: config.StateOff,
target.NSQTLSSkipVerify: config.StateOff,
target.NSQQueueLimit: "0",
target.NSQQueueDir: "",
@ -886,7 +886,7 @@ func GetNotifyNSQ(nsqKVS map[string]config.KVS) (map[string]target.NSQArgs, erro
if err != nil {
return nil, err
}
tlsEnableEnv := target.EnvNSQTLSEnable
tlsEnableEnv := target.EnvNSQTLS
if k != config.Default {
tlsEnableEnv = tlsEnableEnv + config.Default + k
}
@ -920,7 +920,7 @@ func GetNotifyNSQ(nsqKVS map[string]config.KVS) (map[string]target.NSQArgs, erro
QueueDir: env.Get(queueDirEnv, kv.Get(target.NSQQueueDir)),
QueueLimit: queueLimit,
}
nsqArgs.TLS.Enable = env.Get(tlsEnableEnv, kv.Get(target.NSQTLSEnable)) == config.StateOn
nsqArgs.TLS.Enable = env.Get(tlsEnableEnv, kv.Get(target.NSQTLS)) == config.StateOn
nsqArgs.TLS.SkipVerify = env.Get(tlsSkipVerifyEnv, kv.Get(target.NSQTLSSkipVerify)) == config.StateOn
if err = nsqArgs.Validate(); err != nil {

View file

@ -28,23 +28,14 @@ const (
// SetPolicyOPAConfig - One time migration code needed, for migrating from older config to new for PolicyOPAConfig.
func SetPolicyOPAConfig(s config.Config, opaArgs Args) {
if opaArgs.URL == nil || opaArgs.URL.String() == "" {
// Do not enable if opaArgs was empty.
return
}
s[config.PolicyOPASubSys][config.Default] = config.KVS{
config.State: func() string {
if opaArgs.URL == nil {
return config.StateOff
}
if opaArgs.URL.String() == "" {
return config.StateOff
}
return config.StateOn
}(),
config.State: config.StateOn,
config.Comment: "Settings for OPA, after migrating config",
URL: func() string {
if opaArgs.URL != nil {
return opaArgs.URL.String()
}
return ""
}(),
AuthToken: opaArgs.AuthToken,
URL: opaArgs.URL.String(),
AuthToken: opaArgs.AuthToken,
}
}

View file

@ -22,15 +22,14 @@ import (
// SetStorageClass - One time migration code needed, for migrating from older config to new for StorageClass.
func SetStorageClass(s config.Config, cfg Config) {
if len(cfg.Standard.String()) == 0 && len(cfg.RRS.String()) == 0 {
// Do not enable storage-class if no settings found.
return
}
s[config.StorageClassSubSys][config.Default] = config.KVS{
ClassStandard: cfg.Standard.String(),
ClassRRS: cfg.RRS.String(),
config.State: func() string {
if len(cfg.Standard.String()) > 0 || len(cfg.RRS.String()) > 0 {
return config.StateOn
}
return config.StateOff
}(),
ClassStandard: cfg.Standard.String(),
ClassRRS: cfg.RRS.String(),
config.State: config.StateOn,
config.Comment: "Settings for StorageClass, after migrating config",
}
}

View file

@ -222,6 +222,9 @@ func LookupConfig(kvs config.KVS, drivesPerSet int) (cfg Config, err error) {
stateBool, err := config.ParseBool(env.Get(EnvStorageClass, kvs.Get(config.State)))
if err != nil {
if kvs.Empty() {
return cfg, nil
}
return cfg, err
}
ssc := env.Get(StandardEnv, kvs.Get(ClassStandard))

View file

@ -149,8 +149,12 @@ func LookupConfig(kvs config.KVS) (KMSConfig, error) {
if kmsCfg.Vault.Enabled {
return kmsCfg, nil
}
stateBool, err := config.ParseBool(env.Get(EnvKMSVaultState, kvs.Get(config.State)))
if err != nil {
if kvs.Empty() {
return kmsCfg, nil
}
return kmsCfg, err
}
if !stateBool {

View file

@ -79,6 +79,9 @@ const (
// SetKMSConfig helper to migrate from older KMSConfig to new KV.
func SetKMSConfig(s config.Config, cfg KMSConfig) {
if cfg.Vault.Endpoint == "" {
return
}
s[config.KmsVaultSubSys][config.Default] = config.KVS{
KMSVaultEndpoint: cfg.Vault.Endpoint,
KMSVaultCAPath: cfg.Vault.CAPath,
@ -93,13 +96,8 @@ func SetKMSConfig(s config.Config, cfg KMSConfig) {
KMSVaultKeyName: cfg.Vault.Key.Name,
KMSVaultKeyVersion: strconv.Itoa(cfg.Vault.Key.Version),
KMSVaultNamespace: cfg.Vault.Namespace,
config.State: func() string {
if cfg.Vault.Endpoint != "" {
return config.StateOn
}
return config.StateOff
}(),
config.Comment: "Settings for KMS Vault, after migrating config",
config.State: config.StateOn,
config.Comment: "Settings for KMS Vault, after migrating config",
}
}

View file

@ -24,6 +24,7 @@ import (
"sync"
"github.com/minio/minio-go/v6/pkg/set"
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
iampolicy "github.com/minio/minio/pkg/iam/policy"
@ -651,7 +652,12 @@ func (sys *IAMSys) SetUserStatus(accessKey string, status madmin.AccountStatus)
uinfo := newUserIdentity(auth.Credentials{
AccessKey: accessKey,
SecretKey: cred.SecretKey,
Status: string(status),
Status: func() string {
if status == madmin.AccountEnabled {
return config.StateOn
}
return config.StateOff
}(),
})
if err := sys.store.saveUserIdentity(accessKey, false, uinfo); err != nil {
return err

View file

@ -47,11 +47,11 @@ const (
Endpoint = "endpoint"
AuthToken = "auth_token"
EnvLoggerHTTPEndpoint = "MINIO_LOGGER_HTTP_ENDPOINT"
EnvLoggerHTTPAuthToken = "MINIO_LOGGER_HTTP_AUTH_TOKEN"
EnvLoggerWebhookEndpoint = "MINIO_LOGGER_WEBHOOK_ENDPOINT"
EnvLoggerWebhookAuthToken = "MINIO_LOGGER_WEBHOOK_AUTH_TOKEN"
EnvLoggerHTTPAuditEndpoint = "MINIO_LOGGER_HTTP_AUDIT_ENDPOINT"
EnvLoggerHTTPAuditAuthToken = "MINIO_LOGGER_HTTP_AUDIT_AUTH_TOKEN"
EnvAuditWebhookEndpoint = "MINIO_AUDIT_WEBHOOK_ENDPOINT"
EnvAuditWebhookAuthToken = "MINIO_AUDIT_WEBHOOK_AUTH_TOKEN"
)
// Default KVS for loggerHTTP and loggerAuditHTTP
@ -98,21 +98,21 @@ func NewConfig() Config {
func LookupConfig(scfg config.Config) (Config, error) {
cfg := NewConfig()
envs := env.List(EnvLoggerHTTPEndpoint)
envs := env.List(EnvLoggerWebhookEndpoint)
var loggerTargets []string
for _, k := range envs {
target := strings.TrimPrefix(k, EnvLoggerHTTPEndpoint+config.Default)
if target == EnvLoggerHTTPEndpoint {
target := strings.TrimPrefix(k, EnvLoggerWebhookEndpoint+config.Default)
if target == EnvLoggerWebhookEndpoint {
target = config.Default
}
loggerTargets = append(loggerTargets, target)
}
var loggerAuditTargets []string
envs = env.List(EnvLoggerHTTPAuditEndpoint)
envs = env.List(EnvAuditWebhookEndpoint)
for _, k := range envs {
target := strings.TrimPrefix(k, EnvLoggerHTTPAuditEndpoint+config.Default)
if target == EnvLoggerHTTPAuditEndpoint {
target := strings.TrimPrefix(k, EnvAuditWebhookEndpoint+config.Default)
if target == EnvAuditWebhookEndpoint {
target = config.Default
}
loggerAuditTargets = append(loggerAuditTargets, target)
@ -128,10 +128,10 @@ func LookupConfig(scfg config.Config) (Config, error) {
loggerAuditTargets = append(loggerAuditTargets, target)
}
for starget, kv := range scfg[config.LoggerHTTPSubSys] {
subSysTarget := config.LoggerHTTPSubSys
for starget, kv := range scfg[config.LoggerWebhookSubSys] {
subSysTarget := config.LoggerWebhookSubSys
if starget != config.Default {
subSysTarget = config.LoggerHTTPSubSys + config.SubSystemSeparator + starget
subSysTarget = config.LoggerWebhookSubSys + config.SubSystemSeparator + starget
}
if err := config.CheckValidKeys(subSysTarget, kv, DefaultKVS); err != nil {
return cfg, err
@ -145,13 +145,13 @@ func LookupConfig(scfg config.Config) (Config, error) {
continue
}
endpointEnv := EnvLoggerHTTPEndpoint
endpointEnv := EnvLoggerWebhookEndpoint
if starget != config.Default {
endpointEnv = EnvLoggerHTTPEndpoint + config.Default + starget
endpointEnv = EnvLoggerWebhookEndpoint + config.Default + starget
}
authTokenEnv := EnvLoggerHTTPAuthToken
authTokenEnv := EnvLoggerWebhookAuthToken
if starget != config.Default {
authTokenEnv = EnvLoggerHTTPAuthToken + config.Default + starget
authTokenEnv = EnvLoggerWebhookAuthToken + config.Default + starget
}
cfg.HTTP[starget] = HTTP{
Enabled: true,
@ -160,10 +160,10 @@ func LookupConfig(scfg config.Config) (Config, error) {
}
}
for starget, kv := range scfg[config.LoggerHTTPAuditSubSys] {
subSysTarget := config.LoggerHTTPAuditSubSys
for starget, kv := range scfg[config.AuditWebhookSubSys] {
subSysTarget := config.AuditWebhookSubSys
if starget != config.Default {
subSysTarget = config.LoggerHTTPAuditSubSys + config.SubSystemSeparator + starget
subSysTarget = config.AuditWebhookSubSys + config.SubSystemSeparator + starget
}
if err := config.CheckValidKeys(subSysTarget, kv, DefaultAuditKVS); err != nil {
return cfg, err
@ -177,9 +177,9 @@ func LookupConfig(scfg config.Config) (Config, error) {
continue
}
endpointEnv := EnvLoggerHTTPAuditEndpoint
endpointEnv := EnvAuditWebhookEndpoint
if starget != config.Default {
endpointEnv = EnvLoggerHTTPAuditEndpoint + config.Default + starget
endpointEnv = EnvAuditWebhookEndpoint + config.Default + starget
}
legacyEndpointEnv := EnvAuditLoggerHTTPEndpoint
if starget != config.Default {
@ -189,9 +189,9 @@ func LookupConfig(scfg config.Config) (Config, error) {
if endpoint == "" {
endpoint = env.Get(endpointEnv, kv.Get(Endpoint))
}
authTokenEnv := EnvLoggerHTTPAuditAuthToken
authTokenEnv := EnvAuditWebhookAuthToken
if starget != config.Default {
authTokenEnv = EnvLoggerHTTPAuditAuthToken + config.Default + starget
authTokenEnv = EnvAuditWebhookAuthToken + config.Default + starget
}
cfg.Audit[starget] = HTTP{
Enabled: true,
@ -201,13 +201,13 @@ func LookupConfig(scfg config.Config) (Config, error) {
}
for _, target := range loggerTargets {
endpointEnv := EnvLoggerHTTPEndpoint
endpointEnv := EnvLoggerWebhookEndpoint
if target != config.Default {
endpointEnv = EnvLoggerHTTPEndpoint + config.Default + target
endpointEnv = EnvLoggerWebhookEndpoint + config.Default + target
}
authTokenEnv := EnvLoggerHTTPAuthToken
authTokenEnv := EnvLoggerWebhookAuthToken
if target != config.Default {
authTokenEnv = EnvLoggerHTTPAuthToken + config.Default + target
authTokenEnv = EnvLoggerWebhookAuthToken + config.Default + target
}
cfg.HTTP[target] = HTTP{
Enabled: true,
@ -217,9 +217,9 @@ func LookupConfig(scfg config.Config) (Config, error) {
}
for _, target := range loggerAuditTargets {
endpointEnv := EnvLoggerHTTPAuditEndpoint
endpointEnv := EnvLoggerWebhookEndpoint
if target != config.Default {
endpointEnv = EnvLoggerHTTPAuditEndpoint + config.Default + target
endpointEnv = EnvLoggerWebhookEndpoint + config.Default + target
}
legacyEndpointEnv := EnvAuditLoggerHTTPEndpoint
if target != config.Default {
@ -229,9 +229,9 @@ func LookupConfig(scfg config.Config) (Config, error) {
if endpoint == "" {
endpoint = env.Get(endpointEnv, "")
}
authTokenEnv := EnvLoggerHTTPAuditAuthToken
authTokenEnv := EnvLoggerWebhookAuthToken
if target != config.Default {
authTokenEnv = EnvLoggerHTTPAuditAuthToken + config.Default + target
authTokenEnv = EnvLoggerWebhookAuthToken + config.Default + target
}
cfg.Audit[target] = HTTP{
Enabled: true,

View file

@ -25,14 +25,13 @@ const (
// SetLoggerHTTPAudit - helper for migrating older config to newer KV format.
func SetLoggerHTTPAudit(scfg config.Config, k string, args HTTP) {
scfg[config.LoggerHTTPAuditSubSys][k] = config.KVS{
config.State: func() string {
if args.Enabled {
return config.StateOn
}
return config.StateOff
}(),
config.Comment: "Settings for HTTP Audit logging, after migrating config",
if !args.Enabled {
// Do not enable audit targets, if not enabled
return
}
scfg[config.AuditWebhookSubSys][k] = config.KVS{
config.State: config.StateOn,
config.Comment: "Settings for Webhook Audit logging, after migrating config",
Endpoint: args.Endpoint,
AuthToken: args.AuthToken,
}
@ -40,14 +39,14 @@ func SetLoggerHTTPAudit(scfg config.Config, k string, args HTTP) {
// SetLoggerHTTP helper for migrating older config to newer KV format.
func SetLoggerHTTP(scfg config.Config, k string, args HTTP) {
scfg[config.LoggerHTTPSubSys][k] = config.KVS{
config.State: func() string {
if args.Enabled {
return config.StateOn
}
return config.StateOff
}(),
config.Comment: "Settings for HTTP logging, after migrating config",
if !args.Enabled {
// Do not enable logger http targets, if not enabled
return
}
scfg[config.LoggerWebhookSubSys][k] = config.KVS{
config.State: config.StateOn,
config.Comment: "Settings for Webhook logging, after migrating config",
Endpoint: args.Endpoint,
AuthToken: args.AuthToken,
}

View file

@ -53,7 +53,7 @@ var GlobalFlags = []cli.Flag{
},
cli.BoolFlag{
Name: "compat",
Usage: "trade off performance for S3 compatibility",
Usage: "enable strict S3 compatibility by turning off certain performance optimizations",
},
}
@ -116,7 +116,6 @@ func newApp(name string) *cli.App {
// Register all commands.
registerCommand(serverCmd)
registerCommand(gatewayCmd)
registerCommand(versionCmd)
// Set up app.
cli.HelpFlag = cli.BoolFlag{
@ -128,8 +127,8 @@ func newApp(name string) *cli.App {
app.Name = name
app.Author = "MinIO, Inc."
app.Version = Version
app.Usage = "Cloud Storage Server."
app.Description = `MinIO is an Amazon S3 compatible object storage server. Use it to store photos, videos, VMs, containers, log files, or any blob of data as objects.`
app.Usage = "High Performance Object Storage"
app.Description = `Build high performance data infrastructure for machine learning, analytics and application data workloads with MinIO`
app.Flags = GlobalFlags
app.HideVersion = true // Hide `--version` flag, we already have `minio version`.
app.HideHelpCommand = true // Hide `help, h` command, we already have `minio --help`.

View file

@ -19,6 +19,7 @@ package cmd
import (
"context"
"encoding/gob"
"errors"
"fmt"
"net/http"
"os"
@ -193,8 +194,8 @@ func newAllSubsystems() {
func initSafeModeInit(buckets []BucketInfo) (err error) {
defer func() {
if err != nil {
switch err.(type) {
case config.Err:
var cerr config.Err
if errors.As(err, &cerr) {
return
}
// Enable logger
@ -377,7 +378,7 @@ func serverMain(ctx *cli.Context) {
initFederatorBackend(buckets, newObject)
}
initSafeModeInit(buckets)
logger.FatalIf(initSafeModeInit(buckets), "Unable to initialize server")
if globalCacheConfig.Enabled {
msg := color.RedBold("Disk caching is disabled in 'server' mode, 'caching' is only supported in gateway deployments")

View file

@ -199,6 +199,10 @@ func printServerCommonMsg(apiEndpoints []string) {
// Prints bucket notification configurations.
func printEventNotifiers() {
if globalNotificationSys == nil {
return
}
arns := globalNotificationSys.GetARNList()
if len(arns) == 0 {
return

View file

@ -1,51 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2015, 2016, 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"github.com/minio/cli"
"github.com/minio/mc/pkg/console"
)
var versionCmd = cli.Command{
Name: "version",
Usage: "print version",
Action: mainVersion,
CustomHelpTemplate: `NAME:
{{.HelpName}} - {{.Usage}}
USAGE:
{{.HelpName}}{{if .VisibleFlags}} [FLAGS]{{end}}
{{if .VisibleFlags}}
FLAGS:
{{range .VisibleFlags}}{{.}}
{{end}}{{end}}
EXAMPLES:
1. Prints server version:
{{.Prompt}} {{.HelpName}}
`,
}
func mainVersion(ctx *cli.Context) {
if len(ctx.Args()) != 0 {
cli.ShowCommandHelpAndExit(ctx, "version", 1)
}
console.Println("Version: " + Version)
console.Println("Release-Tag: " + ReleaseTag)
console.Println("Commit-ID: " + CommitID)
}

View file

@ -15,12 +15,12 @@ HTTP target logs to a generic HTTP endpoint in JSON format and is not enabled by
Assuming `mc` is already [configured](https://docs.min.io/docs/minio-client-quickstart-guide.html)
```
mc admin config get myminio/ logger_http
logger_http:target1 auth_token="" endpoint="" state="off"
mc admin config get myminio/ logger_webhook
logger_webhook:target1 auth_token="" endpoint="" state="off"
```
```
mc admin config set myminio logger_http:target1 auth_token="" endpoint="http://endpoint:port/path" state="on"
mc admin config set myminio logger_webhook:target1 auth_token="" endpoint="http://endpoint:port/path" state="on"
mc admin service restart myminio
```
@ -28,21 +28,21 @@ NOTE: `http://endpoint:port/path` is a placeholder value to indicate the URL for
MinIO also honors environment variables for HTTP target logging, as shown below; this setting overrides the endpoint settings in the MinIO server config.
```
export MINIO_LOGGER_HTTP_STATE_target1="on"
export MINIO_LOGGER_HTTP_AUTH_TOKEN_target1="token"
export MINIO_LOGGER_HTTP_ENDPOINT_target1=http://localhost:8080/minio/logs
export MINIO_LOGGER_WEBHOOK_STATE_target1="on"
export MINIO_LOGGER_WEBHOOK_AUTH_TOKEN_target1="token"
export MINIO_LOGGER_WEBHOOK_ENDPOINT_target1=http://localhost:8080/minio/logs
minio server /mnt/data
```
## Audit Targets
Assuming `mc` is already [configured](https://docs.min.io/docs/minio-client-quickstart-guide.html)
```
mc admin config get myminio/ logger_http_audit
logger_http_audit:target1 auth_token="" endpoint="" state="off"
mc admin config get myminio/ audit_webhook
audit_webhook:target1 auth_token="" endpoint="" state="off"
```
```
mc admin config set myminio logger_http_audit:target1 auth_token="" endpoint="http://endpoint:port/path" state="on"
mc admin config set myminio audit_webhook:target1 auth_token="" endpoint="http://endpoint:port/path" state="on"
mc admin service restart myminio
```
@ -50,9 +50,9 @@ NOTE: `http://endpoint:port/path` is a placeholder value to indicate the URL for
MinIO also honors environment variables for HTTP target Audit logging, as shown below; this setting overrides the endpoint settings in the MinIO server config.
```
export MINIO_LOGGER_HTTP_AUDIT_STATE_target1="on"
export MINIO_LOGGER_HTTP_AUDIT_AUTH_TOKEN_target1="token"
export MINIO_LOGGER_HTTP_AUDIT_ENDPOINT_target1=http://localhost:8080/minio/logs
export MINIO_AUDIT_WEBHOOK_STATE_target1="on"
export MINIO_AUDIT_WEBHOOK_AUTH_TOKEN_target1="token"
export MINIO_AUDIT_WEBHOOK_ENDPOINT_target1=http://localhost:8080/minio/logs
minio server /mnt/data
```

pkg/env/env.go
View file

@ -27,6 +27,12 @@ func SetEnvOn() {
envOff = false
}
// IsSet returns if the given env key is set.
func IsSet(key string) bool {
_, ok := os.LookupEnv(key)
return ok
}
// Get retrieves the value of the environment variable named
// by the key. If the variable is present in the environment the
// value (which may be empty) is returned. Otherwise it returns

View file

@ -39,10 +39,10 @@ const (
KafkaTopic = "topic"
KafkaQueueDir = "queue_dir"
KafkaQueueLimit = "queue_limit"
KafkaTLSEnable = "tls_enable"
KafkaTLS = "tls"
KafkaTLSSkipVerify = "tls_skip_verify"
KafkaTLSClientAuth = "tls_client_auth"
KafkaSASLEnable = "sasl_enable"
KafkaSASL = "sasl"
KafkaSASLUsername = "sasl_username"
KafkaSASLPassword = "sasl_password"
@ -51,10 +51,10 @@ const (
EnvKafkaTopic = "MINIO_NOTIFY_KAFKA_TOPIC"
EnvKafkaQueueDir = "MINIO_NOTIFY_KAFKA_QUEUE_DIR"
EnvKafkaQueueLimit = "MINIO_NOTIFY_KAFKA_QUEUE_LIMIT"
EnvKafkaTLSEnable = "MINIO_NOTIFY_KAFKA_TLS_ENABLE"
EnvKafkaTLS = "MINIO_NOTIFY_KAFKA_TLS"
EnvKafkaTLSSkipVerify = "MINIO_NOTIFY_KAFKA_TLS_SKIP_VERIFY"
EnvKafkaTLSClientAuth = "MINIO_NOTIFY_KAFKA_TLS_CLIENT_AUTH"
EnvKafkaSASLEnable = "MINIO_NOTIFY_KAFKA_SASL_ENABLE"
EnvKafkaSASLEnable = "MINIO_NOTIFY_KAFKA_SASL"
EnvKafkaSASLUsername = "MINIO_NOTIFY_KAFKA_SASL_USERNAME"
EnvKafkaSASLPassword = "MINIO_NOTIFY_KAFKA_SASL_PASSWORD"
)

View file

@ -43,7 +43,7 @@ const (
NATSQueueLimit = "queue_limit"
// Streaming constants
NATSStreamingEnable = "streaming_enable"
NATSStreaming = "streaming"
NATSStreamingClusterID = "streaming_cluster_id"
NATSStreamingAsync = "streaming_async"
NATSStreamingMaxPubAcksInFlight = "streaming_max_pub_acks_in_flight"
@ -60,7 +60,7 @@ const (
EnvNATSQueueLimit = "MINIO_NOTIFY_NATS_QUEUE_LIMIT"
// Streaming constants
EnvNATSStreamingEnable = "MINIO_NOTIFY_NATS_STREAMING_ENABLE"
EnvNATSStreaming = "MINIO_NOTIFY_NATS_STREAMING"
EnvNATSStreamingClusterID = "MINIO_NOTIFY_NATS_STREAMING_CLUSTER_ID"
EnvNATSStreamingAsync = "MINIO_NOTIFY_NATS_STREAMING_ASYNC"
EnvNATSStreamingMaxPubAcksInFlight = "MINIO_NOTIFY_NATS_STREAMING_MAX_PUB_ACKS_IN_FLIGHT"

View file

@ -35,7 +35,7 @@ import (
const (
NSQAddress = "nsqd_address"
NSQTopic = "topic"
NSQTLSEnable = "tls_enable"
NSQTLS = "tls"
NSQTLSSkipVerify = "tls_skip_verify"
NSQQueueDir = "queue_dir"
NSQQueueLimit = "queue_limit"
@ -43,7 +43,7 @@ const (
EnvNSQState = "MINIO_NOTIFY_NSQ"
EnvNSQAddress = "MINIO_NOTIFY_NSQ_NSQD_ADDRESS"
EnvNSQTopic = "MINIO_NOTIFY_NSQ_TOPIC"
EnvNSQTLSEnable = "MINIO_NOTIFY_NSQ_TLS_ENABLE"
EnvNSQTLS = "MINIO_NOTIFY_NSQ_TLS"
EnvNSQTLSSkipVerify = "MINIO_NOTIFY_NSQ_TLS_SKIP_VERIFY"
EnvNSQQueueDir = "MINIO_NOTIFY_NSQ_QUEUE_DIR"
EnvNSQQueueLimit = "MINIO_NOTIFY_NSQ_QUEUE_LIMIT"

View file

@ -18,11 +18,8 @@
package madmin
import (
"bufio"
"encoding/base64"
"net/http"
"net/url"
"strings"
)
// DelConfigKV - delete key from server config.
@ -54,33 +51,21 @@ func (adm *AdminClient) DelConfigKV(k string) (err error) {
// SetConfigKV - set key value config to server.
func (adm *AdminClient) SetConfigKV(kv string) (err error) {
bio := bufio.NewScanner(strings.NewReader(kv))
var s strings.Builder
var comment string
for bio.Scan() {
if bio.Text() == "" {
continue
}
if strings.HasPrefix(bio.Text(), KvComment) {
// Join multiple comments for each newline, separated by "\n"
comments := []string{comment, strings.TrimPrefix(bio.Text(), KvComment)}
comment = strings.Join(comments, KvNewline)
continue
}
s.WriteString(bio.Text())
if comment != "" {
s.WriteString(KvSpaceSeparator)
s.WriteString(commentKey)
s.WriteString(KvSeparator)
s.WriteString(KvDoubleQuote)
s.WriteString(base64.RawStdEncoding.EncodeToString([]byte(comment)))
s.WriteString(KvDoubleQuote)
}
s.WriteString(KvNewline)
comment = ""
targets, err := ParseSubSysTarget([]byte(kv))
if err != nil {
return err
}
econfigBytes, err := EncryptData(adm.secretAccessKey, []byte(s.String()))
for subSys, targetKV := range targets {
for target := range targetKV {
_, ok := targets[subSys][target][stateKey]
if !ok {
// If client asked for state preserve.
// otherwise implicitly add state to "on"
targets[subSys][target][stateKey] = stateOn
}
}
}
econfigBytes, err := EncryptData(adm.secretAccessKey, []byte(targets.String()))
if err != nil {
return err
}

View file

@ -20,11 +20,9 @@ package madmin
import (
"bufio"
"bytes"
"encoding/base64"
"fmt"
"strings"
"github.com/minio/minio/pkg/color"
"unicode"
)
// KVS each sub-system key, value
@ -34,9 +32,35 @@ type KVS map[string]string
type Targets map[string]map[string]KVS
const (
stateKey = "state"
commentKey = "comment"
stateOn = "on"
stateOff = "off"
)
func (kvs KVS) String() string {
var s strings.Builder
for k, v := range kvs {
// Do not need to print if state is on
if k == stateKey && v == stateOn {
continue
}
s.WriteString(k)
s.WriteString(KvSeparator)
spc := hasSpace(v)
if spc {
s.WriteString(KvDoubleQuote)
}
s.WriteString(v)
if spc {
s.WriteString(KvDoubleQuote)
}
s.WriteString(KvSpaceSeparator)
}
return s.String()
}
// Count - returns total numbers of target
func (t Targets) Count() int {
var count int
@ -48,44 +72,28 @@ func (t Targets) Count() int {
return count
}
func hasSpace(s string) bool {
for _, r := range s {
if unicode.IsSpace(r) {
return true
}
}
return false
}
func (t Targets) String() string {
var s strings.Builder
count := t.Count()
for subSys, targetKV := range t {
for target, kv := range targetKV {
count--
c := kv[commentKey]
data, err := base64.RawStdEncoding.DecodeString(c)
if err == nil {
c = string(data)
}
for _, c1 := range strings.Split(c, KvNewline) {
if c1 == "" {
continue
}
s.WriteString(color.YellowBold(KvComment))
s.WriteString(KvSpaceSeparator)
s.WriteString(color.BlueBold(strings.TrimSpace(c1)))
s.WriteString(KvNewline)
}
s.WriteString(subSys)
if target != Default {
s.WriteString(SubSystemSeparator)
s.WriteString(target)
}
s.WriteString(KvSpaceSeparator)
for k, v := range kv {
// Comment is already printed, do not print it here.
if k == commentKey {
continue
}
s.WriteString(k)
s.WriteString(KvSeparator)
s.WriteString(KvDoubleQuote)
s.WriteString(v)
s.WriteString(KvDoubleQuote)
s.WriteString(KvSpaceSeparator)
}
s.WriteString(kv.String())
if (len(t) > 1 || len(targetKV) > 1) && count > 0 {
s.WriteString(KvNewline)
s.WriteString(KvNewline)
@ -100,7 +108,6 @@ const (
SubSystemSeparator = `:`
KvSeparator = `=`
KvSpaceSeparator = ` `
KvComment = `#`
KvNewline = "\n"
KvDoubleQuote = `"`
KvSingleQuote = `'`