admin: ServerInfo returns info without object layer initialized (#11142)

This commit is contained in:
Anis Elleuch 2020-12-21 18:35:19 +01:00 committed by GitHub
parent 3e792ae2a2
commit 2ecaab55a6
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
14 changed files with 181 additions and 139 deletions

View file

@ -1497,22 +1497,13 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
defer logger.AuditLog(w, r, "ServerInfo", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ServerInfoAdminAction)
if objectAPI == nil {
// Validate request signature.
_, adminAPIErr := checkAdminRequestAuth(ctx, r, iampolicy.ServerInfoAdminAction, "")
if adminAPIErr != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
return
}
buckets := madmin.Buckets{}
objects := madmin.Objects{}
usage := madmin.Usage{}
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
if err == nil {
buckets = madmin.Buckets{Count: dataUsageInfo.BucketsCount}
objects = madmin.Objects{Count: dataUsageInfo.ObjectsTotalCount}
usage = madmin.Usage{Size: dataUsageInfo.ObjectsTotalSize}
}
vault := fetchVaultStatus()
ldap := madmin.LDAP{}
@ -1534,31 +1525,54 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
// Get the notification target info
notifyTarget := fetchLambdaInfo()
// Fetching the Storage information, ignore any errors.
storageInfo, _ := objectAPI.StorageInfo(ctx, false)
var backend interface{}
if storageInfo.Backend.Type == BackendType(madmin.Erasure) {
backend = madmin.ErasureBackend{
Type: madmin.ErasureType,
OnlineDisks: storageInfo.Backend.OnlineDisks.Sum(),
OfflineDisks: storageInfo.Backend.OfflineDisks.Sum(),
StandardSCData: storageInfo.Backend.StandardSCData,
StandardSCParity: storageInfo.Backend.StandardSCParity,
RRSCData: storageInfo.Backend.RRSCData,
RRSCParity: storageInfo.Backend.RRSCParity,
}
} else {
backend = madmin.FSBackend{
Type: madmin.FsType,
}
}
mode := "online"
server := getLocalServerProperty(globalEndpoints, r)
servers := globalNotificationSys.ServerInfo()
servers = append(servers, server)
var backend interface{}
mode := madmin.ObjectLayerInitializing
buckets := madmin.Buckets{}
objects := madmin.Objects{}
usage := madmin.Usage{}
objectAPI := newObjectLayerFn()
if objectAPI != nil {
mode = madmin.ObjectLayerOnline
// Load data usage
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
if err == nil {
buckets = madmin.Buckets{Count: dataUsageInfo.BucketsCount}
objects = madmin.Objects{Count: dataUsageInfo.ObjectsTotalCount}
usage = madmin.Usage{Size: dataUsageInfo.ObjectsTotalSize}
}
// Fetching the backend information
backendInfo := objectAPI.BackendInfo()
if backendInfo.Type == BackendType(madmin.Erasure) {
// Calculate the number of online/offline disks of all nodes
var allDisks []madmin.Disk
for _, s := range servers {
allDisks = append(allDisks, s.Disks...)
}
onlineDisks, offlineDisks := getOnlineOfflineDisksStats(allDisks)
backend = madmin.ErasureBackend{
Type: madmin.ErasureType,
OnlineDisks: onlineDisks.Sum(),
OfflineDisks: offlineDisks.Sum(),
StandardSCData: backendInfo.StandardSCData,
StandardSCParity: backendInfo.StandardSCParity,
RRSCData: backendInfo.RRSCData,
RRSCParity: backendInfo.RRSCParity,
}
} else {
backend = madmin.FSBackend{
Type: madmin.FsType,
}
}
}
domain := globalDomainNames
services := madmin.Services{
Vault: vault,
@ -1568,25 +1582,6 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
Notifications: notifyTarget,
}
// find all disks which belong to each respective endpoints
for i := range servers {
for _, disk := range storageInfo.Disks {
if strings.Contains(disk.Endpoint, servers[i].Endpoint) {
servers[i].Disks = append(servers[i].Disks, disk)
}
}
}
// add all the disks local to this server.
for _, disk := range storageInfo.Disks {
if disk.DrivePath == "" && disk.Endpoint == "" {
continue
}
if disk.Endpoint == disk.DrivePath {
servers[len(servers)-1].Disks = append(servers[len(servers)-1].Disks, disk)
}
}
infoMsg := madmin.InfoMessage{
Mode: mode,
Domain: domain,

View file

@ -25,6 +25,7 @@ import (
// getLocalServerProperty - returns madmin.ServerProperties for only the
// local endpoints from given list of endpoints
func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Request) madmin.ServerProperties {
var localEndpoints Endpoints
addr := r.Host
if globalIsDistErasure {
addr = GetLocalPeer(endpointServerPools)
@ -39,6 +40,7 @@ func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Req
if endpoint.IsLocal {
// Only proceed for local endpoints
network[nodeName] = "online"
localEndpoints = append(localEndpoints, endpoint)
continue
}
_, present := network[nodeName]
@ -52,6 +54,11 @@ func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Req
}
}
localDisks, _ := initStorageDisksWithErrors(localEndpoints)
defer closeStorageDisks(localDisks)
storageInfo, _ := getStorageInfo(localDisks, localEndpoints.GetAllStrings())
return madmin.ServerProperties{
State: "ok",
Endpoint: addr,
@ -59,5 +66,6 @@ func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Req
Version: Version,
CommitID: CommitID,
Network: network,
Disks: storageInfo.Disks,
}
}

View file

@ -353,6 +353,14 @@ func (endpoints Endpoints) GetString(i int) string {
return endpoints[i].String()
}
// GetAllStrings - returns a string representation of all endpoints.
func (endpoints Endpoints) GetAllStrings() []string {
	// Preallocate: the result always has exactly one entry per endpoint.
	all := make([]string, 0, len(endpoints))
	for _, e := range endpoints {
		all = append(all, e.String())
	}
	return all
}
func hostResolveToLocalhost(endpoint Endpoint) bool {
hostIPs, err := getHostIP(endpoint.Hostname())
if err != nil {

View file

@ -273,9 +273,24 @@ func (z *erasureServerPools) Shutdown(ctx context.Context) error {
return nil
}
// BackendInfo returns the backend type and the currently configured
// erasure-coding data/parity drive counts for the STANDARD and
// reduced-redundancy (RRS) storage classes.
func (z *erasureServerPools) BackendInfo() (b BackendInfo) {
	b.Type = BackendErasure

	// Hoist the set drive count; it is invariant across the calculations below.
	setDriveCount := z.SetDriveCount()

	scParity := globalStorageClass.GetParityForSC(storageclass.STANDARD)
	if scParity == 0 {
		// No explicit STANDARD parity configured: default to half the set.
		scParity = setDriveCount / 2
	}
	b.StandardSCData = setDriveCount - scParity
	b.StandardSCParity = scParity

	rrSCParity := globalStorageClass.GetParityForSC(storageclass.RRS)
	b.RRSCData = setDriveCount - rrSCParity
	b.RRSCParity = rrSCParity
	return
}
func (z *erasureServerPools) StorageInfo(ctx context.Context, local bool) (StorageInfo, []error) {
var storageInfo StorageInfo
storageInfo.Backend.Type = BackendErasure
storageInfos := make([]StorageInfo, len(z.serverPools))
storageInfosErrs := make([][]error, len(z.serverPools))
@ -291,23 +306,11 @@ func (z *erasureServerPools) StorageInfo(ctx context.Context, local bool) (Stora
// Wait for the go routines.
g.Wait()
storageInfo.Backend = z.BackendInfo()
for _, lstorageInfo := range storageInfos {
storageInfo.Disks = append(storageInfo.Disks, lstorageInfo.Disks...)
storageInfo.Backend.OnlineDisks = storageInfo.Backend.OnlineDisks.Merge(lstorageInfo.Backend.OnlineDisks)
storageInfo.Backend.OfflineDisks = storageInfo.Backend.OfflineDisks.Merge(lstorageInfo.Backend.OfflineDisks)
}
scParity := globalStorageClass.GetParityForSC(storageclass.STANDARD)
if scParity == 0 {
scParity = z.SetDriveCount() / 2
}
storageInfo.Backend.StandardSCData = z.SetDriveCount() - scParity
storageInfo.Backend.StandardSCParity = scParity
rrSCParity := globalStorageClass.GetParityForSC(storageclass.RRS)
storageInfo.Backend.RRSCData = z.SetDriveCount() - rrSCParity
storageInfo.Backend.RRSCParity = rrSCParity
var errs []error
for i := range z.serverPools {
errs = append(errs, storageInfosErrs[i]...)

View file

@ -491,8 +491,6 @@ func (s *erasureSets) StorageUsageInfo(ctx context.Context) StorageInfo {
for _, lstorageInfo := range storageInfos {
storageInfo.Disks = append(storageInfo.Disks, lstorageInfo.Disks...)
storageInfo.Backend.OnlineDisks = storageInfo.Backend.OnlineDisks.Merge(lstorageInfo.Backend.OnlineDisks)
storageInfo.Backend.OfflineDisks = storageInfo.Backend.OfflineDisks.Merge(lstorageInfo.Backend.OfflineDisks)
}
return storageInfo
@ -530,8 +528,6 @@ func (s *erasureSets) StorageInfo(ctx context.Context, local bool) (StorageInfo,
for _, lstorageInfo := range storageInfos {
storageInfo.Disks = append(storageInfo.Disks, lstorageInfo.Disks...)
storageInfo.Backend.OnlineDisks = storageInfo.Backend.OnlineDisks.Merge(lstorageInfo.Backend.OnlineDisks)
storageInfo.Backend.OfflineDisks = storageInfo.Backend.OfflineDisks.Merge(lstorageInfo.Backend.OfflineDisks)
}
if local {

View file

@ -109,13 +109,12 @@ func diskErrToDriveState(err error) (state string) {
return
}
// getDisksInfo - fetch disks info across all other storage API.
func getDisksInfo(disks []StorageAPI, endpoints []string) (disksInfo []madmin.Disk, errs []error, onlineDisks, offlineDisks madmin.BackendDisks) {
disksInfo = make([]madmin.Disk, len(disks))
func getOnlineOfflineDisksStats(disksInfo []madmin.Disk) (onlineDisks, offlineDisks madmin.BackendDisks) {
onlineDisks = make(madmin.BackendDisks)
offlineDisks = make(madmin.BackendDisks)
for _, ep := range endpoints {
for _, disk := range disksInfo {
ep := disk.Endpoint
if _, ok := offlineDisks[ep]; !ok {
offlineDisks[ep] = 0
}
@ -124,6 +123,47 @@ func getDisksInfo(disks []StorageAPI, endpoints []string) (disksInfo []madmin.Di
}
}
// Wait for the routines.
for _, disk := range disksInfo {
ep := disk.Endpoint
state := disk.State
if state != madmin.DriveStateOk && state != madmin.DriveStateUnformatted {
offlineDisks[ep]++
continue
}
onlineDisks[ep]++
}
rootDiskCount := 0
for _, di := range disksInfo {
if di.RootDisk {
rootDiskCount++
}
}
// Count offline disks as well to ensure consistent
// reportability of offline drives on local setups.
if len(disksInfo) == (rootDiskCount + offlineDisks.Sum()) {
// Success.
return onlineDisks, offlineDisks
}
// Root disk should be considered offline
for i := range disksInfo {
ep := disksInfo[i].Endpoint
if disksInfo[i].RootDisk {
offlineDisks[ep]++
onlineDisks[ep]--
}
}
return onlineDisks, offlineDisks
}
// getDisksInfo - fetch disks info across all other storage API.
func getDisksInfo(disks []StorageAPI, endpoints []string) (disksInfo []madmin.Disk, errs []error) {
disksInfo = make([]madmin.Disk, len(disks))
g := errgroup.WithNErrs(len(disks))
for index := range disks {
index := index
@ -157,46 +197,12 @@ func getDisksInfo(disks []StorageAPI, endpoints []string) (disksInfo []madmin.Di
}, index)
}
errs = g.Wait()
// Wait for the routines.
for i, diskInfoErr := range errs {
ep := disksInfo[i].Endpoint
if diskInfoErr != nil && !errors.Is(diskInfoErr, errUnformattedDisk) {
offlineDisks[ep]++
continue
}
onlineDisks[ep]++
}
rootDiskCount := 0
for _, di := range disksInfo {
if di.RootDisk {
rootDiskCount++
}
}
// Count offline disks as well to ensure consistent
// reportability of offline drives on local setups.
if len(disksInfo) == (rootDiskCount + offlineDisks.Sum()) {
// Success.
return disksInfo, errs, onlineDisks, offlineDisks
}
// Root disk should be considered offline
for i := range disksInfo {
ep := disksInfo[i].Endpoint
if disksInfo[i].RootDisk {
offlineDisks[ep]++
onlineDisks[ep]--
}
}
return disksInfo, errs, onlineDisks, offlineDisks
return disksInfo, g.Wait()
}
// Get an aggregated storage info across all disks.
func getStorageInfo(disks []StorageAPI, endpoints []string) (StorageInfo, []error) {
disksInfo, errs, onlineDisks, offlineDisks := getDisksInfo(disks, endpoints)
disksInfo, errs := getDisksInfo(disks, endpoints)
// Sort so that the first element is the smallest.
sort.Sort(byDiskTotal(disksInfo))
@ -206,9 +212,6 @@ func getStorageInfo(disks []StorageAPI, endpoints []string) (StorageInfo, []erro
}
storageInfo.Backend.Type = BackendErasure
storageInfo.Backend.OnlineDisks = onlineDisks
storageInfo.Backend.OfflineDisks = offlineDisks
return storageInfo, errs
}

View file

@ -200,6 +200,11 @@ func (fs *FSObjects) Shutdown(ctx context.Context) error {
return fsRemoveAll(ctx, pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID))
}
// BackendInfo - reports the backend type for the FS object layer.
func (fs *FSObjects) BackendInfo() BackendInfo {
	var b BackendInfo
	b.Type = BackendFS
	return b
}
// StorageInfo - returns underlying storage statistics.
func (fs *FSObjects) StorageInfo(ctx context.Context, _ bool) (StorageInfo, []error) {

View file

@ -34,6 +34,11 @@ import (
// GatewayUnsupported list of unsupported call stubs for gateway.
type GatewayUnsupported struct{}
// BackendInfo reports that this object layer is backed by a gateway.
func (a GatewayUnsupported) BackendInfo() BackendInfo {
	var b BackendInfo
	b.Type = BackendGateway
	return b
}
// CrawlAndGetDataUsage - crawl is not implemented for gateway
func (a GatewayUnsupported) CrawlAndGetDataUsage(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo) error {
logger.CriticalIf(ctx, errors.New("not implemented"))

View file

@ -500,8 +500,7 @@ func storageMetricsPrometheus(ch chan<- prometheus.Metric) {
// Fetch disk space info, ignore errors
storageInfo, _ := objLayer.StorageInfo(GlobalContext, true)
offlineDisks := storageInfo.Backend.OfflineDisks
onlineDisks := storageInfo.Backend.OnlineDisks
onlineDisks, offlineDisks := getOnlineOfflineDisksStats(storageInfo.Disks)
totalDisks := offlineDisks.Merge(onlineDisks)
// MinIO Offline Disks per node

View file

@ -42,26 +42,25 @@ const (
// Add your own backend.
)
// BackendInfo - contains info of the underlying backend.
// Only the Type field is meaningful for every backend; the remaining
// fields apply to a single backend kind as noted below.
type BackendInfo struct {
	// Represents various backend types, currently on FS, Erasure and Gateway
	Type BackendType

	// Following fields are only meaningful if BackendType is Gateway.
	GatewayOnline bool

	// Following fields are only meaningful if BackendType is Erasure.
	StandardSCData   int // Data disks for currently configured Standard storage class.
	StandardSCParity int // Parity disks for currently configured Standard storage class.
	RRSCData         int // Data disks for currently configured Reduced Redundancy storage class.
	RRSCParity       int // Parity disks for currently configured Reduced Redundancy storage class.
}
// StorageInfo - represents total capacity of underlying storage.
type StorageInfo struct {
Disks []madmin.Disk
// Backend type.
Backend struct {
// Represents various backend types, currently on FS, Erasure and Gateway
Type BackendType
// Following fields are only meaningful if BackendType is Gateway.
GatewayOnline bool
// Following fields are only meaningful if BackendType is Erasure.
OnlineDisks madmin.BackendDisks // Online disks during server startup.
OfflineDisks madmin.BackendDisks // Offline disks during server startup.
StandardSCData int // Data disks for currently configured Standard storage class.
StandardSCParity int // Parity disks for currently configured Standard storage class.
RRSCData int // Data disks for currently configured Reduced Redundancy storage class.
RRSCParity int // Parity disks for currently configured Reduced Redundancy storage class.
}
Disks []madmin.Disk
Backend BackendInfo
}
// objectHistogramInterval is an interval that will be

View file

@ -79,6 +79,8 @@ type ObjectLayer interface {
// Storage operations.
Shutdown(context.Context) error
CrawlAndGetDataUsage(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo) error
BackendInfo() BackendInfo
StorageInfo(ctx context.Context, local bool) (StorageInfo, []error) // local queries only local disks
// Bucket operations.

View file

@ -205,12 +205,13 @@ func printObjectAPIMsg() {
func getStorageInfoMsg(storageInfo StorageInfo) string {
var msg string
var mcMessage string
onlineDisks, offlineDisks := getOnlineOfflineDisksStats(storageInfo.Disks)
if storageInfo.Backend.Type == BackendErasure {
if storageInfo.Backend.OfflineDisks.Sum() > 0 {
if offlineDisks.Sum() > 0 {
mcMessage = "Use `mc admin info` to look for latest server/disk info\n"
}
diskInfo := fmt.Sprintf(" %d Online, %d Offline. ", storageInfo.Backend.OnlineDisks.Sum(), storageInfo.Backend.OfflineDisks.Sum())
diskInfo := fmt.Sprintf(" %d Online, %d Offline. ", onlineDisks.Sum(), offlineDisks.Sum())
msg += color.Blue("Status:") + fmt.Sprintf(getFormatStr(len(diskInfo), 8), diskInfo)
if len(mcMessage) > 0 {
msg = fmt.Sprintf("%s %s", mcMessage, msg)

View file

@ -28,9 +28,17 @@ import (
// Tests if we generate storage info.
func TestStorageInfoMsg(t *testing.T) {
infoStorage := StorageInfo{}
infoStorage.Disks = []madmin.Disk{
{Endpoint: "http://127.0.0.1:9000/data/1/", State: madmin.DriveStateOk},
{Endpoint: "http://127.0.0.1:9000/data/2/", State: madmin.DriveStateOk},
{Endpoint: "http://127.0.0.1:9000/data/3/", State: madmin.DriveStateOk},
{Endpoint: "http://127.0.0.1:9000/data/4/", State: madmin.DriveStateOk},
{Endpoint: "http://127.0.0.1:9001/data/1/", State: madmin.DriveStateOk},
{Endpoint: "http://127.0.0.1:9001/data/2/", State: madmin.DriveStateOk},
{Endpoint: "http://127.0.0.1:9001/data/3/", State: madmin.DriveStateOk},
{Endpoint: "http://127.0.0.1:9001/data/4/", State: madmin.DriveStateOffline},
}
infoStorage.Backend.Type = BackendErasure
infoStorage.Backend.OnlineDisks = madmin.BackendDisks{"127.0.0.1:9000": 4, "127.0.0.1:9001": 3}
infoStorage.Backend.OfflineDisks = madmin.BackendDisks{"127.0.0.1:9000": 0, "127.0.0.1:9001": 1}
if msg := getStorageInfoMsg(infoStorage); !strings.Contains(msg, "7 Online, 1 Offline") {
t.Fatal("Unexpected storage info message, found:", msg)

View file

@ -39,6 +39,16 @@ const (
// Add your own backend.
)
// ObjectLayerState - represents the status of the object layer,
// reported as the server "mode" in admin info responses.
type ObjectLayerState string

const (
	// ObjectLayerInitializing indicates that the object layer is still in initialization phase
	ObjectLayerInitializing = ObjectLayerState("initializing")

	// ObjectLayerOnline indicates that the object layer is ready
	ObjectLayerOnline = ObjectLayerState("online")
)
// StorageInfo - represents total capacity of underlying storage.
type StorageInfo struct {
Disks []Disk
@ -163,7 +173,7 @@ func (adm *AdminClient) DataUsageInfo(ctx context.Context) (DataUsageInfo, error
// InfoMessage container to hold server admin related information.
type InfoMessage struct {
Mode string `json:"mode,omitempty"`
Mode ObjectLayerState `json:"mode,omitempty"`
Domain []string `json:"domain,omitempty"`
Region string `json:"region,omitempty"`
SQSARN []string `json:"sqsARN,omitempty"`