add additional metrics per disk API latency, API call counts (#11250)

```
mc admin info --json
```

provides these details; for now they are available through the
admin API, and we shall eventually expose them at the Prometheus level.

Co-authored-by: Harshavardhana <harsha@minio.io>
This commit is contained in:
Anis Elleuch 2021-03-17 04:06:57 +01:00 committed by GitHub
parent b379ca3bb0
commit 0eb146e1b2
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
15 changed files with 655 additions and 140 deletions

View file

@ -1559,6 +1559,7 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
objectAPI := newObjectLayerFn()
if objectAPI != nil {
mode = madmin.ItemOnline
// Load data usage
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
if err == nil {

View file

@ -1618,7 +1618,7 @@ func (z *erasureServerPools) HealObject(ctx context.Context, bucket, object, ver
}
}
// GetMetrics - no op
// GetMetrics - returns metrics of local disks
func (z *erasureServerPools) GetMetrics(ctx context.Context) (*BackendMetrics, error) {
logger.LogIf(ctx, NotImplemented{})
return &BackendMetrics{}, NotImplemented{}

View file

@ -204,6 +204,16 @@ func getDisksInfo(disks []StorageAPI, endpoints []string) (disksInfo []madmin.Di
di.HealInfo = &hd
}
}
di.Metrics = &madmin.DiskMetrics{
APILatencies: make(map[string]string),
APICalls: make(map[string]uint64),
}
for k, v := range info.Metrics.APILatencies {
di.Metrics.APILatencies[k] = v
}
for k, v := range info.Metrics.APICalls {
di.Metrics.APICalls[k] = v
}
if info.Total > 0 {
di.Utilization = float64(info.Used / info.Total * 100)
}

View file

@ -25,7 +25,6 @@ import (
"sort"
"strconv"
"strings"
"sync/atomic"
"github.com/gorilla/mux"
"github.com/minio/minio/cmd/logger"
@ -57,11 +56,6 @@ type WalkDirOptions struct {
// WalkDir will traverse a directory and return all entries found.
// On success a sorted meta cache stream will be returned.
func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writer) error {
atomic.AddInt32(&s.activeIOCount, 1)
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()
// Verify if volume is valid and it exists.
volumeDir, err := s.getVolDir(opts.Bucket)
if err != nil {
@ -266,6 +260,7 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ
}
func (p *xlStorageDiskIDCheck) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writer) error {
defer p.updateStorageMetrics(storageMetricWalkDir)()
if err := p.checkDiskStale(); err != nil {
return err
}

View file

@ -69,7 +69,7 @@ func newStorageAPIWithoutHealthCheck(endpoint Endpoint) (storage StorageAPI, err
if err != nil {
return nil, err
}
return &xlStorageDiskIDCheck{storage: storage}, nil
return newXLStorageDiskIDCheck(storage), nil
}
return newStorageRESTClient(endpoint, false), nil
@ -82,7 +82,7 @@ func newStorageAPI(endpoint Endpoint) (storage StorageAPI, err error) {
if err != nil {
return nil, err
}
return &xlStorageDiskIDCheck{storage: storage}, nil
return newXLStorageDiskIDCheck(storage), nil
}
return newStorageRESTClient(endpoint, true), nil

View file

@ -37,9 +37,18 @@ type DiskInfo struct {
Endpoint string
MountPath string
ID string
Metrics DiskMetrics
Error string // carries the error over the network
}
// DiskMetrics has the information about XL Storage APIs
// the number of calls of each API and the moving average of
// the duration of each API.
type DiskMetrics struct {
	// APILatencies maps a storage API name to the textual form of its
	// latency moving average (a time.Duration rendered as a string).
	APILatencies map[string]string `json:"apiLatencies,omitempty"`
	// APICalls maps a storage API name to the total number of calls
	// made to that API since the process started.
	APICalls map[string]uint64 `json:"apiCalls,omitempty"`
}
// VolsInfo is a collection of volume(bucket) information
type VolsInfo []VolInfo

View file

@ -14,8 +14,8 @@ func (z *DiskInfo) DecodeMsg(dc *msgp.Reader) (err error) {
err = msgp.WrapError(err)
return
}
if zb0001 != 11 {
err = msgp.ArrayError{Wanted: 11, Got: zb0001}
if zb0001 != 12 {
err = msgp.ArrayError{Wanted: 12, Got: zb0001}
return
}
z.Total, err = dc.ReadUint64()
@ -68,6 +68,11 @@ func (z *DiskInfo) DecodeMsg(dc *msgp.Reader) (err error) {
err = msgp.WrapError(err, "ID")
return
}
err = z.Metrics.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Metrics")
return
}
z.Error, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Error")
@ -78,8 +83,8 @@ func (z *DiskInfo) DecodeMsg(dc *msgp.Reader) (err error) {
// EncodeMsg implements msgp.Encodable
func (z *DiskInfo) EncodeMsg(en *msgp.Writer) (err error) {
// array header, size 11
err = en.Append(0x9b)
// array header, size 12
err = en.Append(0x9c)
if err != nil {
return
}
@ -133,6 +138,11 @@ func (z *DiskInfo) EncodeMsg(en *msgp.Writer) (err error) {
err = msgp.WrapError(err, "ID")
return
}
err = z.Metrics.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Metrics")
return
}
err = en.WriteString(z.Error)
if err != nil {
err = msgp.WrapError(err, "Error")
@ -144,8 +154,8 @@ func (z *DiskInfo) EncodeMsg(en *msgp.Writer) (err error) {
// MarshalMsg implements msgp.Marshaler
func (z *DiskInfo) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// array header, size 11
o = append(o, 0x9b)
// array header, size 12
o = append(o, 0x9c)
o = msgp.AppendUint64(o, z.Total)
o = msgp.AppendUint64(o, z.Free)
o = msgp.AppendUint64(o, z.Used)
@ -156,6 +166,11 @@ func (z *DiskInfo) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.AppendString(o, z.Endpoint)
o = msgp.AppendString(o, z.MountPath)
o = msgp.AppendString(o, z.ID)
o, err = z.Metrics.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Metrics")
return
}
o = msgp.AppendString(o, z.Error)
return
}
@ -168,8 +183,8 @@ func (z *DiskInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err)
return
}
if zb0001 != 11 {
err = msgp.ArrayError{Wanted: 11, Got: zb0001}
if zb0001 != 12 {
err = msgp.ArrayError{Wanted: 12, Got: zb0001}
return
}
z.Total, bts, err = msgp.ReadUint64Bytes(bts)
@ -222,6 +237,11 @@ func (z *DiskInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "ID")
return
}
bts, err = z.Metrics.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Metrics")
return
}
z.Error, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Error")
@ -233,7 +253,276 @@ func (z *DiskInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *DiskInfo) Msgsize() (s int) {
s = 1 + msgp.Uint64Size + msgp.Uint64Size + msgp.Uint64Size + msgp.Uint64Size + msgp.StringPrefixSize + len(z.FSType) + msgp.BoolSize + msgp.BoolSize + msgp.StringPrefixSize + len(z.Endpoint) + msgp.StringPrefixSize + len(z.MountPath) + msgp.StringPrefixSize + len(z.ID) + msgp.StringPrefixSize + len(z.Error)
s = 1 + msgp.Uint64Size + msgp.Uint64Size + msgp.Uint64Size + msgp.Uint64Size + msgp.StringPrefixSize + len(z.FSType) + msgp.BoolSize + msgp.BoolSize + msgp.StringPrefixSize + len(z.Endpoint) + msgp.StringPrefixSize + len(z.MountPath) + msgp.StringPrefixSize + len(z.ID) + z.Metrics.Msgsize() + msgp.StringPrefixSize + len(z.Error)
return
}
// DecodeMsg implements msgp.Decodable
//
// Generated by msgp: reads a map-encoded DiskMetrics from the stream.
// The known keys are "APILatencies" (map[string]string) and "APICalls"
// (map[string]uint64); unknown keys are skipped so peers running other
// versions remain wire-compatible.
func (z *DiskMetrics) DecodeMsg(dc *msgp.Reader) (err error) {
	var field []byte
	_ = field
	var zb0001 uint32
	zb0001, err = dc.ReadMapHeader()
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	for zb0001 > 0 {
		zb0001--
		field, err = dc.ReadMapKeyPtr()
		if err != nil {
			err = msgp.WrapError(err)
			return
		}
		switch msgp.UnsafeString(field) {
		case "APILatencies":
			var zb0002 uint32
			zb0002, err = dc.ReadMapHeader()
			if err != nil {
				err = msgp.WrapError(err, "APILatencies")
				return
			}
			// Reuse an existing destination map when possible, clearing
			// any stale entries first.
			if z.APILatencies == nil {
				z.APILatencies = make(map[string]string, zb0002)
			} else if len(z.APILatencies) > 0 {
				for key := range z.APILatencies {
					delete(z.APILatencies, key)
				}
			}
			for zb0002 > 0 {
				zb0002--
				var za0001 string
				var za0002 string
				za0001, err = dc.ReadString()
				if err != nil {
					err = msgp.WrapError(err, "APILatencies")
					return
				}
				za0002, err = dc.ReadString()
				if err != nil {
					err = msgp.WrapError(err, "APILatencies", za0001)
					return
				}
				z.APILatencies[za0001] = za0002
			}
		case "APICalls":
			var zb0003 uint32
			zb0003, err = dc.ReadMapHeader()
			if err != nil {
				err = msgp.WrapError(err, "APICalls")
				return
			}
			if z.APICalls == nil {
				z.APICalls = make(map[string]uint64, zb0003)
			} else if len(z.APICalls) > 0 {
				for key := range z.APICalls {
					delete(z.APICalls, key)
				}
			}
			for zb0003 > 0 {
				zb0003--
				var za0003 string
				var za0004 uint64
				za0003, err = dc.ReadString()
				if err != nil {
					err = msgp.WrapError(err, "APICalls")
					return
				}
				za0004, err = dc.ReadUint64()
				if err != nil {
					err = msgp.WrapError(err, "APICalls", za0003)
					return
				}
				z.APICalls[za0003] = za0004
			}
		default:
			// Forward compatibility: skip fields this version does not know.
			err = dc.Skip()
			if err != nil {
				err = msgp.WrapError(err)
				return
			}
		}
	}
	return
}
// EncodeMsg implements msgp.Encodable
//
// Generated by msgp: writes z as a two-entry msgpack map. The byte
// sequences appended below are the pre-encoded map header and key
// strings ("APILatencies", "APICalls").
func (z *DiskMetrics) EncodeMsg(en *msgp.Writer) (err error) {
	// map header, size 2
	// write "APILatencies"
	err = en.Append(0x82, 0xac, 0x41, 0x50, 0x49, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73)
	if err != nil {
		return
	}
	err = en.WriteMapHeader(uint32(len(z.APILatencies)))
	if err != nil {
		err = msgp.WrapError(err, "APILatencies")
		return
	}
	for za0001, za0002 := range z.APILatencies {
		err = en.WriteString(za0001)
		if err != nil {
			err = msgp.WrapError(err, "APILatencies")
			return
		}
		err = en.WriteString(za0002)
		if err != nil {
			err = msgp.WrapError(err, "APILatencies", za0001)
			return
		}
	}
	// write "APICalls"
	err = en.Append(0xa8, 0x41, 0x50, 0x49, 0x43, 0x61, 0x6c, 0x6c, 0x73)
	if err != nil {
		return
	}
	err = en.WriteMapHeader(uint32(len(z.APICalls)))
	if err != nil {
		err = msgp.WrapError(err, "APICalls")
		return
	}
	for za0003, za0004 := range z.APICalls {
		err = en.WriteString(za0003)
		if err != nil {
			err = msgp.WrapError(err, "APICalls")
			return
		}
		err = en.WriteUint64(za0004)
		if err != nil {
			err = msgp.WrapError(err, "APICalls", za0003)
			return
		}
	}
	return
}
// MarshalMsg implements msgp.Marshaler
//
// Generated by msgp: appends the encoded form of z to b and returns the
// extended slice. The appended byte literals are the pre-encoded map
// header and key strings.
func (z *DiskMetrics) MarshalMsg(b []byte) (o []byte, err error) {
	o = msgp.Require(b, z.Msgsize())
	// map header, size 2
	// string "APILatencies"
	o = append(o, 0x82, 0xac, 0x41, 0x50, 0x49, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73)
	o = msgp.AppendMapHeader(o, uint32(len(z.APILatencies)))
	for za0001, za0002 := range z.APILatencies {
		o = msgp.AppendString(o, za0001)
		o = msgp.AppendString(o, za0002)
	}
	// string "APICalls"
	o = append(o, 0xa8, 0x41, 0x50, 0x49, 0x43, 0x61, 0x6c, 0x6c, 0x73)
	o = msgp.AppendMapHeader(o, uint32(len(z.APICalls)))
	for za0003, za0004 := range z.APICalls {
		o = msgp.AppendString(o, za0003)
		o = msgp.AppendUint64(o, za0004)
	}
	return
}
// UnmarshalMsg implements msgp.Unmarshaler
//
// Generated by msgp: zero-copy variant of DecodeMsg that consumes the
// encoded DiskMetrics from bts and returns the remaining bytes.
// Unknown map keys are skipped for cross-version compatibility.
func (z *DiskMetrics) UnmarshalMsg(bts []byte) (o []byte, err error) {
	var field []byte
	_ = field
	var zb0001 uint32
	zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	for zb0001 > 0 {
		zb0001--
		field, bts, err = msgp.ReadMapKeyZC(bts)
		if err != nil {
			err = msgp.WrapError(err)
			return
		}
		switch msgp.UnsafeString(field) {
		case "APILatencies":
			var zb0002 uint32
			zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "APILatencies")
				return
			}
			// Reuse an existing destination map when possible, clearing
			// any stale entries first.
			if z.APILatencies == nil {
				z.APILatencies = make(map[string]string, zb0002)
			} else if len(z.APILatencies) > 0 {
				for key := range z.APILatencies {
					delete(z.APILatencies, key)
				}
			}
			for zb0002 > 0 {
				var za0001 string
				var za0002 string
				zb0002--
				za0001, bts, err = msgp.ReadStringBytes(bts)
				if err != nil {
					err = msgp.WrapError(err, "APILatencies")
					return
				}
				za0002, bts, err = msgp.ReadStringBytes(bts)
				if err != nil {
					err = msgp.WrapError(err, "APILatencies", za0001)
					return
				}
				z.APILatencies[za0001] = za0002
			}
		case "APICalls":
			var zb0003 uint32
			zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "APICalls")
				return
			}
			if z.APICalls == nil {
				z.APICalls = make(map[string]uint64, zb0003)
			} else if len(z.APICalls) > 0 {
				for key := range z.APICalls {
					delete(z.APICalls, key)
				}
			}
			for zb0003 > 0 {
				var za0003 string
				var za0004 uint64
				zb0003--
				za0003, bts, err = msgp.ReadStringBytes(bts)
				if err != nil {
					err = msgp.WrapError(err, "APICalls")
					return
				}
				za0004, bts, err = msgp.ReadUint64Bytes(bts)
				if err != nil {
					err = msgp.WrapError(err, "APICalls", za0003)
					return
				}
				z.APICalls[za0003] = za0004
			}
		default:
			// Forward compatibility: skip fields this version does not know.
			bts, err = msgp.Skip(bts)
			if err != nil {
				err = msgp.WrapError(err)
				return
			}
		}
	}
	o = bts
	return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *DiskMetrics) Msgsize() (s int) {
	// 1 = map header byte, 13 = encoded "APILatencies" key
	// (1-byte fixstr header + 12 characters).
	s = 1 + 13 + msgp.MapHeaderSize
	if z.APILatencies != nil {
		for za0001, za0002 := range z.APILatencies {
			_ = za0002
			s += msgp.StringPrefixSize + len(za0001) + msgp.StringPrefixSize + len(za0002)
		}
	}
	// 9 = encoded "APICalls" key (1-byte fixstr header + 8 characters).
	s += 9 + msgp.MapHeaderSize
	if z.APICalls != nil {
		for za0003, za0004 := range z.APICalls {
			_ = za0004
			s += msgp.StringPrefixSize + len(za0003) + msgp.Uint64Size
		}
	}
	return
}

View file

@ -122,6 +122,119 @@ func BenchmarkDecodeDiskInfo(b *testing.B) {
}
}
// TestMarshalUnmarshalDiskMetrics (generated by msgp) round-trips a
// zero-value DiskMetrics through MarshalMsg/UnmarshalMsg and verifies
// that no bytes are left over by either Unmarshal or Skip.
func TestMarshalUnmarshalDiskMetrics(t *testing.T) {
	v := DiskMetrics{}
	bts, err := v.MarshalMsg(nil)
	if err != nil {
		t.Fatal(err)
	}
	left, err := v.UnmarshalMsg(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
	}
	left, err = msgp.Skip(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
	}
}
// BenchmarkMarshalMsgDiskMetrics (generated by msgp) measures
// allocation-per-op of marshaling into a fresh buffer each iteration.
func BenchmarkMarshalMsgDiskMetrics(b *testing.B) {
	v := DiskMetrics{}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.MarshalMsg(nil)
	}
}
// BenchmarkAppendMsgDiskMetrics (generated by msgp) measures marshaling
// when the destination buffer is reused across iterations.
func BenchmarkAppendMsgDiskMetrics(b *testing.B) {
	v := DiskMetrics{}
	bts := make([]byte, 0, v.Msgsize())
	bts, _ = v.MarshalMsg(bts[0:0])
	b.SetBytes(int64(len(bts)))
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		bts, _ = v.MarshalMsg(bts[0:0])
	}
}
// BenchmarkUnmarshalDiskMetrics (generated by msgp) measures decoding
// the same marshaled payload repeatedly via UnmarshalMsg.
func BenchmarkUnmarshalDiskMetrics(b *testing.B) {
	v := DiskMetrics{}
	bts, _ := v.MarshalMsg(nil)
	b.ReportAllocs()
	b.SetBytes(int64(len(bts)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := v.UnmarshalMsg(bts)
		if err != nil {
			b.Fatal(err)
		}
	}
}
// TestEncodeDecodeDiskMetrics (generated by msgp) round-trips a
// zero-value DiskMetrics through the streaming Encode/Decode path and
// checks that Msgsize() is a valid upper bound and Skip() succeeds.
func TestEncodeDecodeDiskMetrics(t *testing.T) {
	v := DiskMetrics{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	m := v.Msgsize()
	if buf.Len() > m {
		t.Log("WARNING: TestEncodeDecodeDiskMetrics Msgsize() is inaccurate")
	}
	vn := DiskMetrics{}
	err := msgp.Decode(&buf, &vn)
	if err != nil {
		t.Error(err)
	}
	buf.Reset()
	msgp.Encode(&buf, &v)
	err = msgp.NewReader(&buf).Skip()
	if err != nil {
		t.Error(err)
	}
}
// BenchmarkEncodeDiskMetrics (generated by msgp) measures streaming
// encode throughput; output is discarded via msgp.Nowhere.
func BenchmarkEncodeDiskMetrics(b *testing.B) {
	v := DiskMetrics{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	en := msgp.NewWriter(msgp.Nowhere)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.EncodeMsg(en)
	}
	en.Flush()
}
// BenchmarkDecodeDiskMetrics (generated by msgp) measures streaming
// decode throughput from an endlessly repeating reader.
func BenchmarkDecodeDiskMetrics(b *testing.B) {
	v := DiskMetrics{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	rd := msgp.NewEndlessReader(buf.Bytes(), b)
	dc := msgp.NewReader(rd)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := v.DecodeMsg(dc)
		if err != nil {
			b.Fatal(err)
		}
	}
}
func TestMarshalUnmarshalFileInfo(t *testing.T) {
v := FileInfo{}
bts, err := v.MarshalMsg(nil)

View file

@ -0,0 +1,46 @@
// Code generated by "stringer -type=storageMetric -trimprefix=storageMetric xl-storage-disk-id-check.go"; DO NOT EDIT.
package cmd
import "strconv"
// Compile-time guard generated by stringer: if any storageMetric
// constant value changes, one of these index expressions becomes
// negative or out of range and compilation fails.
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[storageMetricMakeVolBulk-0]
	_ = x[storageMetricMakeVol-1]
	_ = x[storageMetricListVols-2]
	_ = x[storageMetricStatVol-3]
	_ = x[storageMetricDeleteVol-4]
	_ = x[storageMetricWalkDir-5]
	_ = x[storageMetricListDir-6]
	_ = x[storageMetricReadFile-7]
	_ = x[storageMetricAppendFile-8]
	_ = x[storageMetricCreateFile-9]
	_ = x[storageMetricReadFileStream-10]
	_ = x[storageMetricRenameFile-11]
	_ = x[storageMetricRenameData-12]
	_ = x[storageMetricCheckParts-13]
	_ = x[storageMetricCheckFile-14]
	_ = x[storageMetricDelete-15]
	_ = x[storageMetricDeleteVersions-16]
	_ = x[storageMetricVerifyFile-17]
	_ = x[storageMetricWriteAll-18]
	_ = x[storageMetricDeleteVersion-19]
	_ = x[storageMetricWriteMetadata-20]
	_ = x[storageMetricReadVersion-21]
	_ = x[storageMetricReadAll-22]
	_ = x[metricLast-23]
}
// _storageMetric_name concatenates every trimmed metric name;
// _storageMetric_index holds the start offset of each name within it.
const _storageMetric_name = "MakeVolBulkMakeVolListVolsStatVolDeleteVolWalkDirListDirReadFileAppendFileCreateFileReadFileStreamRenameFileRenameDataCheckPartsCheckFileDeleteDeleteVersionsVerifyFileWriteAllDeleteVersionWriteMetadataReadVersionReadAllmetricLast"

var _storageMetric_index = [...]uint8{0, 11, 18, 26, 33, 42, 49, 56, 64, 74, 84, 98, 108, 118, 128, 137, 143, 157, 167, 175, 188, 201, 212, 219, 229}

// String returns the metric name with the "storageMetric" prefix
// trimmed, or a numeric fallback for out-of-range values.
func (i storageMetric) String() string {
	if i >= storageMetric(len(_storageMetric_index)-1) {
		return "storageMetric(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _storageMetric_name[_storageMetric_index[i]:_storageMetric_index[i+1]]
}

View file

@ -19,12 +19,104 @@ package cmd
import (
"context"
"io"
"sync"
"sync/atomic"
"time"
ewma "github.com/VividCortex/ewma"
)
//go:generate stringer -type=storageMetric -trimprefix=storageMetric $GOFILE

// storageMetric enumerates the per-disk storage APIs whose call counts
// and latencies are tracked. After adding a value, re-run go generate
// to refresh the stringer table.
type storageMetric uint8

const (
	storageMetricMakeVolBulk storageMetric = iota
	storageMetricMakeVol
	storageMetricListVols
	storageMetricStatVol
	storageMetricDeleteVol
	storageMetricWalkDir
	storageMetricListDir
	storageMetricReadFile
	storageMetricAppendFile
	storageMetricCreateFile
	storageMetricReadFileStream
	storageMetricRenameFile
	storageMetricRenameData
	storageMetricCheckParts
	storageMetricCheckFile
	storageMetricDelete
	storageMetricDeleteVersions
	storageMetricVerifyFile
	storageMetricWriteAll
	storageMetricDeleteVersion
	storageMetricWriteMetadata
	storageMetricReadVersion
	storageMetricReadAll

	// .... add more

	// metricLast is a sentinel used to size the metric arrays; it must
	// stay last.
	metricLast
)
// Detects change in underlying disk.
type xlStorageDiskIDCheck struct {
storage *xlStorage
diskID string
apiCalls [metricLast]uint64
apiLatencies [metricLast]ewma.MovingAverage
}
// getMetrics takes a point-in-time snapshot of the per-API metrics
// tracked by this wrapper: the moving average of each storage API's
// latency (rendered as a duration string) and its total call count.
func (p *xlStorageDiskIDCheck) getMetrics() DiskMetrics {
	snapshot := DiskMetrics{
		APILatencies: make(map[string]string),
		APICalls:     make(map[string]uint64),
	}
	for idx := range p.apiLatencies {
		name := storageMetric(idx).String()
		snapshot.APILatencies[name] = time.Duration(p.apiLatencies[idx].Value()).String()
	}
	for idx := range p.apiCalls {
		name := storageMetric(idx).String()
		snapshot.APICalls[name] = atomic.LoadUint64(&p.apiCalls[idx])
	}
	return snapshot
}
// lockedSimpleEWMA guards an ewma.SimpleEWMA with an RWMutex so the
// moving average can be updated and read from concurrent goroutines.
type lockedSimpleEWMA struct {
	sync.RWMutex
	*ewma.SimpleEWMA
}

// Add folds a new sample into the moving average under the write lock.
func (e *lockedSimpleEWMA) Add(value float64) {
	e.Lock()
	defer e.Unlock()

	e.SimpleEWMA.Add(value)
}

// Set overwrites the moving average with value under the write lock.
func (e *lockedSimpleEWMA) Set(value float64) {
	e.Lock()
	defer e.Unlock()

	e.SimpleEWMA.Set(value)
}

// Value reports the current moving average under the read lock.
func (e *lockedSimpleEWMA) Value() (avg float64) {
	e.RLock()
	defer e.RUnlock()

	avg = e.SimpleEWMA.Value()
	return avg
}
// newXLStorageDiskIDCheck wraps the given local storage in a disk-ID
// checking layer that also records per-API call counts and latency
// moving averages. Every latency slot is seeded with a lock-protected
// simple EWMA so concurrent updates are safe.
func newXLStorageDiskIDCheck(storage *xlStorage) *xlStorageDiskIDCheck {
	xl := xlStorageDiskIDCheck{
		storage: storage,
	}
	// Range the array directly (the previous `[:]` slicing was
	// redundant: only the index is used, so no element copy is made).
	for i := range xl.apiLatencies {
		xl.apiLatencies[i] = &lockedSimpleEWMA{
			SimpleEWMA: new(ewma.SimpleEWMA),
		}
	}
	return &xl
}
func (p *xlStorageDiskIDCheck) String() string {
@ -117,6 +209,8 @@ func (p *xlStorageDiskIDCheck) DiskInfo(ctx context.Context) (info DiskInfo, err
if err != nil {
return info, err
}
info.Metrics = p.getMetrics()
// check cached diskID against backend
// only if its non-empty.
if p.diskID != "" {
@ -128,6 +222,8 @@ func (p *xlStorageDiskIDCheck) DiskInfo(ctx context.Context) (info DiskInfo, err
}
func (p *xlStorageDiskIDCheck) MakeVolBulk(ctx context.Context, volumes ...string) (err error) {
defer p.updateStorageMetrics(storageMetricMakeVolBulk)()
select {
case <-ctx.Done():
return ctx.Err()
@ -141,6 +237,8 @@ func (p *xlStorageDiskIDCheck) MakeVolBulk(ctx context.Context, volumes ...strin
}
func (p *xlStorageDiskIDCheck) MakeVol(ctx context.Context, volume string) (err error) {
defer p.updateStorageMetrics(storageMetricMakeVol)()
select {
case <-ctx.Done():
return ctx.Err()
@ -154,6 +252,8 @@ func (p *xlStorageDiskIDCheck) MakeVol(ctx context.Context, volume string) (err
}
func (p *xlStorageDiskIDCheck) ListVols(ctx context.Context) ([]VolInfo, error) {
defer p.updateStorageMetrics(storageMetricListVols)()
select {
case <-ctx.Done():
return nil, ctx.Err()
@ -167,6 +267,8 @@ func (p *xlStorageDiskIDCheck) ListVols(ctx context.Context) ([]VolInfo, error)
}
func (p *xlStorageDiskIDCheck) StatVol(ctx context.Context, volume string) (vol VolInfo, err error) {
defer p.updateStorageMetrics(storageMetricStatVol)()
select {
case <-ctx.Done():
return VolInfo{}, ctx.Err()
@ -180,6 +282,8 @@ func (p *xlStorageDiskIDCheck) StatVol(ctx context.Context, volume string) (vol
}
func (p *xlStorageDiskIDCheck) DeleteVol(ctx context.Context, volume string, forceDelete bool) (err error) {
defer p.updateStorageMetrics(storageMetricDeleteVol)()
select {
case <-ctx.Done():
return ctx.Err()
@ -193,6 +297,8 @@ func (p *xlStorageDiskIDCheck) DeleteVol(ctx context.Context, volume string, for
}
func (p *xlStorageDiskIDCheck) ListDir(ctx context.Context, volume, dirPath string, count int) ([]string, error) {
defer p.updateStorageMetrics(storageMetricListDir)()
select {
case <-ctx.Done():
return nil, ctx.Err()
@ -207,6 +313,8 @@ func (p *xlStorageDiskIDCheck) ListDir(ctx context.Context, volume, dirPath stri
}
func (p *xlStorageDiskIDCheck) ReadFile(ctx context.Context, volume string, path string, offset int64, buf []byte, verifier *BitrotVerifier) (n int64, err error) {
defer p.updateStorageMetrics(storageMetricReadFile)()
select {
case <-ctx.Done():
return 0, ctx.Err()
@ -221,6 +329,8 @@ func (p *xlStorageDiskIDCheck) ReadFile(ctx context.Context, volume string, path
}
func (p *xlStorageDiskIDCheck) AppendFile(ctx context.Context, volume string, path string, buf []byte) (err error) {
defer p.updateStorageMetrics(storageMetricAppendFile)()
select {
case <-ctx.Done():
return ctx.Err()
@ -235,6 +345,8 @@ func (p *xlStorageDiskIDCheck) AppendFile(ctx context.Context, volume string, pa
}
func (p *xlStorageDiskIDCheck) CreateFile(ctx context.Context, volume, path string, size int64, reader io.Reader) error {
defer p.updateStorageMetrics(storageMetricCreateFile)()
select {
case <-ctx.Done():
return ctx.Err()
@ -249,6 +361,8 @@ func (p *xlStorageDiskIDCheck) CreateFile(ctx context.Context, volume, path stri
}
func (p *xlStorageDiskIDCheck) ReadFileStream(ctx context.Context, volume, path string, offset, length int64) (io.ReadCloser, error) {
defer p.updateStorageMetrics(storageMetricReadFileStream)()
select {
case <-ctx.Done():
return nil, ctx.Err()
@ -263,6 +377,8 @@ func (p *xlStorageDiskIDCheck) ReadFileStream(ctx context.Context, volume, path
}
func (p *xlStorageDiskIDCheck) RenameFile(ctx context.Context, srcVolume, srcPath, dstVolume, dstPath string) error {
defer p.updateStorageMetrics(storageMetricRenameFile)()
select {
case <-ctx.Done():
return ctx.Err()
@ -277,6 +393,8 @@ func (p *xlStorageDiskIDCheck) RenameFile(ctx context.Context, srcVolume, srcPat
}
func (p *xlStorageDiskIDCheck) RenameData(ctx context.Context, srcVolume, srcPath, dataDir, dstVolume, dstPath string) error {
defer p.updateStorageMetrics(storageMetricRenameData)()
select {
case <-ctx.Done():
return ctx.Err()
@ -291,6 +409,8 @@ func (p *xlStorageDiskIDCheck) RenameData(ctx context.Context, srcVolume, srcPat
}
func (p *xlStorageDiskIDCheck) CheckParts(ctx context.Context, volume string, path string, fi FileInfo) (err error) {
defer p.updateStorageMetrics(storageMetricCheckParts)()
select {
case <-ctx.Done():
return ctx.Err()
@ -305,6 +425,8 @@ func (p *xlStorageDiskIDCheck) CheckParts(ctx context.Context, volume string, pa
}
func (p *xlStorageDiskIDCheck) CheckFile(ctx context.Context, volume string, path string) (err error) {
defer p.updateStorageMetrics(storageMetricCheckFile)()
select {
case <-ctx.Done():
return ctx.Err()
@ -319,6 +441,8 @@ func (p *xlStorageDiskIDCheck) CheckFile(ctx context.Context, volume string, pat
}
func (p *xlStorageDiskIDCheck) Delete(ctx context.Context, volume string, path string, recursive bool) (err error) {
defer p.updateStorageMetrics(storageMetricDelete)()
select {
case <-ctx.Done():
return ctx.Err()
@ -333,6 +457,18 @@ func (p *xlStorageDiskIDCheck) Delete(ctx context.Context, volume string, path s
}
func (p *xlStorageDiskIDCheck) DeleteVersions(ctx context.Context, volume string, versions []FileInfo) (errs []error) {
defer p.updateStorageMetrics(storageMetricDeleteVersions)()
select {
case <-ctx.Done():
errs = make([]error, len(versions))
for i := range errs {
errs[i] = ctx.Err()
}
return errs
default:
}
if err := p.checkDiskStale(); err != nil {
errs = make([]error, len(versions))
for i := range errs {
@ -344,6 +480,8 @@ func (p *xlStorageDiskIDCheck) DeleteVersions(ctx context.Context, volume string
}
func (p *xlStorageDiskIDCheck) VerifyFile(ctx context.Context, volume, path string, fi FileInfo) error {
defer p.updateStorageMetrics(storageMetricVerifyFile)()
select {
case <-ctx.Done():
return ctx.Err()
@ -358,6 +496,8 @@ func (p *xlStorageDiskIDCheck) VerifyFile(ctx context.Context, volume, path stri
}
func (p *xlStorageDiskIDCheck) WriteAll(ctx context.Context, volume string, path string, b []byte) (err error) {
defer p.updateStorageMetrics(storageMetricWriteAll)()
select {
case <-ctx.Done():
return ctx.Err()
@ -372,6 +512,8 @@ func (p *xlStorageDiskIDCheck) WriteAll(ctx context.Context, volume string, path
}
func (p *xlStorageDiskIDCheck) DeleteVersion(ctx context.Context, volume, path string, fi FileInfo, forceDelMarker bool) (err error) {
defer p.updateStorageMetrics(storageMetricDeleteVersion)()
select {
case <-ctx.Done():
return ctx.Err()
@ -386,6 +528,8 @@ func (p *xlStorageDiskIDCheck) DeleteVersion(ctx context.Context, volume, path s
}
func (p *xlStorageDiskIDCheck) WriteMetadata(ctx context.Context, volume, path string, fi FileInfo) (err error) {
defer p.updateStorageMetrics(storageMetricWriteMetadata)()
select {
case <-ctx.Done():
return ctx.Err()
@ -400,6 +544,8 @@ func (p *xlStorageDiskIDCheck) WriteMetadata(ctx context.Context, volume, path s
}
func (p *xlStorageDiskIDCheck) ReadVersion(ctx context.Context, volume, path, versionID string, readData bool) (fi FileInfo, err error) {
defer p.updateStorageMetrics(storageMetricReadVersion)()
select {
case <-ctx.Done():
return fi, ctx.Err()
@ -414,6 +560,8 @@ func (p *xlStorageDiskIDCheck) ReadVersion(ctx context.Context, volume, path, ve
}
func (p *xlStorageDiskIDCheck) ReadAll(ctx context.Context, volume string, path string) (buf []byte, err error) {
defer p.updateStorageMetrics(storageMetricReadAll)()
select {
case <-ctx.Done():
return nil, ctx.Err()
@ -426,3 +574,12 @@ func (p *xlStorageDiskIDCheck) ReadAll(ctx context.Context, volume string, path
return p.storage.ReadAll(ctx, volume, path)
}
// updateStorageMetrics starts timing one call of storage API s and
// returns a function that, when invoked (typically via defer), bumps
// the call counter and folds the elapsed time into the latency
// moving average.
func (p *xlStorageDiskIDCheck) updateStorageMetrics(s storageMetric) func() {
	begin := time.Now()
	return func() {
		atomic.AddUint64(&p.apiCalls[s], 1)
		elapsed := time.Since(begin)
		p.apiLatencies[s].Add(float64(elapsed))
	}
}

View file

@ -33,7 +33,6 @@ import (
"runtime"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/dustin/go-humanize"
@ -89,8 +88,6 @@ func isValidVolname(volname string) bool {
// xlStorage - implements StorageAPI interface.
type xlStorage struct {
activeIOCount int32
diskPath string
endpoint Endpoint
@ -458,11 +455,6 @@ func (s *xlStorage) NSScanner(ctx context.Context, cache dataUsageCache) (dataUs
// DiskInfo provides current information about disk space usage,
// total free inodes and underlying filesystem.
func (s *xlStorage) DiskInfo(context.Context) (info DiskInfo, err error) {
atomic.AddInt32(&s.activeIOCount, 1)
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()
s.diskInfoCache.Once.Do(func() {
s.diskInfoCache.TTL = time.Second
s.diskInfoCache.Update = func() (interface{}, error) {
@ -630,11 +622,6 @@ func (s *xlStorage) MakeVol(ctx context.Context, volume string) error {
return errInvalidArgument
}
atomic.AddInt32(&s.activeIOCount, 1)
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()
volumeDir, err := s.getVolDir(volume)
if err != nil {
return err
@ -660,11 +647,6 @@ func (s *xlStorage) MakeVol(ctx context.Context, volume string) error {
// ListVols - list volumes.
func (s *xlStorage) ListVols(context.Context) (volsInfo []VolInfo, err error) {
atomic.AddInt32(&s.activeIOCount, 1)
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()
return listVols(s.diskPath)
}
@ -692,11 +674,6 @@ func listVols(dirPath string) ([]VolInfo, error) {
// StatVol - get volume info.
func (s *xlStorage) StatVol(ctx context.Context, volume string) (vol VolInfo, err error) {
atomic.AddInt32(&s.activeIOCount, 1)
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()
// Verify if volume is valid and it exists.
volumeDir, err := s.getVolDir(volume)
if err != nil {
@ -728,11 +705,6 @@ func (s *xlStorage) StatVol(ctx context.Context, volume string) (vol VolInfo, er
// DeleteVol - delete a volume.
func (s *xlStorage) DeleteVol(ctx context.Context, volume string, forceDelete bool) (err error) {
atomic.AddInt32(&s.activeIOCount, 1)
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()
// Verify if volume is valid and it exists.
volumeDir, err := s.getVolDir(volume)
if err != nil {
@ -794,11 +766,6 @@ func (s *xlStorage) isLeafDir(volume, leafPath string) bool {
// ListDir - return all the entries at the given directory path.
// If an entry is a directory it will be returned with a trailing SlashSeparator.
func (s *xlStorage) ListDir(ctx context.Context, volume, dirPath string, count int) (entries []string, err error) {
atomic.AddInt32(&s.activeIOCount, 1)
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()
// Verify if volume is valid and it exists.
volumeDir, err := s.getVolDir(volume)
if err != nil {
@ -869,11 +836,6 @@ func (s *xlStorage) DeleteVersion(ctx context.Context, volume, path string, fi F
return errFileNotFound
}
atomic.AddInt32(&s.activeIOCount, 1)
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()
volumeDir, err := s.getVolDir(volume)
if err != nil {
return err
@ -938,11 +900,6 @@ func (s *xlStorage) WriteMetadata(ctx context.Context, volume, path string, fi F
return err
}
atomic.AddInt32(&s.activeIOCount, 1)
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()
var xlMeta xlMetaV2
if !isXL2V1Format(buf) {
xlMeta, err = newXLMetaV2(fi)
@ -979,11 +936,6 @@ func (s *xlStorage) renameLegacyMetadata(volumeDir, path string) (err error) {
return errFileNotFound
}
atomic.AddInt32(&s.activeIOCount, 1)
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()
// Validate file path length, before reading.
filePath := pathJoin(volumeDir, path)
if err = checkPathLength(filePath); err != nil {
@ -1131,20 +1083,10 @@ func (s *xlStorage) readAllData(volumeDir string, filePath string, requireDirect
return nil, err
}
atomic.AddInt32(&s.activeIOCount, 1)
or := &odirectReader{f, nil, nil, true, true, s, nil}
rd := struct {
io.Reader
io.Closer
}{Reader: or, Closer: closeWrapper(func() error {
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()
return or.Close()
})}
defer rd.Close() // activeIOCount is decremented in Close()
defer or.Close()
buf, err = ioutil.ReadAll(rd)
buf, err = ioutil.ReadAll(or)
if err != nil {
err = osErrToFileErr(err)
}
@ -1191,11 +1133,6 @@ func (s *xlStorage) ReadFile(ctx context.Context, volume string, path string, of
return 0, errInvalidArgument
}
atomic.AddInt32(&s.activeIOCount, 1)
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()
volumeDir, err := s.getVolDir(volume)
if err != nil {
return 0, err
@ -1438,7 +1375,6 @@ func (s *xlStorage) ReadFileStream(ctx context.Context, volume, path string, off
return nil, errIsNotRegular
}
atomic.AddInt32(&s.activeIOCount, 1)
if offset == 0 && globalStorageClass.GetDMA() == storageclass.DMAReadWrite && s.readODirectSupported {
or := &odirectReader{file, nil, nil, true, false, s, nil}
if length <= smallFileThreshold {
@ -1448,9 +1384,6 @@ func (s *xlStorage) ReadFileStream(ctx context.Context, volume, path string, off
io.Reader
io.Closer
}{Reader: io.LimitReader(or, length), Closer: closeWrapper(func() error {
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()
return or.Close()
})}
return r, nil
@ -1460,9 +1393,6 @@ func (s *xlStorage) ReadFileStream(ctx context.Context, volume, path string, off
io.Reader
io.Closer
}{Reader: io.LimitReader(file, length), Closer: closeWrapper(func() error {
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()
return file.Close()
})}
@ -1502,11 +1432,6 @@ func (s *xlStorage) CreateFile(ctx context.Context, volume, path string, fileSiz
return errInvalidArgument
}
atomic.AddInt32(&s.activeIOCount, 1)
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()
volumeDir, err := s.getVolDir(volume)
if err != nil {
return err
@ -1614,11 +1539,6 @@ func (s *xlStorage) CreateFile(ctx context.Context, volume, path string, fileSiz
}
func (s *xlStorage) WriteAll(ctx context.Context, volume string, path string, b []byte) (err error) {
atomic.AddInt32(&s.activeIOCount, 1)
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()
w, err := s.openFile(volume, path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC)
if err != nil {
return err
@ -1640,11 +1560,6 @@ func (s *xlStorage) WriteAll(ctx context.Context, volume string, path string, b
// AppendFile - append a byte array at path, if file doesn't exist at
// path this call explicitly creates it.
func (s *xlStorage) AppendFile(ctx context.Context, volume string, path string, buf []byte) (err error) {
atomic.AddInt32(&s.activeIOCount, 1)
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()
volumeDir, err := s.getVolDir(volume)
if err != nil {
return err
@ -1681,11 +1596,6 @@ func (s *xlStorage) AppendFile(ctx context.Context, volume string, path string,
// CheckParts check if path has necessary parts available.
func (s *xlStorage) CheckParts(ctx context.Context, volume string, path string, fi FileInfo) error {
atomic.AddInt32(&s.activeIOCount, 1)
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()
volumeDir, err := s.getVolDir(volume)
if err != nil {
return err
@ -1732,11 +1642,6 @@ func (s *xlStorage) CheckParts(ctx context.Context, volume string, path string,
// - "a/b/"
// - "a/"
func (s *xlStorage) CheckFile(ctx context.Context, volume string, path string) error {
atomic.AddInt32(&s.activeIOCount, 1)
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()
volumeDir, err := s.getVolDir(volume)
if err != nil {
return err
@ -1842,11 +1747,6 @@ func (s *xlStorage) deleteFile(basePath, deletePath string, recursive bool) erro
// DeleteFile - delete a file at path.
func (s *xlStorage) Delete(ctx context.Context, volume string, path string, recursive bool) (err error) {
atomic.AddInt32(&s.activeIOCount, 1)
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()
volumeDir, err := s.getVolDir(volume)
if err != nil {
return err
@ -1878,11 +1778,6 @@ func (s *xlStorage) Delete(ctx context.Context, volume string, path string, recu
// RenameData - rename source path to destination path atomically, metadata and data directory.
func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath, dataDir, dstVolume, dstPath string) (err error) {
atomic.AddInt32(&s.activeIOCount, 1)
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()
srcVolumeDir, err := s.getVolDir(srcVolume)
if err != nil {
return err
@ -2134,11 +2029,6 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath, dataDir,
// RenameFile - rename source path to destination path atomically.
func (s *xlStorage) RenameFile(ctx context.Context, srcVolume, srcPath, dstVolume, dstPath string) (err error) {
atomic.AddInt32(&s.activeIOCount, 1)
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()
srcVolumeDir, err := s.getVolDir(srcVolume)
if err != nil {
return err
@ -2287,11 +2177,6 @@ func (s *xlStorage) bitrotVerify(partPath string, partSize int64, algo BitrotAlg
}
func (s *xlStorage) VerifyFile(ctx context.Context, volume, path string, fi FileInfo) (err error) {
atomic.AddInt32(&s.activeIOCount, 1)
defer func() {
atomic.AddInt32(&s.activeIOCount, -1)
}()
volumeDir, err := s.getVolDir(volume)
if err != nil {
return err

View file

@ -132,7 +132,9 @@ func newXLStorageTestSetup() (*xlStorageDiskIDCheck, string, error) {
if err != nil {
return nil, "", err
}
return &xlStorageDiskIDCheck{storage: storage, diskID: "da017d62-70e3-45f1-8a1a-587707e69ad1"}, diskPath, nil
disk := newXLStorageDiskIDCheck(storage)
disk.diskID = "da017d62-70e3-45f1-8a1a-587707e69ad1"
return disk, diskPath, nil
}
// createPermDeniedFile - creates temporary directory and file with path '/mybucket/myobject'

2
go.mod
View file

@ -9,6 +9,7 @@ require (
github.com/Azure/azure-storage-blob-go v0.10.0
github.com/Azure/go-autorest/autorest/adal v0.9.1 // indirect
github.com/Shopify/sarama v1.27.2
github.com/VividCortex/ewma v1.1.1
github.com/alecthomas/participle v0.2.1
github.com/bcicen/jstream v1.0.1
github.com/beevik/ntp v0.3.0
@ -76,7 +77,6 @@ require (
github.com/tidwall/gjson v1.6.7
github.com/tidwall/sjson v1.0.4
github.com/tinylib/msgp v1.1.3
github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31 // indirect
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a
github.com/willf/bitset v1.1.11 // indirect
github.com/willf/bloom v2.0.3+incompatible

5
go.sum
View file

@ -41,6 +41,8 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWso
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk=
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM=
github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
github.com/alecthomas/participle v0.2.1 h1:4AVLj1viSGa4LG5HDXKXrm5xRx19SB/rS/skPQB1Grw=
@ -331,7 +333,6 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.7 h1:0hzRabrMN4tSTvMfnL3SCv1ZGeAP23ynzodBgaHeMeg=
github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.12 h1:famVnQVu7QwryBN4jNseQdUKES71ZAOnB6UQQJPZvqk=
github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
@ -595,8 +596,6 @@ github.com/tinylib/msgp v1.1.3 h1:3giwAkmtaEDLSV0MdO1lDLuPgklgPzmk8H9+So2BVfA=
github.com/tinylib/msgp v1.1.3/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8 h1:ndzgwNDnKIqyCvHTXaCqh9KlOWKvBry6nuXMJmonVsE=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31 h1:OXcKh35JaYsGMRzpvFkLv/MEyPuL49CThT1pZ8aSml4=
github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=

View file

@ -291,6 +291,14 @@ type ServerProperties struct {
PoolNumber int `json:"poolNumber,omitempty"`
}
// DiskMetrics holds per-disk storage-API metrics: for each XL storage
// API (e.g. ReadFile, WriteAll) the total number of calls made and an
// exponentially-weighted moving average of the call latency.
type DiskMetrics struct {
	// APILatencies maps an API name to its moving-average latency,
	// pre-formatted as a duration string for display (e.g. in
	// `mc admin info --json`).
	APILatencies map[string]string `json:"apiLatencies,omitempty"`
	// APICalls maps an API name to the cumulative number of times
	// that API has been invoked on this disk.
	APICalls map[string]uint64 `json:"apiCalls,omitempty"`
}
// Disk holds Disk information
type Disk struct {
Endpoint string `json:"endpoint,omitempty"`
@ -308,6 +316,7 @@ type Disk struct {
ReadLatency float64 `json:"readlatency,omitempty"`
WriteLatency float64 `json:"writelatency,omitempty"`
Utilization float64 `json:"utilization,omitempty"`
Metrics *DiskMetrics `json:"metrics,omitempty"`
HealInfo *HealingDisk `json:"heal_info,omitempty"`
// Indexes, will be -1 until assigned a set.