minio/cmd/xl-v1_test.go
Harshavardhana fb96779a8a Add large bucket support for erasure coded backend (#5160)
This PR implements an object layer which
combines input erasure sets of XL layers
into a unified namespace.

This object layer extends the existing
erasure coded implementation. The design
assumes that providing > 16 disks is a
static configuration as well, i.e. if you
started the setup with 32 disks as 4 sets
of 8 disks each, then you must always
provide 4 sets.
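
As a rough sketch of this static layout rule (validateSetLayout is a
hypothetical helper invented here for illustration, not MinIO code):
the disk count must divide evenly into the fixed number of sets, with
at most 16 disks per set.

    package main

    import "fmt"

    // validateSetLayout illustrates the static layout rule: disks
    // must split evenly into the configured sets, capped at 16 each.
    func validateSetLayout(totalDisks, setCount int) (int, error) {
        if setCount <= 0 || totalDisks%setCount != 0 {
            return 0, fmt.Errorf("%d disks cannot form %d equal sets", totalDisks, setCount)
        }
        disksPerSet := totalDisks / setCount
        if disksPerSet > 16 {
            return 0, fmt.Errorf("%d disks per set exceeds the 16 disk limit", disksPerSet)
        }
        return disksPerSet, nil
    }

    func main() {
        n, err := validateSetLayout(32, 4)
        fmt.Println(n, err) // 8 <nil>, matching the 32 disk / 4 set example above
    }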

Some design details and restrictions:

- Objects are distributed using consistent ordering
  to a unique erasure coded layer (a sketch of this
  mapping follows the list).
- Each set has its own dsync, so locks are synchronized
  properly at the set (erasure layer).
- Each set still has a maximum of 16 disks; you can
  start with multiple such sets statically.
- A set is a static group of disks that cannot be
  changed; neither elastic expansion nor elastic
  removal of disks is allowed.
- ListObjects() across sets can be noticeably
  slower, since List happens on all servers
  and the results are merged at the sets layer.
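
A minimal sketch of the consistent-ordering mapping referenced in the
first bullet (the CRC-based hash and the name setIndexFor are
assumptions for illustration; the actual hashing scheme may differ):

    package main

    import (
        "fmt"
        "hash/crc32"
    )

    // setIndexFor hashes the object key and reduces it modulo the
    // set count, so a key always lands on the same erasure set as
    // long as the set count stays static.
    func setIndexFor(object string, setCount int) int {
        return int(crc32.ChecksumIEEE([]byte(object)) % uint32(setCount))
    }

    func main() {
        for _, key := range []string{"photos/a.jpg", "logs/2018.txt"} {
            fmt.Printf("%s -> set %d\n", key, setIndexFor(key, 4))
        }
    }

Note how deterministic placement is exactly what rules out elastic
expansion: changing the set count would remap existing keys.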

Fixes #5465
Fixes #5464
Fixes #5461
Fixes #5460
Fixes #5459
Fixes #5458
Fixes #5488
Fixes #5489
Fixes #5497
Fixes #5496
2018-02-15 17:45:57 -08:00


/*
 * Minio Cloud Storage, (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"os"
	"reflect"
	"testing"

	"github.com/minio/minio/pkg/disk"
)

// TestStorageInfo - tests storage info.
func TestStorageInfo(t *testing.T) {
	objLayer, fsDirs, err := prepareXL16()
	if err != nil {
		t.Fatalf("Unable to initialize 'XL' object layer: %v", err)
	}
	// Remove all dirs.
	for _, dir := range fsDirs {
		defer os.RemoveAll(dir)
	}

	// Get storage info first attempt.
	disks16Info := objLayer.StorageInfo()

	// This test assumes homogeneity between all disks,
	// i.e. if we lose one disk the effective storage
	// usage value is assumed to decrease. If we have a
	// heterogeneous environment this is not true all the time.
	if disks16Info.Free <= 0 {
		t.Fatalf("Diskinfo free value should be greater than 0")
	}
	if disks16Info.Total <= 0 {
		t.Fatalf("Diskinfo total value should be greater than 0")
	}
}
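
// aggregateDisksInfoSketch is an editor-added, hypothetical sketch of
// how a sets layer could fold per-disk capacities into the single
// StorageInfo figure asserted above; the real StorageInfo
// implementation in this package may compute this differently.
func aggregateDisksInfoSketch(disksInfo []disk.Info) disk.Info {
	var agg disk.Info
	for _, info := range disksInfo {
		// Offline disks report zeroed values, so summing them is safe.
		agg.Total += info.Total
		agg.Free += info.Free
	}
	return agg
}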

// Sort valid disks info.
func TestSortingValidDisks(t *testing.T) {
	testCases := []struct {
		disksInfo      []disk.Info
		validDisksInfo []disk.Info
	}{
		// One of the disks is offline.
		{
			disksInfo: []disk.Info{
				{Total: 150, Free: 10},
				{Total: 0, Free: 0},
				{Total: 200, Free: 10},
				{Total: 100, Free: 10},
			},
			validDisksInfo: []disk.Info{
				{Total: 100, Free: 10},
				{Total: 150, Free: 10},
				{Total: 200, Free: 10},
			},
		},
		// All disks are online.
		{
			disksInfo: []disk.Info{
				{Total: 150, Free: 10},
				{Total: 200, Free: 10},
				{Total: 100, Free: 10},
				{Total: 115, Free: 10},
			},
			validDisksInfo: []disk.Info{
				{Total: 100, Free: 10},
				{Total: 115, Free: 10},
				{Total: 150, Free: 10},
				{Total: 200, Free: 10},
			},
		},
	}

	for i, testCase := range testCases {
		validDisksInfo := sortValidDisksInfo(testCase.disksInfo)
		if !reflect.DeepEqual(validDisksInfo, testCase.validDisksInfo) {
			t.Errorf("Test %d: Expected %#v, Got %#v", i+1, testCase.validDisksInfo, validDisksInfo)
		}
	}
}
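
// sortValidDisksInfoSketch is an editor-added, hypothetical reference
// implementation inferred from the test cases above: offline disks
// (Total == 0) are dropped and the remainder is ordered by ascending
// Total. The real sortValidDisksInfo lives elsewhere in this package
// and may be implemented differently.
func sortValidDisksInfoSketch(disksInfo []disk.Info) []disk.Info {
	var validDisksInfo []disk.Info
	for _, info := range disksInfo {
		if info.Total == 0 {
			// A zeroed Total marks an offline disk; skip it.
			continue
		}
		validDisksInfo = append(validDisksInfo, info)
	}
	// Insertion sort by ascending Total, keeping the sketch free of
	// extra imports.
	for i := 1; i < len(validDisksInfo); i++ {
		for j := i; j > 0 && validDisksInfo[j-1].Total > validDisksInfo[j].Total; j-- {
			validDisksInfo[j-1], validDisksInfo[j] = validDisksInfo[j], validDisksInfo[j-1]
		}
	}
	return validDisksInfo
}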