Merge pull request #670 from harshavardhana/pr_out_go_vet_fixes_for_donut

This commit is contained in:
Harshavardhana 2015-06-25 04:08:49 +00:00
commit 5abcb7f348
21 changed files with 630 additions and 909 deletions

View file

@ -47,8 +47,8 @@ const (
SIMDAlign = 32
)
// ErasureParams is a configuration set for building an encoder. It is created using ValidateParams().
type ErasureParams struct {
// Params is a configuration set for building an encoder. It is created using ValidateParams().
type Params struct {
K uint8
M uint8
Technique Technique // cauchy or vandermonde matrix (RS)
@ -56,18 +56,18 @@ type ErasureParams struct {
// Erasure is an object used to encode and decode data.
type Erasure struct {
params *ErasureParams
params *Params
encodeMatrix, encodeTbls *C.uchar
decodeMatrix, decodeTbls *C.uchar
decodeIndex *C.uint32_t
}
// ValidateParams creates an ErasureParams object.
// ValidateParams creates an Params object.
//
// k and m represent the matrix size, which corresponds to the protection level
// technique is the matrix type. Valid inputs are Cauchy (recommended) or Vandermonde.
//
func ValidateParams(k, m uint8, technique Technique) (*ErasureParams, error) {
func ValidateParams(k, m uint8, technique Technique) (*Params, error) {
if k < 1 {
return nil, errors.New("k cannot be zero")
}
@ -89,7 +89,7 @@ func ValidateParams(k, m uint8, technique Technique) (*ErasureParams, error) {
return nil, errors.New("Technique can be either vandermonde or cauchy")
}
return &ErasureParams{
return &Params{
K: k,
M: m,
Technique: technique,
@ -97,7 +97,7 @@ func ValidateParams(k, m uint8, technique Technique) (*ErasureParams, error) {
}
// NewErasure creates an encoder object with a given set of parameters.
func NewErasure(ep *ErasureParams) *Erasure {
func NewErasure(ep *Params) *Erasure {
var k = C.int(ep.K)
var m = C.int(ep.M)

View file

@ -18,21 +18,212 @@ package donut
import (
"bytes"
"crypto/md5"
"encoding/hex"
"encoding/json"
"fmt"
"hash"
"io"
"path/filepath"
"strconv"
"strings"
"time"
"crypto/md5"
"encoding/hex"
"encoding/json"
"github.com/minio/minio/pkg/iodine"
"github.com/minio/minio/pkg/utils/split"
)
/// This file contains all the internal functions used by Bucket interface
// internal struct carrying bucket specific information
type bucket struct {
name string
acl string
time time.Time
donutName string
nodes map[string]Node
objects map[string]object
}
// newBucket - construct a bucket instance together with its initial
// metadata map ("acl" and "created" keys).
func newBucket(bucketName, aclType, donutName string, nodes map[string]Node) (bucket, map[string]string, error) {
	errParams := map[string]string{
		"bucketName": bucketName,
		"donutName":  donutName,
		"aclType":    aclType,
	}
	// both the bucket name and the donut name are mandatory
	if strings.TrimSpace(bucketName) == "" || strings.TrimSpace(donutName) == "" {
		return bucket{}, nil, iodine.New(InvalidArgument{}, errParams)
	}
	now := time.Now().UTC()
	bucketMetadata := map[string]string{
		"acl":     aclType,
		"created": now.Format(time.RFC3339Nano),
	}
	b := bucket{
		name:      bucketName,
		acl:       aclType,
		time:      now,
		donutName: donutName,
		nodes:     nodes,
		objects:   make(map[string]object),
	}
	return b, bucketMetadata, nil
}
// ListObjects - walk every disk slice of this bucket across all nodes
// and collect all objects found, keyed by object name.
func (b bucket) ListObjects() (map[string]object, error) {
	nodeSlice := 0
	for _, node := range b.nodes {
		disks, err := node.ListDisks()
		if err != nil {
			return nil, iodine.New(err, nil)
		}
		for order, disk := range disks {
			// bucket data is sharded into "<bucket>$<node>$<disk>" directories
			bucketPath := filepath.Join(b.donutName, fmt.Sprintf("%s$%d$%d", b.name, nodeSlice, order))
			entries, err := disk.ListDir(bucketPath)
			if err != nil {
				return nil, iodine.New(err, nil)
			}
			for _, entry := range entries {
				obj, err := newObject(entry.Name(), filepath.Join(disk.GetPath(), bucketPath))
				if err != nil {
					return nil, iodine.New(err, nil)
				}
				objMetadata, err := obj.GetObjectMetadata()
				if err != nil {
					return nil, iodine.New(err, nil)
				}
				// the "object" metadata key carries the canonical object name
				objectName, ok := objMetadata["object"]
				if !ok {
					return nil, iodine.New(ObjectCorrupted{Object: entry.Name()}, nil)
				}
				b.objects[objectName] = obj
			}
		}
		nodeSlice++
	}
	return b.objects, nil
}
// ReadObject - open an object for reading.
//
// Returns a pipe reader fed by a background goroutine which decodes the
// object's (possibly erasure encoded) data, along with the object size
// parsed from its metadata.
func (b bucket) ReadObject(objectName string) (reader io.ReadCloser, size int64, err error) {
	// validate the argument up front; the previous check sat after the
	// full disk walk and could never trigger for an empty name (the map
	// lookup below would fail first), and the pipe writer is never nil
	if objectName == "" {
		return nil, 0, iodine.New(InvalidArgument{}, nil)
	}
	reader, writer := io.Pipe()
	// get list of objects
	objects, err := b.ListObjects()
	if err != nil {
		return nil, 0, iodine.New(err, nil)
	}
	// check if object exists
	object, ok := objects[objectName]
	if !ok {
		return nil, 0, iodine.New(ObjectNotFound{Object: objectName}, nil)
	}
	// verify if objectMetadata is readable, before we serve the request
	objectMetadata, err := object.GetObjectMetadata()
	if err != nil {
		return nil, 0, iodine.New(err, nil)
	}
	if len(objectMetadata) == 0 {
		return nil, 0, iodine.New(InvalidArgument{}, nil)
	}
	size, err = strconv.ParseInt(objectMetadata["size"], 10, 64)
	if err != nil {
		return nil, 0, iodine.New(err, nil)
	}
	// verify if donutObjectMetadata is readable, before we serve the request
	donutObjectMetadata, err := object.GetDonutObjectMetadata()
	if err != nil {
		return nil, 0, iodine.New(err, nil)
	}
	// read and reply back to the ReadObject() caller in a go-routine
	go b.readEncodedData(b.normalizeObjectName(objectName), writer, donutObjectMetadata)
	return reader, size, nil
}
// WriteObject - write a new object into bucket.
//
// Streams objectData onto the bucket's disk writers — erasure encoding
// across disks when more than one writer is available — writes the
// object/donut metadata, and returns the hex encoded md5sum of the data.
// If expectedMD5Sum is non-empty it is verified against the computed sum.
func (b bucket) WriteObject(objectName string, objectData io.Reader, expectedMD5Sum string, metadata map[string]string) (string, error) {
	if objectName == "" || objectData == nil {
		return "", iodine.New(InvalidArgument{}, nil)
	}
	writers, err := b.getDiskWriters(b.normalizeObjectName(objectName), "data")
	if err != nil {
		return "", iodine.New(err, nil)
	}
	// close all writers on every exit path; previously the early error
	// returns below leaked the open disk writers
	defer func() {
		for _, writer := range writers {
			writer.Close()
		}
	}()
	summer := md5.New()
	objectMetadata := make(map[string]string)
	donutObjectMetadata := make(map[string]string)
	objectMetadata["version"] = objectMetadataVersion
	donutObjectMetadata["version"] = donutObjectMetadataVersion
	size := metadata["contentLength"]
	sizeInt, err := strconv.ParseInt(size, 10, 64)
	if err != nil {
		return "", iodine.New(err, nil)
	}
	// if total writers are only '1' do not compute erasure
	switch len(writers) == 1 {
	case true:
		mw := io.MultiWriter(writers[0], summer)
		totalLength, err := io.CopyN(mw, objectData, sizeInt)
		if err != nil {
			return "", iodine.New(err, nil)
		}
		donutObjectMetadata["sys.size"] = strconv.FormatInt(totalLength, 10)
		objectMetadata["size"] = strconv.FormatInt(totalLength, 10)
	case false:
		// calculate data and parity dictated by total number of writers
		k, m, err := b.getDataAndParity(len(writers))
		if err != nil {
			return "", iodine.New(err, nil)
		}
		// encoded data with k, m and write
		chunkCount, totalLength, err := b.writeEncodedData(k, m, writers, objectData, summer)
		if err != nil {
			return "", iodine.New(err, nil)
		}
		/// donutMetadata section
		donutObjectMetadata["sys.blockSize"] = strconv.Itoa(10 * 1024 * 1024)
		donutObjectMetadata["sys.chunkCount"] = strconv.Itoa(chunkCount)
		donutObjectMetadata["sys.erasureK"] = strconv.FormatUint(uint64(k), 10)
		donutObjectMetadata["sys.erasureM"] = strconv.FormatUint(uint64(m), 10)
		donutObjectMetadata["sys.erasureTechnique"] = "Cauchy"
		donutObjectMetadata["sys.size"] = strconv.Itoa(totalLength)
		// keep size inside objectMetadata as well for Object API requests
		objectMetadata["size"] = strconv.Itoa(totalLength)
	}
	objectMetadata["bucket"] = b.name
	objectMetadata["object"] = objectName
	// store all user provided metadata
	for k, v := range metadata {
		objectMetadata[k] = v
	}
	dataMd5sum := summer.Sum(nil)
	objectMetadata["created"] = time.Now().UTC().Format(time.RFC3339Nano)
	// keeping md5sum for the object in two different places
	// one for object storage and another is for internal use
	objectMetadata["md5"] = hex.EncodeToString(dataMd5sum)
	donutObjectMetadata["sys.md5"] = hex.EncodeToString(dataMd5sum)
	// Verify if the written object is equal to what is expected, only if it is requested as such
	if strings.TrimSpace(expectedMD5Sum) != "" {
		if err := b.isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), objectMetadata["md5"]); err != nil {
			return "", iodine.New(err, nil)
		}
	}
	// write donut specific metadata
	if err := b.writeDonutObjectMetadata(b.normalizeObjectName(objectName), donutObjectMetadata); err != nil {
		return "", iodine.New(err, nil)
	}
	// write object specific metadata
	if err := b.writeObjectMetadata(b.normalizeObjectName(objectName), objectMetadata); err != nil {
		return "", iodine.New(err, nil)
	}
	return objectMetadata["md5"], nil
}
// isMD5SumEqual - returns error if md5sum mismatches, otherwise it's `nil`
func (b bucket) isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) error {
@ -127,7 +318,7 @@ func (b bucket) getDataAndParity(totalWriters int) (k uint8, m uint8, err error)
// writeEncodedData -
func (b bucket) writeEncodedData(k, m uint8, writers []io.WriteCloser, objectData io.Reader, summer hash.Hash) (int, int, error) {
chunks := split.Stream(objectData, 10*1024*1024)
encoder, err := NewEncoder(k, m, "Cauchy")
encoder, err := newEncoder(k, m, "Cauchy")
if err != nil {
return 0, 0, iodine.New(err, nil)
}
@ -176,7 +367,7 @@ func (b bucket) readEncodedData(objectName string, writer *io.PipeWriter, donutO
writer.CloseWithError(iodine.New(MissingErasureTechnique{}, nil))
return
}
encoder, err := NewEncoder(uint8(k), uint8(m), technique)
encoder, err := newEncoder(uint8(k), uint8(m), technique)
if err != nil {
writer.CloseWithError(iodine.New(err, nil))
return
@ -211,7 +402,7 @@ func (b bucket) readEncodedData(objectName string, writer *io.PipeWriter, donutO
}
// decodeEncodedData -
func (b bucket) decodeEncodedData(totalLeft, blockSize int64, readers []io.ReadCloser, encoder Encoder, writer *io.PipeWriter) ([]byte, error) {
func (b bucket) decodeEncodedData(totalLeft, blockSize int64, readers []io.ReadCloser, encoder encoder, writer *io.PipeWriter) ([]byte, error) {
var curBlockSize int64
if blockSize < totalLeft {
curBlockSize = blockSize
@ -273,14 +464,14 @@ func (b bucket) getDiskReaders(objectName, objectMeta string) ([]io.ReadCloser,
return nil, iodine.New(err, nil)
}
readers = make([]io.ReadCloser, len(disks))
for _, disk := range disks {
bucketSlice := fmt.Sprintf("%s$%d$%d", b.name, nodeSlice, disk.GetOrder())
for order, disk := range disks {
bucketSlice := fmt.Sprintf("%s$%d$%d", b.name, nodeSlice, order)
objectPath := filepath.Join(b.donutName, bucketSlice, objectName, objectMeta)
objectSlice, err := disk.OpenFile(objectPath)
if err != nil {
return nil, iodine.New(err, nil)
}
readers[disk.GetOrder()] = objectSlice
readers[order] = objectSlice
}
nodeSlice = nodeSlice + 1
}
@ -297,14 +488,14 @@ func (b bucket) getDiskWriters(objectName, objectMeta string) ([]io.WriteCloser,
return nil, iodine.New(err, nil)
}
writers = make([]io.WriteCloser, len(disks))
for _, disk := range disks {
bucketSlice := fmt.Sprintf("%s$%d$%d", b.name, nodeSlice, disk.GetOrder())
for order, disk := range disks {
bucketSlice := fmt.Sprintf("%s$%d$%d", b.name, nodeSlice, order)
objectPath := filepath.Join(b.donutName, bucketSlice, objectName, objectMeta)
objectSlice, err := disk.MakeFile(objectPath)
objectSlice, err := disk.CreateFile(objectPath)
if err != nil {
return nil, iodine.New(err, nil)
}
writers[disk.GetOrder()] = objectSlice
writers[order] = objectSlice
}
nodeSlice = nodeSlice + 1
}

View file

@ -0,0 +1,175 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package disk
import (
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"syscall"
"github.com/minio/minio/pkg/iodine"
)
// Disk container for disk parameters
type Disk struct {
	path   string            // root path of this disk
	fsInfo map[string]string // filesystem info: FSType, MountPoint, plus usage keys filled by GetFSInfo
}
// New - instantiate a disk rooted at diskPath, validating that the path
// exists, is a directory, and sits on a supported filesystem.
func New(diskPath string) (Disk, error) {
	if diskPath == "" {
		return Disk{}, iodine.New(InvalidArgument{}, nil)
	}
	var fs syscall.Statfs_t
	if err := syscall.Statfs(diskPath, &fs); err != nil {
		return Disk{}, iodine.New(err, nil)
	}
	st, err := os.Stat(diskPath)
	if err != nil {
		return Disk{}, iodine.New(err, nil)
	}
	// a disk root must be a directory
	if !st.IsDir() {
		return Disk{}, iodine.New(syscall.ENOTDIR, nil)
	}
	fsType := getFSType(fs.Type)
	if fsType == "UNKNOWN" {
		fsTypeStr := strconv.FormatInt(fs.Type, 10)
		return Disk{}, iodine.New(UnsupportedFilesystem{Type: fsTypeStr},
			map[string]string{"Type": fsTypeStr})
	}
	disk := Disk{
		path:   diskPath,
		fsInfo: make(map[string]string),
	}
	disk.fsInfo["FSType"] = fsType
	disk.fsInfo["MountPoint"] = disk.path
	return disk, nil
}
// GetPath - get the root path this disk was initialized with.
func (disk Disk) GetPath() string {
	return disk.path
}
// GetFSInfo - get disk filesystem and its usage information.
//
// Re-stats the filesystem on every call and merges fresh usage figures
// into the shared fsInfo map: "Total"/"Free" as human readable strings,
// "TotalB"/"FreeB" as raw byte counts in decimal.
func (disk Disk) GetFSInfo() map[string]string {
	s := syscall.Statfs_t{}
	err := syscall.Statfs(disk.path, &s)
	if err != nil {
		// NOTE(review): the statfs error is swallowed — callers only
		// see a nil map; confirm this is intentional
		return nil
	}
	disk.fsInfo["Total"] = formatBytes(s.Bsize * int64(s.Blocks))
	disk.fsInfo["Free"] = formatBytes(s.Bsize * int64(s.Bfree))
	disk.fsInfo["TotalB"] = strconv.FormatInt(s.Bsize*int64(s.Blocks), 10)
	disk.fsInfo["FreeB"] = strconv.FormatInt(s.Bsize*int64(s.Bfree), 10)
	return disk.fsInfo
}
// MakeDir - make a directory (and any missing parents, mode 0700)
// inside the disk root path.
func (disk Disk) MakeDir(dirname string) error {
	// NOTE(review): error is returned raw here, unlike the iodine-wrapped
	// errors elsewhere in this file — confirm callers expect that
	return os.MkdirAll(filepath.Join(disk.path, dirname), 0700)
}
// ListDir - list a directory inside the disk root path, returning only
// its sub-directories.
func (disk Disk) ListDir(dirname string) ([]os.FileInfo, error) {
	dir, err := os.Open(filepath.Join(disk.path, dirname))
	if err != nil {
		return nil, iodine.New(err, nil)
	}
	// close the directory handle on all paths; it was previously leaked
	defer dir.Close()
	contents, err := dir.Readdir(-1)
	if err != nil {
		return nil, iodine.New(err, nil)
	}
	var directories []os.FileInfo
	for _, content := range contents {
		// Include only directories, ignore everything else
		if content.IsDir() {
			directories = append(directories, content)
		}
	}
	return directories, nil
}
// ListFiles - list a directory inside the disk root path, returning only
// regular files.
func (disk Disk) ListFiles(dirname string) ([]os.FileInfo, error) {
	dir, err := os.Open(filepath.Join(disk.path, dirname))
	if err != nil {
		return nil, iodine.New(err, nil)
	}
	// close the directory handle on all paths; it was previously leaked
	defer dir.Close()
	contents, err := dir.Readdir(-1)
	if err != nil {
		return nil, iodine.New(err, nil)
	}
	var files []os.FileInfo
	for _, content := range contents {
		// Include only regular files, ignore everything else
		if content.Mode().IsRegular() {
			files = append(files, content)
		}
	}
	return files, nil
}
// CreateFile - create (or open for writing) a file under the disk root,
// making any missing parent directories first.
func (disk Disk) CreateFile(filename string) (*os.File, error) {
	if filename == "" {
		return nil, iodine.New(InvalidArgument{}, nil)
	}
	fullPath := filepath.Join(disk.path, filename)
	// make sure the parent directory chain exists before opening
	if err := os.MkdirAll(filepath.Dir(fullPath), 0700); err != nil {
		return nil, iodine.New(err, nil)
	}
	file, err := os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE, 0600)
	if err != nil {
		return nil, iodine.New(err, nil)
	}
	return file, nil
}
// OpenFile - open an existing file under the disk root for reading.
func (disk Disk) OpenFile(filename string) (*os.File, error) {
	if filename == "" {
		return nil, iodine.New(InvalidArgument{}, nil)
	}
	fullPath := filepath.Join(disk.path, filename)
	file, err := os.Open(fullPath)
	if err != nil {
		return nil, iodine.New(err, nil)
	}
	return file, nil
}
// formatBytes - convert a byte count into a human readable string,
// e.g. "2.00 MB", "64.20 KB", "52 B". Unit boundaries are strict
// powers of 1024; a value must exceed a boundary to be promoted.
func formatBytes(i int64) string {
	if i > 1024*1024*1024*1024 {
		return strings.Trim(fmt.Sprintf("%.02f TB", float64(i)/1024/1024/1024/1024), " ")
	}
	if i > 1024*1024*1024 {
		return strings.Trim(fmt.Sprintf("%.02f GB", float64(i)/1024/1024/1024), " ")
	}
	if i > 1024*1024 {
		return strings.Trim(fmt.Sprintf("%.02f MB", float64(i)/1024/1024), " ")
	}
	if i > 1024 {
		return strings.Trim(fmt.Sprintf("%.02f KB", float64(i)/1024), " ")
	}
	return strings.Trim(fmt.Sprintf("%d B", i), " ")
}

View file

@ -14,33 +14,9 @@
* limitations under the License.
*/
package donut
package disk
import (
"fmt"
"strconv"
"strings"
)
/// This file contains all the internal functions used inside Disk interface
// formatBytes - Convert bytes to human readable string. Like a 2 MB, 64.2 KB, 52 B
func (d disk) formatBytes(i uint64) (result string) {
switch {
case i > (1024 * 1024 * 1024 * 1024):
result = fmt.Sprintf("%.02f TB", float64(i)/1024/1024/1024/1024)
case i > (1024 * 1024 * 1024):
result = fmt.Sprintf("%.02f GB", float64(i)/1024/1024/1024)
case i > (1024 * 1024):
result = fmt.Sprintf("%.02f MB", float64(i)/1024/1024)
case i > 1024:
result = fmt.Sprintf("%.02f KB", float64(i)/1024)
default:
result = fmt.Sprintf("%d B", i)
}
result = strings.Trim(result, " ")
return
}
import "strconv"
// fsType2StringMap - list of filesystems supported by donut
var fsType2StringMap = map[string]string{
@ -48,7 +24,7 @@ var fsType2StringMap = map[string]string{
}
// getFSType - get filesystem type
func (d disk) getFSType(fsType uint32) string {
func getFSType(fsType uint32) string {
fsTypeHex := strconv.FormatUint(uint64(fsType), 16)
fsTypeString, ok := fsType2StringMap[fsTypeHex]
if ok == false {

View file

@ -14,35 +14,11 @@
* limitations under the License.
*/
package donut
package disk
import (
"fmt"
"strconv"
"strings"
)
import "strconv"
/// This file contains all the internal functions used inside Disk interface
// formatBytes - Convert bytes to human readable string. Like a 2 MB, 64.2 KB, 52 B
func (d disk) formatBytes(i int64) (result string) {
switch {
case i > (1024 * 1024 * 1024 * 1024):
result = fmt.Sprintf("%.02f TB", float64(i)/1024/1024/1024/1024)
case i > (1024 * 1024 * 1024):
result = fmt.Sprintf("%.02f GB", float64(i)/1024/1024/1024)
case i > (1024 * 1024):
result = fmt.Sprintf("%.02f MB", float64(i)/1024/1024)
case i > 1024:
result = fmt.Sprintf("%.02f KB", float64(i)/1024)
default:
result = fmt.Sprintf("%d B", i)
}
result = strings.Trim(result, " ")
return
}
// fsType2StrinMap - list of filesystems supported by donut
// fsType2StringMap - list of filesystems supported by donut on linux
var fsType2StringMap = map[string]string{
"1021994": "TMPFS",
"137d": "EXT",
@ -58,7 +34,7 @@ var fsType2StringMap = map[string]string{
}
// getFSType - get filesystem type
func (d disk) getFSType(fsType int64) string {
func getFSType(fsType int64) string {
fsTypeHex := strconv.FormatInt(fsType, 16)
fsTypeString, ok := fsType2StringMap[fsTypeHex]
if ok == false {

View file

@ -0,0 +1,17 @@
package disk
// InvalidArgument - returned when a caller supplies an empty or
// otherwise unusable argument.
type InvalidArgument struct{}

// Error - human readable message for InvalidArgument.
func (err InvalidArgument) Error() string {
	return "Invalid argument"
}
// UnsupportedFilesystem - returned when the backing filesystem type is
// not in donut's supported list.
type UnsupportedFilesystem struct {
	Type string // filesystem type id, formatted as a decimal string
}

// Error - human readable message naming the offending filesystem type.
func (err UnsupportedFilesystem) Error() string {
	msg := "Unsupported filesystem: "
	return msg + err.Type
}

View file

@ -16,12 +16,15 @@
package donut
import "github.com/minio/minio/pkg/iodine"
import (
"github.com/minio/minio/pkg/iodine"
"github.com/minio/minio/pkg/storage/donut/disk"
)
// donut struct internal data
type donut struct {
name string
buckets map[string]Bucket
buckets map[string]bucket
nodes map[string]Node
}
@ -47,16 +50,17 @@ func (d donut) attachDonutNode(hostname string, disks []string) error {
if err != nil {
return iodine.New(err, nil)
}
for i, disk := range disks {
donutName := d.name
for i, d := range disks {
// Order is necessary for maps, keep order number separately
newDisk, err := NewDisk(disk, i)
newDisk, err := disk.New(d)
if err != nil {
return iodine.New(err, nil)
}
if err := newDisk.MakeDir(d.name); err != nil {
if err := newDisk.MakeDir(donutName); err != nil {
return iodine.New(err, nil)
}
if err := node.AttachDisk(newDisk); err != nil {
if err := node.AttachDisk(newDisk, i); err != nil {
return iodine.New(err, nil)
}
}
@ -72,7 +76,7 @@ func NewDonut(donutName string, nodeDiskMap map[string][]string) (Donut, error)
return nil, iodine.New(InvalidArgument{}, nil)
}
nodes := make(map[string]Node)
buckets := make(map[string]Bucket)
buckets := make(map[string]bucket)
d := donut{
name: donutName,
nodes: nodes,

View file

@ -1,223 +0,0 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package donut
import (
"fmt"
"io"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"crypto/md5"
"encoding/hex"
"github.com/minio/minio/pkg/iodine"
)
// internal struct carrying bucket specific information
type bucket struct {
name string
acl string
time time.Time
donutName string
nodes map[string]Node
objects map[string]Object
}
// NewBucket - instantiate a new bucket
func NewBucket(bucketName, aclType, donutName string, nodes map[string]Node) (Bucket, map[string]string, error) {
errParams := map[string]string{
"bucketName": bucketName,
"donutName": donutName,
"aclType": aclType,
}
if strings.TrimSpace(bucketName) == "" || strings.TrimSpace(donutName) == "" {
return nil, nil, iodine.New(InvalidArgument{}, errParams)
}
bucketMetadata := make(map[string]string)
bucketMetadata["acl"] = aclType
t := time.Now().UTC()
bucketMetadata["created"] = t.Format(time.RFC3339Nano)
b := bucket{}
b.name = bucketName
b.acl = aclType
b.time = t
b.donutName = donutName
b.objects = make(map[string]Object)
b.nodes = nodes
return b, bucketMetadata, nil
}
// ListObjects - list all objects
func (b bucket) ListObjects() (map[string]Object, error) {
nodeSlice := 0
for _, node := range b.nodes {
disks, err := node.ListDisks()
if err != nil {
return nil, iodine.New(err, nil)
}
for _, disk := range disks {
bucketSlice := fmt.Sprintf("%s$%d$%d", b.name, nodeSlice, disk.GetOrder())
bucketPath := filepath.Join(b.donutName, bucketSlice)
objects, err := disk.ListDir(bucketPath)
if err != nil {
return nil, iodine.New(err, nil)
}
for _, object := range objects {
newObject, err := NewObject(object.Name(), filepath.Join(disk.GetPath(), bucketPath))
if err != nil {
return nil, iodine.New(err, nil)
}
newObjectMetadata, err := newObject.GetObjectMetadata()
if err != nil {
return nil, iodine.New(err, nil)
}
objectName, ok := newObjectMetadata["object"]
if !ok {
return nil, iodine.New(ObjectCorrupted{Object: object.Name()}, nil)
}
b.objects[objectName] = newObject
}
}
nodeSlice = nodeSlice + 1
}
return b.objects, nil
}
// GetObject - get object
func (b bucket) GetObject(objectName string) (reader io.ReadCloser, size int64, err error) {
reader, writer := io.Pipe()
// get list of objects
objects, err := b.ListObjects()
if err != nil {
return nil, 0, iodine.New(err, nil)
}
// check if object exists
object, ok := objects[objectName]
if !ok {
return nil, 0, iodine.New(os.ErrNotExist, nil)
}
// verify if objectMetadata is readable, before we serve the request
objectMetadata, err := object.GetObjectMetadata()
if err != nil {
return nil, 0, iodine.New(err, nil)
}
if objectName == "" || writer == nil || len(objectMetadata) == 0 {
return nil, 0, iodine.New(InvalidArgument{}, nil)
}
size, err = strconv.ParseInt(objectMetadata["size"], 10, 64)
if err != nil {
return nil, 0, iodine.New(err, nil)
}
// verify if donutObjectMetadata is readable, before we server the request
donutObjectMetadata, err := object.GetDonutObjectMetadata()
if err != nil {
return nil, 0, iodine.New(err, nil)
}
// read and reply back to GetObject() request in a go-routine
go b.readEncodedData(b.normalizeObjectName(objectName), writer, donutObjectMetadata)
return reader, size, nil
}
// PutObject - put a new object
func (b bucket) PutObject(objectName string, objectData io.Reader, expectedMD5Sum string, metadata map[string]string) (string, error) {
if objectName == "" || objectData == nil {
return "", iodine.New(InvalidArgument{}, nil)
}
writers, err := b.getDiskWriters(b.normalizeObjectName(objectName), "data")
if err != nil {
return "", iodine.New(err, nil)
}
summer := md5.New()
objectMetadata := make(map[string]string)
donutObjectMetadata := make(map[string]string)
objectMetadata["version"] = objectMetadataVersion
donutObjectMetadata["version"] = donutObjectMetadataVersion
size := metadata["contentLength"]
sizeInt, err := strconv.ParseInt(size, 10, 64)
if err != nil {
return "", iodine.New(err, nil)
}
// if total writers are only '1' do not compute erasure
switch len(writers) == 1 {
case true:
mw := io.MultiWriter(writers[0], summer)
totalLength, err := io.CopyN(mw, objectData, sizeInt)
if err != nil {
return "", iodine.New(err, nil)
}
donutObjectMetadata["sys.size"] = strconv.FormatInt(totalLength, 10)
objectMetadata["size"] = strconv.FormatInt(totalLength, 10)
case false:
// calculate data and parity dictated by total number of writers
k, m, err := b.getDataAndParity(len(writers))
if err != nil {
return "", iodine.New(err, nil)
}
// encoded data with k, m and write
chunkCount, totalLength, err := b.writeEncodedData(k, m, writers, objectData, summer)
if err != nil {
return "", iodine.New(err, nil)
}
/// donutMetadata section
donutObjectMetadata["sys.blockSize"] = strconv.Itoa(10 * 1024 * 1024)
donutObjectMetadata["sys.chunkCount"] = strconv.Itoa(chunkCount)
donutObjectMetadata["sys.erasureK"] = strconv.FormatUint(uint64(k), 10)
donutObjectMetadata["sys.erasureM"] = strconv.FormatUint(uint64(m), 10)
donutObjectMetadata["sys.erasureTechnique"] = "Cauchy"
donutObjectMetadata["sys.size"] = strconv.Itoa(totalLength)
// keep size inside objectMetadata as well for Object API requests
objectMetadata["size"] = strconv.Itoa(totalLength)
}
objectMetadata["bucket"] = b.name
objectMetadata["object"] = objectName
// store all user provided metadata
for k, v := range metadata {
objectMetadata[k] = v
}
dataMd5sum := summer.Sum(nil)
objectMetadata["created"] = time.Now().UTC().Format(time.RFC3339Nano)
// keeping md5sum for the object in two different places
// one for object storage and another is for internal use
objectMetadata["md5"] = hex.EncodeToString(dataMd5sum)
donutObjectMetadata["sys.md5"] = hex.EncodeToString(dataMd5sum)
// Verify if the written object is equal to what is expected, only if it is requested as such
if strings.TrimSpace(expectedMD5Sum) != "" {
if err := b.isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), objectMetadata["md5"]); err != nil {
return "", iodine.New(err, nil)
}
}
// write donut specific metadata
if err := b.writeDonutObjectMetadata(b.normalizeObjectName(objectName), donutObjectMetadata); err != nil {
return "", iodine.New(err, nil)
}
// write object specific metadata
if err := b.writeObjectMetadata(b.normalizeObjectName(objectName), objectMetadata); err != nil {
return "", iodine.New(err, nil)
}
// close all writers, when control flow reaches here
for _, writer := range writers {
writer.Close()
}
return objectMetadata["md5"], nil
}

View file

@ -1,155 +0,0 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package donut
import (
"os"
"path/filepath"
"strconv"
"syscall"
"io/ioutil"
"github.com/minio/minio/pkg/iodine"
)
// internal disk struct
type disk struct {
root string
order int
filesystem map[string]string
}
// NewDisk - instantiate new disk
func NewDisk(diskPath string, diskOrder int) (Disk, error) {
if diskPath == "" || diskOrder < 0 {
return nil, iodine.New(InvalidArgument{}, nil)
}
s := syscall.Statfs_t{}
err := syscall.Statfs(diskPath, &s)
if err != nil {
return nil, iodine.New(err, nil)
}
st, err := os.Stat(diskPath)
if err != nil {
return nil, iodine.New(err, nil)
}
if !st.IsDir() {
return nil, iodine.New(syscall.ENOTDIR, nil)
}
d := disk{
root: diskPath,
order: diskOrder,
filesystem: make(map[string]string),
}
if fsType := d.getFSType(s.Type); fsType != "UNKNOWN" {
d.filesystem["FSType"] = fsType
d.filesystem["MountPoint"] = d.root
return d, nil
}
return nil, iodine.New(UnsupportedFilesystem{
Type: strconv.FormatInt(s.Type, 10),
}, map[string]string{"Type": strconv.FormatInt(s.Type, 10)})
}
// GetPath - get root disk path
func (d disk) GetPath() string {
return d.root
}
// GetOrder - get order of disk present in graph
func (d disk) GetOrder() int {
return d.order
}
// GetFSInfo - get disk filesystem and its usage information
func (d disk) GetFSInfo() map[string]string {
s := syscall.Statfs_t{}
err := syscall.Statfs(d.root, &s)
if err != nil {
return nil
}
d.filesystem["Total"] = d.formatBytes(uint64(s.Bsize) * s.Blocks)
d.filesystem["Free"] = d.formatBytes(uint64(s.Bsize) * s.Bfree)
return d.filesystem
}
// MakeDir - make a directory inside disk root path
func (d disk) MakeDir(dirname string) error {
return os.MkdirAll(filepath.Join(d.root, dirname), 0700)
}
// ListDir - list a directory inside disk root path, get only directories
func (d disk) ListDir(dirname string) ([]os.FileInfo, error) {
contents, err := ioutil.ReadDir(filepath.Join(d.root, dirname))
if err != nil {
return nil, iodine.New(err, nil)
}
var directories []os.FileInfo
for _, content := range contents {
// Include only directories, ignore everything else
if content.IsDir() {
directories = append(directories, content)
}
}
return directories, nil
}
// ListFiles - list a directory inside disk root path, get only files
func (d disk) ListFiles(dirname string) ([]os.FileInfo, error) {
contents, err := ioutil.ReadDir(filepath.Join(d.root, dirname))
if err != nil {
return nil, iodine.New(err, nil)
}
var files []os.FileInfo
for _, content := range contents {
// Include only regular files, ignore everything else
if content.Mode().IsRegular() {
files = append(files, content)
}
}
return files, nil
}
// MakeFile - create a file inside disk root path
func (d disk) MakeFile(filename string) (*os.File, error) {
if filename == "" {
return nil, iodine.New(InvalidArgument{}, nil)
}
filePath := filepath.Join(d.root, filename)
// Create directories if they don't exist
if err := os.MkdirAll(filepath.Dir(filePath), 0700); err != nil {
return nil, iodine.New(err, nil)
}
dataFile, err := os.OpenFile(filePath, os.O_WRONLY|os.O_CREATE, 0600)
if err != nil {
return nil, iodine.New(err, nil)
}
return dataFile, nil
}
// OpenFile - read a file inside disk root path
func (d disk) OpenFile(filename string) (*os.File, error) {
if filename == "" {
return nil, iodine.New(InvalidArgument{}, nil)
}
dataFile, err := os.Open(filepath.Join(d.root, filename))
if err != nil {
return nil, iodine.New(err, nil)
}
return dataFile, nil
}

View file

@ -1,155 +0,0 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package donut
import (
"os"
"path/filepath"
"strconv"
"syscall"
"io/ioutil"
"github.com/minio/minio/pkg/iodine"
)
// internal disk struct
type disk struct {
	// root is the filesystem path this disk serves as its root.
	root string
	// order is this disk's position within the donut node graph.
	order int
	// filesystem caches FS details (FSType, MountPoint, Total, Free).
	filesystem map[string]string
}
// NewDisk - instantiate new disk.
//
// diskPath must be an existing directory and diskOrder must be
// non-negative. Statfs is used to probe the filesystem type; a disk on
// a filesystem that getFSType does not recognize is rejected with
// UnsupportedFilesystem.
func NewDisk(diskPath string, diskOrder int) (Disk, error) {
	if diskPath == "" || diskOrder < 0 {
		return nil, iodine.New(InvalidArgument{}, nil)
	}
	s := syscall.Statfs_t{}
	err := syscall.Statfs(diskPath, &s)
	if err != nil {
		return nil, iodine.New(err, nil)
	}
	st, err := os.Stat(diskPath)
	if err != nil {
		return nil, iodine.New(err, nil)
	}
	// A disk root must be a directory, not a plain file.
	if !st.IsDir() {
		return nil, iodine.New(syscall.ENOTDIR, nil)
	}
	d := disk{
		root:       diskPath,
		order:      diskOrder,
		filesystem: make(map[string]string),
	}
	// getFSType maps the Statfs type code to a name; "UNKNOWN" means
	// the filesystem is unsupported.
	if fsType := d.getFSType(s.Type); fsType != "UNKNOWN" {
		d.filesystem["FSType"] = fsType
		d.filesystem["MountPoint"] = d.root
		return d, nil
	}
	return nil, iodine.New(UnsupportedFilesystem{
		Type: strconv.FormatInt(s.Type, 10),
	}, map[string]string{"Type": strconv.FormatInt(s.Type, 10)})
}
// GetPath - get root disk path.
func (d disk) GetPath() string {
	return d.root
}
// GetOrder - get order of disk present in graph.
func (d disk) GetOrder() int {
	return d.order
}
// GetFSInfo - get disk filesystem and its usage information.
// NOTE(review): a Statfs failure is swallowed and nil is returned
// instead of an error — callers must nil-check the result.
func (d disk) GetFSInfo() map[string]string {
	s := syscall.Statfs_t{}
	err := syscall.Statfs(d.root, &s)
	if err != nil {
		return nil
	}
	// Total/Free are computed from block size * block counts and
	// formatted for display by formatBytes.
	d.filesystem["Total"] = d.formatBytes(s.Bsize * int64(s.Blocks))
	d.filesystem["Free"] = d.formatBytes(s.Bsize * int64(s.Bfree))
	return d.filesystem
}
// MakeDir - make a directory (and any missing parents, mode 0700)
// inside disk root path.
func (d disk) MakeDir(dirname string) error {
	return os.MkdirAll(filepath.Join(d.root, dirname), 0700)
}
// ListDir - list a directory inside disk root path, get only directories.
func (d disk) ListDir(dirname string) ([]os.FileInfo, error) {
	contents, err := ioutil.ReadDir(filepath.Join(d.root, dirname))
	if err != nil {
		return nil, iodine.New(err, nil)
	}
	var directories []os.FileInfo
	for _, content := range contents {
		// Include only directories, ignore everything else
		if content.IsDir() {
			directories = append(directories, content)
		}
	}
	return directories, nil
}
// ListFiles - list a directory inside disk root path, get only files.
func (d disk) ListFiles(dirname string) ([]os.FileInfo, error) {
	contents, err := ioutil.ReadDir(filepath.Join(d.root, dirname))
	if err != nil {
		return nil, iodine.New(err, nil)
	}
	var files []os.FileInfo
	for _, content := range contents {
		// Include only regular files, ignore everything else
		if content.Mode().IsRegular() {
			files = append(files, content)
		}
	}
	return files, nil
}
// MakeFile - create a file inside disk root path.
// Missing parent directories are created with mode 0700; the file is
// opened write-only with mode 0600.
// NOTE(review): no O_TRUNC — re-creating an existing, longer file
// leaves stale trailing bytes; confirm whether callers rely on this.
func (d disk) MakeFile(filename string) (*os.File, error) {
	if filename == "" {
		return nil, iodine.New(InvalidArgument{}, nil)
	}
	filePath := filepath.Join(d.root, filename)
	// Create directories if they don't exist
	if err := os.MkdirAll(filepath.Dir(filePath), 0700); err != nil {
		return nil, iodine.New(err, nil)
	}
	dataFile, err := os.OpenFile(filePath, os.O_WRONLY|os.O_CREATE, 0600)
	if err != nil {
		return nil, iodine.New(err, nil)
	}
	return dataFile, nil
}
// OpenFile - read a file inside disk root path.
// The returned handle is read-only; the caller must close it.
func (d disk) OpenFile(filename string) (*os.File, error) {
	if filename == "" {
		return nil, iodine.New(InvalidArgument{}, nil)
	}
	dataFile, err := os.Open(filepath.Join(d.root, filename))
	if err != nil {
		return nil, iodine.New(err, nil)
	}
	return dataFile, nil
}

View file

@ -1,69 +0,0 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package donut
import (
"io"
"os"
)
// Encoder interface - erasure encode/decode of data blocks.
type Encoder interface {
	GetEncodedBlockLen(dataLength int) (int, error)
	Encode(data []byte) (encodedData [][]byte, err error)
	Decode(encodedData [][]byte, dataLength int) (data []byte, err error)
}
// Bucket interface - object listing and read/write within one bucket.
type Bucket interface {
	ListObjects() (map[string]Object, error)
	GetObject(object string) (io.ReadCloser, int64, error)
	PutObject(object string, contents io.Reader, expectedMD5Sum string, metadata map[string]string) (string, error)
}
// Object interface - access to stored object metadata.
type Object interface {
	GetObjectMetadata() (map[string]string, error)
	GetDonutObjectMetadata() (map[string]string, error)
}
// Node interface - disk management on a single donut node.
type Node interface {
	ListDisks() (map[string]Disk, error)
	AttachDisk(disk Disk) error
	DetachDisk(disk Disk) error
	GetNodeName() string
	SaveConfig() error
	LoadConfig() error
}
// Disk interface - filesystem primitives rooted at one disk path.
type Disk interface {
	MakeDir(dirname string) error
	ListDir(dirname string) ([]os.FileInfo, error)
	ListFiles(dirname string) ([]os.FileInfo, error)
	MakeFile(path string) (*os.File, error)
	OpenFile(path string) (*os.File, error)
	GetPath() string
	GetOrder() int
	GetFSInfo() map[string]string
}

View file

@ -42,8 +42,8 @@ func getErasureTechnique(technique string) (encoding.Technique, error) {
}
}
// NewEncoder - instantiate a new encoder
func NewEncoder(k, m uint8, technique string) (Encoder, error) {
// newEncoder - instantiate a new encoder
func newEncoder(k, m uint8, technique string) (encoder, error) {
errParams := map[string]string{
"k": strconv.FormatUint(uint64(k), 10),
"m": strconv.FormatUint(uint64(m), 10),
@ -52,11 +52,11 @@ func NewEncoder(k, m uint8, technique string) (Encoder, error) {
e := encoder{}
t, err := getErasureTechnique(technique)
if err != nil {
return nil, iodine.New(err, errParams)
return encoder{}, iodine.New(err, errParams)
}
params, err := encoding.ValidateParams(k, m, t)
if err != nil {
return nil, iodine.New(err, errParams)
return encoder{}, iodine.New(err, errParams)
}
e.encoder = encoding.NewErasure(params)
e.k = k

View file

@ -16,7 +16,11 @@
package donut
import "io"
import (
"io"
"github.com/minio/minio/pkg/storage/donut/disk"
)
// Collection of Donut specification interfaces
@ -28,16 +32,16 @@ type Donut interface {
// ObjectStorage is a donut object storage interface
type ObjectStorage interface {
// Storage service Operations
// Storage service operations
GetBucketMetadata(bucket string) (map[string]string, error)
SetBucketMetadata(bucket string, metadata map[string]string) error
ListBuckets() (map[string]map[string]string, error)
MakeBucket(bucket, acl string) error
// Bucket Operations
ListObjects(bucket, prefix, marker, delim string, maxKeys int) (result []string, prefixes []string, isTruncated bool, err error)
// Bucket operations
ListObjects(bucket, prefix, marker, delim string, maxKeys int) (objects []string, prefixes []string, isTruncated bool, err error)
// Object Operations
// Object operations
GetObject(bucket, object string) (io.ReadCloser, int64, error)
GetObjectMetadata(bucket, object string) (map[string]string, error)
PutObject(bucket, object, expectedMD5Sum string, reader io.ReadCloser, metadata map[string]string) (string, error)
@ -55,3 +59,14 @@ type Management interface {
SaveConfig() error
LoadConfig() error
}
// Node interface for node management
type Node interface {
ListDisks() (map[int]disk.Disk, error)
AttachDisk(disk disk.Disk, diskOrder int) error
DetachDisk(diskOrder int) error
GetNodeName() string
SaveConfig() error
LoadConfig() error
}

View file

@ -21,8 +21,8 @@ func (d donut) Info() (nodeDiskMap map[string][]string, err error) {
return nil, iodine.New(err, nil)
}
diskList := make([]string, len(disks))
for diskName, disk := range disks {
diskList[disk.GetOrder()] = diskName
for diskOrder, disk := range disks {
diskList[diskOrder] = disk.GetPath()
}
nodeDiskMap[nodeName] = diskList
}
@ -52,14 +52,14 @@ func (d donut) SaveConfig() error {
if err != nil {
return iodine.New(err, nil)
}
for _, disk := range disks {
for order, disk := range disks {
donutConfigPath := filepath.Join(d.name, donutConfig)
donutConfigWriter, err := disk.MakeFile(donutConfigPath)
donutConfigWriter, err := disk.CreateFile(donutConfigPath)
defer donutConfigWriter.Close()
if err != nil {
return iodine.New(err, nil)
}
nodeDiskMap[hostname][disk.GetOrder()] = disk.GetPath()
nodeDiskMap[hostname][order] = disk.GetPath()
jenc := json.NewEncoder(donutConfigWriter)
if err := jenc.Encode(nodeDiskMap); err != nil {
return iodine.New(err, nil)

View file

@ -16,12 +16,15 @@
package donut
import "github.com/minio/minio/pkg/iodine"
import (
"github.com/minio/minio/pkg/iodine"
"github.com/minio/minio/pkg/storage/donut/disk"
)
// node struct internal
type node struct {
hostname string
disks map[string]Disk
disks map[int]disk.Disk
}
// NewNode - instantiates a new node
@ -29,7 +32,7 @@ func NewNode(hostname string) (Node, error) {
if hostname == "" {
return nil, iodine.New(InvalidArgument{}, nil)
}
disks := make(map[string]Disk)
disks := make(map[int]disk.Disk)
n := node{
hostname: hostname,
disks: disks,
@ -43,22 +46,22 @@ func (n node) GetNodeName() string {
}
// ListDisks - return number of disks
func (n node) ListDisks() (map[string]Disk, error) {
func (n node) ListDisks() (map[int]disk.Disk, error) {
return n.disks, nil
}
// AttachDisk - attach a disk
func (n node) AttachDisk(disk Disk) error {
if disk == nil {
func (n node) AttachDisk(disk disk.Disk, diskOrder int) error {
if diskOrder < 0 {
return iodine.New(InvalidArgument{}, nil)
}
n.disks[disk.GetPath()] = disk
n.disks[diskOrder] = disk
return nil
}
// DetachDisk - detach a disk
func (n node) DetachDisk(disk Disk) error {
delete(n.disks, disk.GetPath())
func (n node) DetachDisk(diskOrder int) error {
delete(n.disks, diskOrder)
return nil
}

View file

@ -32,10 +32,10 @@ type object struct {
donutObjectMetadata map[string]string
}
// NewObject - instantiate a new object
func NewObject(objectName, p string) (Object, error) {
// newObject - instantiate a new object
func newObject(objectName, p string) (object, error) {
if objectName == "" {
return nil, iodine.New(InvalidArgument{}, nil)
return object{}, iodine.New(InvalidArgument{}, nil)
}
o := object{}
o.name = objectName
@ -47,7 +47,7 @@ func (o object) GetObjectMetadata() (map[string]string, error) {
objectMetadata := make(map[string]string)
objectMetadataBytes, err := ioutil.ReadFile(filepath.Join(o.objectPath, objectMetadataConfig))
if err != nil {
return nil, iodine.New(err, nil)
return nil, iodine.New(ObjectNotFound{Object: o.name}, nil)
}
if err := json.Unmarshal(objectMetadataBytes, &objectMetadata); err != nil {
return nil, iodine.New(err, nil)
@ -60,7 +60,7 @@ func (o object) GetDonutObjectMetadata() (map[string]string, error) {
donutObjectMetadata := make(map[string]string)
donutObjectMetadataBytes, err := ioutil.ReadFile(filepath.Join(o.objectPath, donutObjectMetadataConfig))
if err != nil {
return nil, iodine.New(err, nil)
return nil, iodine.New(ObjectNotFound{Object: o.name}, nil)
}
if err := json.Unmarshal(donutObjectMetadataBytes, &donutObjectMetadata); err != nil {
return nil, iodine.New(err, nil)

View file

@ -17,7 +17,11 @@
package donut
import (
"encoding/json"
"fmt"
"io"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
@ -185,7 +189,7 @@ func (d donut) PutObject(bucket, object, expectedMD5Sum string, reader io.ReadCl
return "", iodine.New(ObjectExists{Object: object}, nil)
}
}
md5sum, err := d.buckets[bucket].PutObject(object, reader, expectedMD5Sum, metadata)
md5sum, err := d.buckets[bucket].WriteObject(object, reader, expectedMD5Sum, metadata)
if err != nil {
return "", iodine.New(err, errParams)
}
@ -211,16 +215,7 @@ func (d donut) GetObject(bucket, object string) (reader io.ReadCloser, size int6
if _, ok := d.buckets[bucket]; !ok {
return nil, 0, iodine.New(BucketNotFound{Bucket: bucket}, errParams)
}
objectList, err := d.buckets[bucket].ListObjects()
if err != nil {
return nil, 0, iodine.New(err, nil)
}
for objectName := range objectList {
if objectName == object {
return d.buckets[bucket].GetObject(object)
}
}
return nil, 0, iodine.New(ObjectNotFound{Object: object}, nil)
return d.buckets[bucket].ReadObject(object)
}
// GetObjectMetadata - get object metadata
@ -246,3 +241,157 @@ func (d donut) GetObjectMetadata(bucket, object string) (map[string]string, erro
}
return donutObject.GetObjectMetadata()
}
// getDiskWriters -
// getBucketMetadataWriters opens one bucket-metadata file writer per
// disk, ordered by disk order.
// NOTE(review): `writers` is re-made inside the node loop, so with more
// than one node only the last node's writers are returned (earlier
// opened files also leak) — verify against multi-node deployments.
func (d donut) getBucketMetadataWriters() ([]io.WriteCloser, error) {
	var writers []io.WriteCloser
	for _, node := range d.nodes {
		disks, err := node.ListDisks()
		if err != nil {
			return nil, iodine.New(err, nil)
		}
		writers = make([]io.WriteCloser, len(disks))
		for order, disk := range disks {
			bucketMetaDataWriter, err := disk.CreateFile(filepath.Join(d.name, bucketMetadataConfig))
			if err != nil {
				return nil, iodine.New(err, nil)
			}
			writers[order] = bucketMetaDataWriter
		}
	}
	return writers, nil
}
// getBucketMetadataReaders opens one bucket-metadata file reader per
// disk, ordered by disk order.
// NOTE(review): same per-node `make` overwrite as in
// getBucketMetadataWriters — confirm intent for multi-node setups.
func (d donut) getBucketMetadataReaders() ([]io.ReadCloser, error) {
	var readers []io.ReadCloser
	for _, node := range d.nodes {
		disks, err := node.ListDisks()
		if err != nil {
			return nil, iodine.New(err, nil)
		}
		readers = make([]io.ReadCloser, len(disks))
		for order, disk := range disks {
			bucketMetaDataReader, err := disk.OpenFile(filepath.Join(d.name, bucketMetadataConfig))
			if err != nil {
				return nil, iodine.New(err, nil)
			}
			readers[order] = bucketMetaDataReader
		}
	}
	return readers, nil
}
//
// setDonutBucketMetadata JSON-encodes the bucket metadata map onto
// every disk's metadata file so all replicas stay in sync.
func (d donut) setDonutBucketMetadata(metadata map[string]map[string]string) error {
	writers, err := d.getBucketMetadataWriters()
	if err != nil {
		return iodine.New(err, nil)
	}
	// Close all writers when this function returns.
	for _, writer := range writers {
		defer writer.Close()
	}
	for _, writer := range writers {
		jenc := json.NewEncoder(writer)
		if err := jenc.Encode(metadata); err != nil {
			return iodine.New(err, nil)
		}
	}
	return nil
}
// getDonutBucketMetadata JSON-decodes the bucket metadata map from each
// disk's metadata file; later reads overwrite earlier ones.
func (d donut) getDonutBucketMetadata() (map[string]map[string]string, error) {
	metadata := make(map[string]map[string]string)
	readers, err := d.getBucketMetadataReaders()
	if err != nil {
		return nil, iodine.New(err, nil)
	}
	// Close all readers when this function returns.
	for _, reader := range readers {
		defer reader.Close()
	}
	for _, reader := range readers {
		jenc := json.NewDecoder(reader)
		if err := jenc.Decode(&metadata); err != nil {
			return nil, iodine.New(err, nil)
		}
	}
	return metadata, nil
}
// makeDonutBucket creates the on-disk layout for a new bucket and
// records it in the shared bucket metadata.
//
// Each disk gets a directory named "<bucket>$<node>$<disk-order>";
// metadata is then rewritten across all disks. A missing metadata file
// (first bucket ever) is treated as an empty map, not an error.
func (d donut) makeDonutBucket(bucketName, acl string) error {
	err := d.getDonutBuckets()
	if err != nil {
		return iodine.New(err, nil)
	}
	if _, ok := d.buckets[bucketName]; ok {
		return iodine.New(BucketExists{Bucket: bucketName}, nil)
	}
	bucket, bucketMetadata, err := newBucket(bucketName, acl, d.name, d.nodes)
	if err != nil {
		return iodine.New(err, nil)
	}
	nodeNumber := 0
	d.buckets[bucketName] = bucket
	for _, node := range d.nodes {
		disks, err := node.ListDisks()
		if err != nil {
			return iodine.New(err, nil)
		}
		for order, disk := range disks {
			// Slice directory name encodes bucket, node and disk order.
			bucketSlice := fmt.Sprintf("%s$%d$%d", bucketName, nodeNumber, order)
			err := disk.MakeDir(filepath.Join(d.name, bucketSlice))
			if err != nil {
				return iodine.New(err, nil)
			}
		}
		nodeNumber = nodeNumber + 1
	}
	metadata, err := d.getDonutBucketMetadata()
	if err != nil {
		err = iodine.ToError(err)
		// First bucket: no metadata file exists yet, start a fresh map.
		if os.IsNotExist(err) {
			metadata := make(map[string]map[string]string)
			metadata[bucketName] = bucketMetadata
			err = d.setDonutBucketMetadata(metadata)
			if err != nil {
				return iodine.New(err, nil)
			}
			return nil
		}
		return iodine.New(err, nil)
	}
	metadata[bucketName] = bucketMetadata
	err = d.setDonutBucketMetadata(metadata)
	if err != nil {
		return iodine.New(err, nil)
	}
	return nil
}
// getDonutBuckets scans every disk's "<bucket>$<node>$<order>" slice
// directories and populates d.buckets with an entry per bucket found.
func (d donut) getDonutBuckets() error {
	for _, node := range d.nodes {
		disks, err := node.ListDisks()
		if err != nil {
			return iodine.New(err, nil)
		}
		for _, disk := range disks {
			dirs, err := disk.ListDir(d.name)
			if err != nil {
				return iodine.New(err, nil)
			}
			for _, dir := range dirs {
				splitDir := strings.Split(dir.Name(), "$")
				// A slice directory must have at least bucket, node and
				// order components; anything else is a corrupt backend.
				if len(splitDir) < 3 {
					return iodine.New(CorruptedBackend{Backend: dir.Name()}, nil)
				}
				bucketName := splitDir[0]
				// we dont need this NewBucket once we cache from makeDonutBucket()
				bucket, _, err := newBucket(bucketName, "private", d.name, d.nodes)
				if err != nil {
					return iodine.New(err, nil)
				}
				d.buckets[bucketName] = bucket
			}
		}
	}
	return nil
}

View file

@ -1,184 +0,0 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package donut
import (
"encoding/json"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"github.com/minio/minio/pkg/iodine"
)
/// This file contains all the internal functions used by Object interface
// getDiskWriters -
// getBucketMetadataWriters opens one bucket-metadata writer per disk,
// indexed by each disk's GetOrder().
// NOTE(review): `writers` is re-made per node, so only the last node's
// writers survive on multi-node setups — confirm intent.
func (d donut) getBucketMetadataWriters() ([]io.WriteCloser, error) {
	var writers []io.WriteCloser
	for _, node := range d.nodes {
		disks, err := node.ListDisks()
		if err != nil {
			return nil, iodine.New(err, nil)
		}
		writers = make([]io.WriteCloser, len(disks))
		for _, disk := range disks {
			bucketMetaDataWriter, err := disk.MakeFile(filepath.Join(d.name, bucketMetadataConfig))
			if err != nil {
				return nil, iodine.New(err, nil)
			}
			writers[disk.GetOrder()] = bucketMetaDataWriter
		}
	}
	return writers, nil
}
// getBucketMetadataReaders opens one bucket-metadata reader per disk,
// indexed by each disk's GetOrder().
func (d donut) getBucketMetadataReaders() ([]io.ReadCloser, error) {
	var readers []io.ReadCloser
	for _, node := range d.nodes {
		disks, err := node.ListDisks()
		if err != nil {
			return nil, iodine.New(err, nil)
		}
		readers = make([]io.ReadCloser, len(disks))
		for _, disk := range disks {
			bucketMetaDataReader, err := disk.OpenFile(filepath.Join(d.name, bucketMetadataConfig))
			if err != nil {
				return nil, iodine.New(err, nil)
			}
			readers[disk.GetOrder()] = bucketMetaDataReader
		}
	}
	return readers, nil
}
//
// setDonutBucketMetadata writes the bucket metadata map as JSON to
// every disk's metadata file.
func (d donut) setDonutBucketMetadata(metadata map[string]map[string]string) error {
	writers, err := d.getBucketMetadataWriters()
	if err != nil {
		return iodine.New(err, nil)
	}
	// Close all writers when this function returns.
	for _, writer := range writers {
		defer writer.Close()
	}
	for _, writer := range writers {
		jenc := json.NewEncoder(writer)
		if err := jenc.Encode(metadata); err != nil {
			return iodine.New(err, nil)
		}
	}
	return nil
}
// getDonutBucketMetadata reads the JSON bucket metadata map from each
// disk; later reads overwrite earlier ones.
func (d donut) getDonutBucketMetadata() (map[string]map[string]string, error) {
	metadata := make(map[string]map[string]string)
	readers, err := d.getBucketMetadataReaders()
	if err != nil {
		return nil, iodine.New(err, nil)
	}
	// Close all readers when this function returns.
	for _, reader := range readers {
		defer reader.Close()
	}
	for _, reader := range readers {
		jenc := json.NewDecoder(reader)
		if err := jenc.Decode(&metadata); err != nil {
			return nil, iodine.New(err, nil)
		}
	}
	return metadata, nil
}
// makeDonutBucket creates the per-disk "<bucket>$<node>$<order>" slice
// directories for a new bucket and persists the updated bucket
// metadata; a missing metadata file is treated as the first bucket.
func (d donut) makeDonutBucket(bucketName, acl string) error {
	err := d.getDonutBuckets()
	if err != nil {
		return iodine.New(err, nil)
	}
	if _, ok := d.buckets[bucketName]; ok {
		return iodine.New(BucketExists{Bucket: bucketName}, nil)
	}
	bucket, bucketMetadata, err := NewBucket(bucketName, acl, d.name, d.nodes)
	if err != nil {
		return iodine.New(err, nil)
	}
	nodeNumber := 0
	d.buckets[bucketName] = bucket
	for _, node := range d.nodes {
		disks, err := node.ListDisks()
		if err != nil {
			return iodine.New(err, nil)
		}
		for _, disk := range disks {
			// Slice directory name encodes bucket, node and disk order.
			bucketSlice := fmt.Sprintf("%s$%d$%d", bucketName, nodeNumber, disk.GetOrder())
			err := disk.MakeDir(filepath.Join(d.name, bucketSlice))
			if err != nil {
				return iodine.New(err, nil)
			}
		}
		nodeNumber = nodeNumber + 1
	}
	metadata, err := d.getDonutBucketMetadata()
	if err != nil {
		err = iodine.ToError(err)
		// First bucket: no metadata file exists yet, start a fresh map.
		if os.IsNotExist(err) {
			metadata := make(map[string]map[string]string)
			metadata[bucketName] = bucketMetadata
			err = d.setDonutBucketMetadata(metadata)
			if err != nil {
				return iodine.New(err, nil)
			}
			return nil
		}
		return iodine.New(err, nil)
	}
	metadata[bucketName] = bucketMetadata
	err = d.setDonutBucketMetadata(metadata)
	if err != nil {
		return iodine.New(err, nil)
	}
	return nil
}
// getDonutBuckets scans every disk's slice directories and registers
// each bucket found into d.buckets.
func (d donut) getDonutBuckets() error {
	for _, node := range d.nodes {
		disks, err := node.ListDisks()
		if err != nil {
			return iodine.New(err, nil)
		}
		for _, disk := range disks {
			dirs, err := disk.ListDir(d.name)
			if err != nil {
				return iodine.New(err, nil)
			}
			for _, dir := range dirs {
				splitDir := strings.Split(dir.Name(), "$")
				// Slice directories must carry bucket, node and order
				// components; anything else means a corrupt backend.
				if len(splitDir) < 3 {
					return iodine.New(CorruptedBackend{Backend: dir.Name()}, nil)
				}
				bucketName := splitDir[0]
				// we dont need this NewBucket once we cache from makeDonutBucket()
				bucket, _, err := NewBucket(bucketName, "private", d.name, d.nodes)
				if err != nil {
					return iodine.New(err, nil)
				}
				d.buckets[bucketName] = bucket
			}
		}
	}
	return nil
}

View file

@ -22,12 +22,13 @@ import (
"strings"
"github.com/minio/minio/pkg/iodine"
"github.com/minio/minio/pkg/storage/donut/disk"
)
// Rebalance -
func (d donut) Rebalance() error {
var totalOffSetLength int
var newDisks []Disk
var newDisks []disk.Disk
var existingDirs []os.FileInfo
for _, node := range d.nodes {
disks, err := node.ListDisks()