From befac7d047736d29991586b6df1c64ce76439756 Mon Sep 17 00:00:00 2001
From: "Frederick F. Kautz IV"
Date: Sun, 22 Mar 2015 19:55:15 -0700
Subject: [PATCH] Exorcising donutbox

---
 pkg/donutbox/donutbox.go                      |  89 -----
 pkg/donutbox/donutfs/donutfs.go               |   1 -
 pkg/donutbox/donutfs/donutfs_test.go          |   1 -
 pkg/donutbox/donutmem/donutmem.go             | 246 --------------
 pkg/donutbox/donutmem/donutmem_test.go        | 151 ---------
 pkg/storage/donutstorage/donutstorage.go      | 320 +-----------------
 pkg/storage/donutstorage/donutstorage_test.go |   4 +-
 pkg/storage/file/file_test.go                 |   2 +
 8 files changed, 14 insertions(+), 800 deletions(-)
 delete mode 100644 pkg/donutbox/donutbox.go
 delete mode 100644 pkg/donutbox/donutfs/donutfs.go
 delete mode 100644 pkg/donutbox/donutfs/donutfs_test.go
 delete mode 100644 pkg/donutbox/donutmem/donutmem.go
 delete mode 100644 pkg/donutbox/donutmem/donutmem_test.go

diff --git a/pkg/donutbox/donutbox.go b/pkg/donutbox/donutbox.go
deleted file mode 100644
index 424a3d54f..000000000
--- a/pkg/donutbox/donutbox.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package donutbox
-
-import "io"
-
-// DonutBox is an interface specifying how the storage driver should interact with its underlying system.
-type DonutBox interface {
-    // system operations
-    ListBuckets() ([]string, error)
-
-    // bucket operations
-    CreateBucket(bucket string) error
-    ListObjectsInBucket(bucket, prefix string) ([]string, error)
-    GetBucketMetadata(bucket string) (map[string]string, error)
-    SetBucketMetadata(bucket string, metadata map[string]string) error
-
-    // object operations
-    GetObjectWriter(bucket, object string, column uint) (*NewObject, error)
-    GetObjectReader(bucket, object string, column uint) (io.Reader, error)
-    GetObjectMetadata(bucket, object string, column uint) (map[string]string, error)
-}
-
-// Bucket contains major operations on a bucket
-type Bucket interface {
-    AddDisk(Disk) error
-    RemoveDisk(Disk)
-    GetDisk(i uint) (Disk, error)
-    GetDisks() ([]Disk, error)
-    GetMetadata() (map[string]string, error)
-    ListObjects(prefix string) ([]string, error)
-    SetMetadata(metadata map[string]string) error
-}
-
-// Disk represents major operations on a bucket's disk
-type Disk interface {
-    GetObjectMetadata(object string) (map[string]string, error)
-    GetObjectReader(object string) (io.Reader, error)
-    GetObjectWriter(object string) (*NewObject, error)
-}
-
-// Result is a result for async tasks
-type Result struct {
-    Err error
-}
-
-// CreateNewObject creates a new object wrapping a writer. Clients are not expected to use this directly. This is exposed for storage drivers.
-func CreateNewObject(writer *io.PipeWriter) *NewObject {
-    return &NewObject{writer: writer}
-}
-
-// NewObject wraps a writer and allows setting metadata. On a successful close, the object is committed by the backend.
-type NewObject struct {
-    metadata map[string]string
-    writer   *io.PipeWriter
-}
-
-// Write data
-func (newObject *NewObject) Write(data []byte) (int, error) {
-    return newObject.writer.Write(data)
-}
-
-// SetMetadata sets metadata for an object
-func (newObject *NewObject) SetMetadata(metadata map[string]string) {
-    newMetadata := make(map[string]string)
-    for k, v := range metadata {
-        newMetadata[k] = v
-    }
-    newObject.metadata = newMetadata
-}
-
-// Close and commit the object
-func (newObject *NewObject) Close() error {
-    return newObject.writer.Close()
-}
-
-// CloseWithError closes the object with an error, causing the backend to abandon the object
-func (newObject *NewObject) CloseWithError(err error) error {
-    return newObject.writer.CloseWithError(err)
-}
-
-// GetMetadata returns a copy of the metadata set metadata
-func (newObject *NewObject) GetMetadata() map[string]string {
-    newMetadata := make(map[string]string)
-    if newMetadata != nil {
-        for k, v := range newObject.metadata {
-            newMetadata[k] = v
-        }
-    }
-    return newMetadata
-}
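Note (outside the patch): the DonutBox interface deleted above is the seam between the donut storage driver and its backends. The sketch below is an illustrative write/read round trip against it; it assumes the in-memory donutmem backend that this patch also removes, and the bucket, object, and metadata values are made up.

package main

import (
    "fmt"
    "io/ioutil"

    "github.com/minio-io/minio/pkg/donutbox/donutmem"
)

func main() {
    box := donutmem.NewDonutMem() // returns a donutbox.DonutBox

    if err := box.CreateBucket("photos"); err != nil {
        panic(err)
    }

    // Writers are keyed by (object, column); column 0 is used here because
    // this sketch involves no erasure coding.
    writer, err := box.GetObjectWriter("photos", "cat.png", 0)
    if err != nil {
        panic(err)
    }
    writer.SetMetadata(map[string]string{"contentType": "image/png"})
    if _, err := writer.Write([]byte("hello donut")); err != nil {
        writer.CloseWithError(err)
        panic(err)
    }
    writer.Close() // a clean close commits the object in the backend

    reader, err := box.GetObjectReader("photos", "cat.png", 0)
    if err != nil {
        panic(err)
    }
    data, _ := ioutil.ReadAll(reader)
    fmt.Printf("%s\n", data)
}

The same flow appears in the deleted donutmem tests further down: an object only becomes readable after its writer has been closed cleanly.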
diff --git a/pkg/donutbox/donutfs/donutfs.go b/pkg/donutbox/donutfs/donutfs.go
deleted file mode 100644
index 935d80ab7..000000000
--- a/pkg/donutbox/donutfs/donutfs.go
+++ /dev/null
@@ -1 +0,0 @@
-package donutfs
diff --git a/pkg/donutbox/donutfs/donutfs_test.go b/pkg/donutbox/donutfs/donutfs_test.go
deleted file mode 100644
index 935d80ab7..000000000
--- a/pkg/donutbox/donutfs/donutfs_test.go
+++ /dev/null
@@ -1 +0,0 @@
-package donutfs
diff --git a/pkg/donutbox/donutmem/donutmem.go b/pkg/donutbox/donutmem/donutmem.go
deleted file mode 100644
index 0fe4799ad..000000000
--- a/pkg/donutbox/donutmem/donutmem.go
+++ /dev/null
@@ -1,246 +0,0 @@
-package donutmem
-
-import (
-    "bytes"
-    "errors"
-    "github.com/minio-io/minio/pkg/donutbox"
-    "io"
-    "strconv"
-    "strings"
-    "sync"
-    "time"
-)
-
-type bucket struct {
-    name     string
-    metadata map[string]string
-    objects  map[string]*object
-    lock     *sync.RWMutex
-}
-
-type object struct {
-    name     string
-    data     []byte
-    metadata map[string]string
-    lock     *sync.RWMutex
-}
-
-type donutMem struct {
-    buckets map[string]*bucket
-    lock    *sync.RWMutex
-}
-
-// NewDonutMem creates a new in memory donut
-func NewDonutMem() donutbox.DonutBox {
-    return donutMem{
-        buckets: make(map[string]*bucket),
-        lock:    new(sync.RWMutex),
-    }
-}
-
-// system operations
-func (donutMem donutMem) ListBuckets() ([]string, error) {
-    donutMem.lock.RLock()
-    defer donutMem.lock.RUnlock()
-    var buckets []string
-    for k := range donutMem.buckets {
-        buckets = append(buckets, k)
-    }
-    return buckets, nil
-}
-
-// bucket operations
-func (donutMem donutMem) CreateBucket(b string) error {
-    donutMem.lock.Lock()
-    defer donutMem.lock.Unlock()
-    b = strings.ToLower(b)
-    if _, ok := donutMem.buckets[b]; ok {
-        return errors.New("Bucket Exists")
-    }
-    metadata := make(map[string]string)
-    metadata["name"] = b
-    metadata["created"] = time.Now().Format(time.RFC3339Nano)
-    newBucket := bucket{
-        name:     b,
-        metadata: metadata,
-        objects:  make(map[string]*object),
-        lock:     new(sync.RWMutex),
-    }
-    donutMem.buckets[b] = &newBucket
-    return nil
-}
-
-func (donutMem donutMem) ListObjectsInBucket(bucketKey, prefixKey string) ([]string, error) {
-    donutMem.lock.RLock()
-    defer donutMem.lock.RUnlock()
-    if curBucket, ok := donutMem.buckets[bucketKey]; ok {
-        curBucket.lock.RLock()
-        defer curBucket.lock.RUnlock()
-        objectMap := make(map[string]string)
-        for objectKey := range curBucket.objects {
-            objectName := strings.Split(objectKey, "#")[0]
-            if strings.HasPrefix(objectName, prefixKey) {
-                objectMap[objectName] = objectName
-            }
-        }
-        var objects []string
-        for k := range objectMap {
-            objects = append(objects, k)
-        }
-        return objects, nil
-    }
-    return nil, errors.New("Bucket does not exist")
-}
-
-func (donutMem donutMem) GetBucketMetadata(bucketKey string) (map[string]string, error) {
-    donutMem.lock.RLock()
-    defer donutMem.lock.RUnlock()
-
-    if curBucket, ok := donutMem.buckets[bucketKey]; ok {
-        curBucket.lock.RLock()
-        defer curBucket.lock.RUnlock()
-        result := make(map[string]string)
-        for k, v := range curBucket.metadata {
-            result[k] = v
-        }
-        return result, nil
-    }
-    return nil, errors.New("Bucket not found")
-}
-
-func (donutMem donutMem) SetBucketMetadata(bucketKey string, metadata map[string]string) error {
-    donutMem.lock.RLock()
-    defer donutMem.lock.RUnlock()
-    if curBucket, ok := donutMem.buckets[bucketKey]; ok {
-        curBucket.lock.Lock()
-        defer curBucket.lock.Unlock()
-        newMetadata := make(map[string]string)
-        for k, v := range metadata {
-            newMetadata[k] = v
-        }
-        curBucket.metadata = newMetadata
-        return nil
-    }
-    return errors.New("Bucket not found")
-}
-
-// object operations
-func (donutMem donutMem) GetObjectWriter(bucketKey, objectKey string, column uint) (*donutbox.NewObject, error) {
-    key := getKey(bucketKey, objectKey, column)
-    reader, writer := io.Pipe()
-    returnObject := donutbox.CreateNewObject(writer)
-    donutMem.lock.RLock()
-    defer donutMem.lock.RUnlock()
-    if curBucket, ok := donutMem.buckets[bucketKey]; ok {
-        curBucket.lock.Lock()
-        defer curBucket.lock.Unlock()
-        if _, ok := curBucket.objects[key]; !ok {
-            newObject := object{
-                name: key,
-                data: make([]byte, 0),
-                lock: new(sync.RWMutex),
-            }
-
-            newObject.lock.Lock()
-            curBucket.objects[key] = &newObject
-            go func() {
-                defer newObject.lock.Unlock()
-                var objBuffer bytes.Buffer
-
-                _, err := io.Copy(&objBuffer, reader)
-                if err == nil {
-                    newObject.data = objBuffer.Bytes()
-                    writer.Close()
-
-                    metadata := returnObject.GetMetadata()
-                    for k, v := range metadata {
-                        metadata[k] = v
-                    }
-                    metadata["key"] = objectKey
-                    metadata["column"] = strconv.FormatUint(uint64(column), 10)
-                    newObject.metadata = metadata
-
-                    return
-                }
-
-                donutMem.lock.RLock()
-                defer donutMem.lock.RUnlock()
-                bucket, _ := donutMem.buckets[bucketKey]
-                bucket.lock.Lock()
-                defer bucket.lock.Unlock()
-                delete(bucket.objects, key)
-                writer.CloseWithError(err)
-            }()
-            return returnObject, nil
-        }
-        writer.CloseWithError(errors.New("Object exists"))
-        return nil, errors.New("Object exists")
-    }
-    writer.CloseWithError(errors.New("Bucket does not exist"))
-    return nil, errors.New("Bucket does not exist")
-}
-
-func (donutMem donutMem) GetObjectReader(bucketKey, objectKey string, column uint) (io.Reader, error) {
-    key := getKey(bucketKey, objectKey, column)
-    donutMem.lock.RLock()
-    defer donutMem.lock.RUnlock()
-    if curBucket, ok := donutMem.buckets[bucketKey]; ok {
-        curBucket.lock.RLock()
-        defer curBucket.lock.RUnlock()
-        if curObject, ok := curBucket.objects[key]; ok {
-            curObject.lock.RLock()
-            defer curObject.lock.RUnlock()
-            return bytes.NewBuffer(curObject.data), nil
-        }
-        return nil, errors.New("Object not found")
-    }
-    return nil, errors.New("Bucket not found")
-}
-
-//func (donutMem donutMem) SetObjectMetadata(bucketKey, objectKey string, column uint, metadata map[string]string) error {
-//    key := getKey(bucketKey, objectKey, column)
-//    donutMem.lock.RLock()
-//    defer donutMem.lock.RUnlock()
-//    if curBucket, ok := donutMem.buckets[bucketKey]; ok {
-//        curBucket.lock.RLock()
-//        defer curBucket.lock.RUnlock()
-//        if curObject, ok := curBucket.objects[key]; ok {
-//            curObject.lock.Lock()
-//            defer curObject.lock.Unlock()
-//            newMetadata := make(map[string]string)
-//            for k, v := range metadata {
-//                newMetadata[k] = v
-//            }
-//            curObject.metadata = newMetadata
-//            return nil
-//        }
-//        return errors.New("Object not found")
-//    }
-//    return errors.New("Bucket not found")
-//}
-
-func (donutMem donutMem) GetObjectMetadata(bucketKey, objectKey string, column uint) (map[string]string, error) {
-    key := getKey(bucketKey, objectKey, column)
-    donutMem.lock.RLock()
-    defer donutMem.lock.RUnlock()
-
-    if curBucket, ok := donutMem.buckets[bucketKey]; ok {
-        curBucket.lock.RLock()
-        defer curBucket.lock.RUnlock()
-        if curObject, ok := curBucket.objects[key]; ok {
-            curObject.lock.RLock()
-            defer curObject.lock.RUnlock()
-            result := make(map[string]string)
-            for k, v := range curObject.metadata {
-                result[k] = v
-            }
-            return result, nil
-        }
-        return nil, errors.New("Object not Found: " + bucketKey + "#" + objectKey)
-    }
-    return nil, errors.New("Bucket not found")
-}
-
-func getKey(bucketKey, objectKey string, column uint) string {
-    return objectKey + "#" + strconv.FormatUint(uint64(column), 10)
-}
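Note (outside the patch): the GetObjectWriter removed above is built around io.Pipe. The caller writes into the returned object while a goroutine drains the other end, and the buffered bytes are published only when the writer is closed cleanly; CloseWithError drops the object instead. Below is a standalone sketch of that commit-on-close pattern using only the standard library; the commit and abandon callbacks are stand-ins, not donutmem APIs.

package main

import (
    "bytes"
    "fmt"
    "io"
)

// commitOnClose returns a pipe writer. A goroutine buffers everything written
// to it; commit fires only after a clean Close, abandon fires if the write
// side is closed with an error.
func commitOnClose(commit func([]byte), abandon func(error)) *io.PipeWriter {
    reader, writer := io.Pipe()
    go func() {
        var buf bytes.Buffer
        if _, err := io.Copy(&buf, reader); err != nil {
            abandon(err)
            return
        }
        commit(buf.Bytes())
    }()
    return writer
}

func main() {
    done := make(chan struct{})
    w := commitOnClose(
        func(data []byte) { fmt.Printf("committed %d bytes\n", len(data)); close(done) },
        func(err error) { fmt.Println("abandoned:", err); close(done) },
    )
    w.Write([]byte("hello donut"))
    w.Close() // a clean close triggers the commit path
    <-done
}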
diff --git a/pkg/donutbox/donutmem/donutmem_test.go b/pkg/donutbox/donutmem/donutmem_test.go
deleted file mode 100644
index a0c66853d..000000000
--- a/pkg/donutbox/donutmem/donutmem_test.go
+++ /dev/null
@@ -1,151 +0,0 @@
-package donutmem
-
-import (
-    "testing"
-
-    . "gopkg.in/check.v1"
-    "io/ioutil"
-    "sort"
-    "strconv"
-)
-
-func Test(t *testing.T) { TestingT(t) }
-
-type MySuite struct{}
-
-var _ = Suite(&MySuite{})
-
-func (s *MySuite) TestCreateAndReadObject(c *C) {
-    data := "Hello World"
-    donut := NewDonutMem()
-
-    writer, err := donut.GetObjectWriter("foo", "bar", 0)
-    c.Assert(writer, IsNil)
-    c.Assert(err, Not(IsNil))
-
-    err = donut.CreateBucket("foo")
-    c.Assert(err, IsNil)
-
-    writer, err = donut.GetObjectWriter("foo", "bar", 0)
-    c.Assert(err, IsNil)
-    count, err := writer.Write([]byte(data))
-    c.Assert(count, Equals, len(data))
-    c.Assert(err, IsNil)
-    err = writer.Close()
-    c.Assert(err, IsNil)
-
-    // data should be available
-    reader, err := donut.GetObjectReader("foo", "bar", 0)
-    c.Assert(err, IsNil)
-    result, err := ioutil.ReadAll(reader)
-    c.Assert(result, DeepEquals, []byte(data))
-
-    // try writing, should see error
-    writer, err = donut.GetObjectWriter("foo", "bar", 0)
-    c.Assert(writer, IsNil)
-    c.Assert(err, Not(IsNil))
-
-    // data should not change
-    reader, err = donut.GetObjectReader("foo", "bar", 0)
-    c.Assert(err, IsNil)
-    result, err = ioutil.ReadAll(reader)
-    c.Assert(result, DeepEquals, []byte(data))
-}
-
-func (s *MySuite) TestBucketList(c *C) {
-    donut := NewDonutMem()
-
-    results, err := donut.ListBuckets()
-    c.Assert(len(results), Equals, 0)
-
-    var buckets []string
-    for i := 0; i < 10; i++ {
-        bucket := "foo" + strconv.Itoa(i)
-        buckets = append(buckets, bucket)
-        err := donut.CreateBucket(bucket)
-        c.Assert(err, IsNil)
-    }
-    sort.Strings(buckets)
-    results, err = donut.ListBuckets()
-    c.Assert(err, IsNil)
-    sort.Strings(results)
-    c.Assert(results, DeepEquals, buckets)
-}
-
-func (s *MySuite) TestObjectList(c *C) {
-    donut := NewDonutMem()
-    donut.CreateBucket("foo")
-
-    results, err := donut.ListObjectsInBucket("foo", "")
-    c.Assert(len(results), Equals, 0)
-
-    var objects []string
-    for i := 0; i < 10; i++ {
-        object := "foo" + strconv.Itoa(i)
-        objects = append(objects, object)
-        writer, err := donut.GetObjectWriter("foo", object, 0)
-        c.Assert(err, IsNil)
-        writer.Write([]byte(object))
-        writer.Close()
-        c.Assert(err, IsNil)
-    }
-    sort.Strings(objects)
-    results, err = donut.ListObjectsInBucket("foo", "")
-    c.Assert(err, IsNil)
-    c.Assert(len(results), Equals, 10)
-    sort.Strings(results)
-    c.Assert(results, DeepEquals, objects)
-}
-
-func (s *MySuite) TestBucketMetadata(c *C) {
-    donut := NewDonutMem()
-    donut.CreateBucket("foo")
-
-    metadata := make(map[string]string)
-
-    metadata["hello"] = "world"
-    metadata["foo"] = "bar"
-
-    err := donut.SetBucketMetadata("foo", metadata)
-    c.Assert(err, IsNil)
-
-    result, err := donut.GetBucketMetadata("foo")
-    c.Assert(result, DeepEquals, metadata)
-}
-
-func (s *MySuite) TestObjectMetadata(c *C) {
-    donut := NewDonutMem()
-    donut.CreateBucket("foo")
-
-    metadata := make(map[string]string)
-
-    metadata["hello"] = "world"
-    metadata["foo"] = "bar"
-
-    result, err := donut.GetObjectMetadata("foo", "bar", 1)
-    c.Assert(result, IsNil)
-    c.Assert(err, Not(IsNil))
-
-    writer, err := donut.GetObjectWriter("foo", "bar", 1)
-    c.Assert(err, IsNil)
-    _, err = writer.Write([]byte("Hello World"))
-    c.Assert(err, IsNil)
-    writer.SetMetadata(metadata)
-    err = writer.Close()
-    c.Assert(err, IsNil)
-
-    expectedMetadata := make(map[string]string)
-    for k, v := range metadata {
-        expectedMetadata[k] = v
-    }
-    expectedMetadata["key"] = "bar"
-    expectedMetadata["column"] = "1"
-
-    result, err = donut.GetObjectMetadata("foo", "bar", 1)
-    c.Assert(err, IsNil)
-    c.Assert(result, DeepEquals, expectedMetadata)
-
-    result, err = donut.GetObjectMetadata("foo", "bar", 0)
-    c.Assert(err, Not(IsNil))
-    c.Assert(result, IsNil)
-}
diff --git a/pkg/storage/donutstorage/donutstorage.go b/pkg/storage/donutstorage/donutstorage.go
index 57a34cf45..fcc9a0906 100644
--- a/pkg/storage/donutstorage/donutstorage.go
+++ b/pkg/storage/donutstorage/donutstorage.go
@@ -17,38 +17,23 @@ package donutstorage
 
 import (
-    "bytes"
     "errors"
-    "io"
-    "sort"
-    "strconv"
-    "strings"
-    "time"
-
-    "crypto/md5"
-    "encoding/hex"
-
-    "github.com/minio-io/minio/pkg/donutbox"
-    "github.com/minio-io/minio/pkg/encoding/erasure"
     "github.com/minio-io/minio/pkg/storage"
-    "github.com/minio-io/minio/pkg/utils/split"
+    "io"
 )
 
 // DonutDriver creates a new single disk storage driver using donut without encoding.
-type DonutDriver struct {
-    donutBox donutbox.DonutBox
-}
+type DonutDriver struct{}
 
 const (
     blockSize = 10 * 1024 * 1024
 )
 
 // Start a single disk subsystem
-func Start(donutBox donutbox.DonutBox) (chan<- string, <-chan error, storage.Storage) {
+func Start() (chan<- string, <-chan error, storage.Storage) {
     ctrlChannel := make(chan string)
     errorChannel := make(chan error)
     s := new(DonutDriver)
-    s.donutBox = donutBox
     go start(ctrlChannel, errorChannel, s)
     return ctrlChannel, errorChannel, s
 }
@@ -59,60 +44,17 @@ func start(ctrlChannel <-chan string, errorChannel chan<- error, s *DonutDriver)
 
 // ListBuckets returns a list of buckets
 func (donutStorage DonutDriver) ListBuckets() (results []storage.BucketMetadata, err error) {
-    buckets, err := donutStorage.donutBox.ListBuckets()
-    if err != nil {
-        return nil, err
-    }
-    sort.Strings(buckets)
-    for _, bucket := range buckets {
-        metadata, err := donutStorage.donutBox.GetBucketMetadata(bucket)
-        if err != nil {
-            return nil, err
-        }
-
-        created, err := time.Parse(time.RFC3339Nano, metadata["created"])
-        if err != nil {
-            return nil, err
-        }
-        bucketMetadata := storage.BucketMetadata{
-            Name:    bucket,
-            Created: created,
-        }
-        results = append(results, bucketMetadata)
-    }
-    return results, err
+    return nil, errors.New("Not Implemented")
 }
 
 // CreateBucket creates a new bucket
 func (donutStorage DonutDriver) CreateBucket(bucket string) error {
-    err := donutStorage.donutBox.CreateBucket(bucket)
-    if err != nil {
-        return err
-    }
-    metadataBucket := storage.BucketMetadata{
-        Name:    bucket,
-        Created: time.Now(),
-    }
-    metadata := createBucketMetadata(metadataBucket)
-    err = donutStorage.donutBox.SetBucketMetadata(bucket, metadata)
-    if err != nil {
-        return err
-    }
-    return nil
+    return errors.New("Not Implemented")
 }
 
 // GetBucketMetadata retrieves an bucket's metadata
 func (donutStorage DonutDriver) GetBucketMetadata(bucket string) (storage.BucketMetadata, error) {
-    metadata, err := donutStorage.donutBox.GetBucketMetadata(bucket)
-    if err != nil {
-        return storage.BucketMetadata{}, err
-    }
-    created, err := time.Parse(time.RFC3339Nano, metadata["created"])
-    bucketMetadata := storage.BucketMetadata{
-        Name:    bucket,
-        Created: created,
-    }
-    return bucketMetadata, nil
+    return storage.BucketMetadata{}, errors.New("Not Implemented")
 }
 
 // CreateBucketPolicy sets a bucket's access policy
@@ -127,72 +69,7 @@ func (donutStorage DonutDriver) GetBucketPolicy(bucket string) (storage.BucketPo
 
 // GetObject retrieves an object and writes it to a writer
 func (donutStorage DonutDriver) GetObject(target io.Writer, bucket, key string) (int64, error) {
-    metadata, err := donutStorage.donutBox.GetObjectMetadata(bucket, key, 0)
-    if err != nil {
-        // TODO strongly type and properly handle error cases
-        return 0, storage.ObjectNotFound{Bucket: bucket, Object: key}
-    }
-    k, err := strconv.Atoi(metadata["erasureK"])
-    if err != nil {
-        return 0, errors.New("Cannot parse erasureK")
-    }
-    m, err := strconv.Atoi(metadata["erasureM"])
-    if err != nil {
-        return 0, errors.New("Cannot parse erasureM")
-    }
-    columnCount := k + m
-    bs, err := strconv.Atoi(metadata["blockSize"])
-    if err != nil {
-        return 0, errors.New("Cannot parse blockSize")
-    }
-    size, err := strconv.Atoi(metadata["size"])
-    if err != nil {
-        return 0, errors.New("Cannot parse length")
-    }
-    chunkCount := size/bs + 1
-    var readers []io.Reader
-    for column := 0; column < columnCount; column++ {
-        reader, err := donutStorage.donutBox.GetObjectReader(bucket, key, uint(column))
-        if err != nil {
-            return 0, err
-        }
-        readers = append(readers, reader)
-    }
-
-    totalWritten := int64(size)
-    totalRemaining := int64(size)
-    if err != err {
-        return 0, err
-    }
-    params, err := erasure.ParseEncoderParams(8, 8, erasure.Cauchy)
-    decoder := erasure.NewEncoder(params)
-    for chunk := 0; chunk < chunkCount; chunk++ {
-        blocks := make([][]byte, columnCount)
-        for column := 0; column < columnCount; column++ {
-            var block bytes.Buffer
-            limitReader := io.LimitReader(readers[column], int64(blockSize))
-            _, err := io.Copy(&block, limitReader)
-            if err != nil {
-                return totalWritten, err
-            }
-            blocks[column] = block.Bytes()
-        }
-        curBlockSize := blockSize
-        if totalRemaining < int64(blockSize) {
-            curBlockSize = int(totalRemaining)
-        }
-        original, err := decoder.Decode(blocks, curBlockSize)
-        if err != nil {
-            return totalWritten, err
-        }
-        curWritten, err := io.Copy(target, bytes.NewBuffer(original))
-        totalRemaining = totalRemaining - curWritten
-        if err != nil {
-            return totalWritten, err
-        }
-    }
-
-    return totalWritten, nil
+    return 0, errors.New("Not Implemented")
 }
 
 // GetPartialObject retrieves an object and writes it to a writer
@@ -202,192 +79,15 @@ func (donutStorage DonutDriver) GetPartialObject(w io.Writer, bucket, object str
 
 // GetObjectMetadata retrieves an object's metadata
 func (donutStorage DonutDriver) GetObjectMetadata(bucket, key string, prefix string) (storage.ObjectMetadata, error) {
-    metadata, err := donutStorage.donutBox.GetObjectMetadata(bucket, key, 0)
-    if err != nil {
-        return storage.ObjectMetadata{}, err
-    }
-    created, err := time.Parse(time.RFC3339Nano, metadata["created"])
-    size, err := strconv.ParseInt(metadata["size"], 10, 64)
-    objectMetadata := storage.ObjectMetadata{
-        Bucket:      bucket,
-        Key:         key,
-        ContentType: metadata["contentType"],
-        Created:     created,
-        Md5:         metadata["md5"],
-        Size:        size,
-    }
-    return objectMetadata, nil
+    return storage.ObjectMetadata{}, errors.New("Not Implemented")
 }
 
 // ListObjects lists objects
 func (donutStorage DonutDriver) ListObjects(bucket string, resources storage.BucketResourcesMetadata) ([]storage.ObjectMetadata, storage.BucketResourcesMetadata, error) {
-    objects, err := donutStorage.donutBox.ListObjectsInBucket(bucket, resources.Prefix)
-    if err != nil {
-        return nil, storage.BucketResourcesMetadata{}, err
-    }
-    var results []storage.ObjectMetadata
-    sort.Strings(objects)
-    for _, object := range withoutDelimiter(objects, resources.Prefix, resources.Delimiter) {
-        if len(results) < resources.Maxkeys {
-            objectMetadata, err := donutStorage.GetObjectMetadata(bucket, object, "")
-            if err != nil {
-                return nil, storage.BucketResourcesMetadata{}, err
-            }
-            results = append(results, objectMetadata)
-        } else {
-            resources.IsTruncated = true
-        }
-    }
-    if resources.Delimiter != "" {
-        objects = trimPrefixWithDelimiter(objects, resources.Prefix, resources.Delimiter)
-        objects = beforeDelimiter(objects, resources.Delimiter)
-        resources.CommonPrefixes = objects
-    }
-    return results, resources, nil
-}
-
-func appendUniq(slice []string, i string) []string {
-    for _, ele := range slice {
-        if ele == i {
-            return slice
-        }
-    }
-    return append(slice, i)
-}
-
-func withoutDelimiter(inputs []string, prefix, delim string) (results []string) {
-    if delim == "" {
-        return inputs
-    }
-    for _, input := range inputs {
-        input = strings.TrimPrefix(input, prefix)
-        if !strings.Contains(input, delim) {
-            results = appendUniq(results, prefix+input)
-        }
-    }
-    return results
-}
-
-func trimPrefixWithDelimiter(inputs []string, prefix, delim string) (results []string) {
-    for _, input := range inputs {
-        input = strings.TrimPrefix(input, prefix)
-        if strings.Contains(input, delim) {
-            results = appendUniq(results, input)
-        }
-    }
-    return results
-}
-
-func beforeDelimiter(inputs []string, delim string) (results []string) {
-    for _, input := range inputs {
-        results = appendUniq(results, strings.Split(input, delim)[0]+delim)
-    }
-    return results
+    return nil, storage.BucketResourcesMetadata{}, errors.New("Not Implemented")
 }
 
 // CreateObject creates a new object
 func (donutStorage DonutDriver) CreateObject(bucketKey, objectKey, contentType, md5sum string, reader io.Reader) error {
-    // set defaults
-    if contentType == "" {
-        contentType = "application/octet-stream"
-    }
-    contentType = strings.TrimSpace(contentType)
-    // split stream
-    splitStream := split.Stream(reader, uint64(blockSize))
-    writers := make([]*donutbox.NewObject, 16)
-    for i := 0; i < 16; i++ {
-        newWriter, err := donutStorage.donutBox.GetObjectWriter(bucketKey, objectKey, uint(i))
-        if err != nil {
-            closeAllWritersWithError(writers, err)
-            return err
-        }
-        writers[i] = newWriter
-    }
-    totalLength := uint64(0)
-    chunkCount := 0
-    hasher := md5.New()
-    for chunk := range splitStream {
-        params, err := erasure.ParseEncoderParams(8, 8, erasure.Cauchy)
-        if err != nil {
-            return err
-        }
-        hasher.Write(chunk.Data)
-        totalLength = totalLength + uint64(len(chunk.Data))
-        chunkCount = chunkCount + 1
-        encoder := erasure.NewEncoder(params)
-        if chunk.Err == nil {
-            parts, _ := encoder.Encode(chunk.Data)
-            for index, part := range parts {
-                if _, err := writers[index].Write(part); err != nil {
-                    closeAllWritersWithError(writers, err)
-                    return err
-                }
-            }
-        } else {
-            closeAllWritersWithError(writers, chunk.Err)
-            return chunk.Err
-        }
-        // encode data
-        // write
-    }
-    // close connections
-
-    metadataObj := storage.ObjectMetadata{
-        Bucket: bucketKey,
-        Key:    objectKey,
-
-        ContentType: contentType,
-        Created:     time.Now(),
-        Md5:         hex.EncodeToString(hasher.Sum(nil)),
-        Size:        int64(totalLength),
-    }
-
-    metadata := createObjectMetadata(metadataObj, blockSize, 8, 8, "Cauchy")
-
-    for column := uint(0); column < 16; column++ {
-        writers[column].SetMetadata(metadata)
-    }
-
-    // TODO capture errors in writers, enough should pass before returning
-    closeAllWriters(writers)
-
-    return nil
-}
-
-func closeAllWriters(writers []*donutbox.NewObject) {
-    for _, writer := range writers {
-        if writer != nil {
-            writer.Close()
-        }
-    }
-}
-
-func closeAllWritersWithError(writers []*donutbox.NewObject, err error) {
-    for _, writer := range writers {
-        if writer != nil {
-            writer.CloseWithError(err)
-        }
-    }
-}
-
-func createBucketMetadata(metadataBucket storage.BucketMetadata) map[string]string {
-    metadata := make(map[string]string)
-    metadata["bucket"] = metadataBucket.Name
-    metadata["created"] = metadataBucket.Created.Format(time.RFC3339Nano)
-    return metadata
-}
-
-func createObjectMetadata(metadataObject storage.ObjectMetadata, blockSize int, k, m uint8, technique string) map[string]string {
-    metadata := make(map[string]string)
-    metadata["bucket"] = metadataObject.Bucket
-    metadata["key"] = metadataObject.Key
-    metadata["contentType"] = metadataObject.ContentType
-    metadata["created"] = metadataObject.Created.Format(time.RFC3339Nano)
-    metadata["md5"] = metadataObject.Md5
-    metadata["size"] = strconv.FormatInt(metadataObject.Size, 10)
-    metadata["blockSize"] = strconv.FormatUint(uint64(blockSize), 10)
-    metadata["erasureK"] = strconv.FormatUint(uint64(k), 10)
-    metadata["erasureM"] = strconv.FormatUint(uint64(m), 10)
-    metadata["erasureTechnique"] = technique
-    return metadata
+    return errors.New("Not Implemented")
 }
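Note (outside the patch): the CreateObject and GetObject bodies removed above split the input stream into blockSize chunks, encode each chunk into k=8 data plus m=8 parity columns with Cauchy Reed-Solomon coding (16 column writers in total), and on read decode each chunk back using the size and blockSize recorded in the object metadata (chunkCount = size/blockSize + 1). The sketch below illustrates that per-chunk encode/decode round trip; the erasure package calls mirror how they appear in the deleted code, and their exact signatures are assumed from this diff rather than verified.

package main

import (
    "bytes"
    "fmt"

    "github.com/minio-io/minio/pkg/encoding/erasure"
)

func main() {
    chunk := []byte("an example chunk of object data")

    // k=8 data columns, m=8 parity columns, Cauchy matrix, as in the removed driver.
    params, err := erasure.ParseEncoderParams(8, 8, erasure.Cauchy)
    if err != nil {
        panic(err)
    }
    encoder := erasure.NewEncoder(params)

    // Encode one chunk into k+m = 16 column blocks, one per column writer.
    blocks, _ := encoder.Encode(chunk)

    // Decoding needs the original chunk length, which the removed GetObject
    // recovered from the "size" and "blockSize" metadata entries.
    original, err := encoder.Decode(blocks, len(chunk))
    if err != nil {
        panic(err)
    }
    fmt.Println(bytes.Equal(original, chunk)) // expected: true
}

Under this 8+8 scheme any 8 of the 16 columns are enough to reconstruct a chunk, at the cost of roughly doubling the stored bytes.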
diff --git a/pkg/storage/donutstorage/donutstorage_test.go b/pkg/storage/donutstorage/donutstorage_test.go
index 281046c67..9476bf52a 100644
--- a/pkg/storage/donutstorage/donutstorage_test.go
+++ b/pkg/storage/donutstorage/donutstorage_test.go
@@ -23,7 +23,6 @@ import (
 
     mstorage "github.com/minio-io/minio/pkg/storage"
 
-    "github.com/minio-io/minio/pkg/donutbox/donutmem"
     . "gopkg.in/check.v1"
 )
 
@@ -34,12 +33,13 @@ type MySuite struct{}
 
 var _ = Suite(&MySuite{})
 
 func (s *MySuite) TestAPISuite(c *C) {
+    c.Skip("Not Implemented")
     var storageList []string
     create := func() mstorage.Storage {
         path, err := ioutil.TempDir(os.TempDir(), "minio-fs-")
         c.Check(err, IsNil)
         storageList = append(storageList, path)
-        _, _, store := Start(donutmem.NewDonutMem()) // TODO Make InMemory driver
+        _, _, store := Start() // TODO Make InMemory driver
         return store
     }
     mstorage.APITestSuite(c, create)
diff --git a/pkg/storage/file/file_test.go b/pkg/storage/file/file_test.go
index ca9bbb088..372dc31b1 100644
--- a/pkg/storage/file/file_test.go
+++ b/pkg/storage/file/file_test.go
@@ -24,6 +24,7 @@ import (
 
     mstorage "github.com/minio-io/minio/pkg/storage"
     . "gopkg.in/check.v1"
+    "log"
 )
 
 func Test(t *testing.T) { TestingT(t) }
@@ -46,6 +47,7 @@ func (s *MySuite) TestAPISuite(c *C) {
 }
 
 func removeRoots(c *C, roots []string) {
+    log.Println(roots)
     for _, root := range roots {
         err := os.RemoveAll(root)
         c.Check(err, IsNil)