Further fixes -

- All test files have been renamed to their respective <package>_test name.
    This is done in accordance with
      - https://github.com/golang/go/wiki/CodeReviewComments#import-dot
        Dot imports are largely used in testing, but elsewhere they should be
        avoided to prevent namespace collisions and circular dependencies.

  - Never use _* in package names other than "_test"; change fragment_v1 to
    expose fragment, just like 'gopkg.in/check.v1'.
This commit is contained in:
Harshavardhana 2015-03-06 01:50:51 -08:00
parent 02ccf123c9
commit e5af8a3f5d
24 changed files with 245 additions and 285 deletions

View file

@ -55,7 +55,11 @@ Building Libraries
* When you're ready to create a pull request, be sure to: * When you're ready to create a pull request, be sure to:
- Have test cases for the new code. If you have questions about how to do it, please ask in your pull request. - Have test cases for the new code. If you have questions about how to do it, please ask in your pull request.
- Run `go fmt - Run `go fmt
- Run `golint` (`go get github.com/golang/lint/golint`) - Run `golint`
```
$ go get github.com/golang/lint/golint
$ golint ./...
```
- Squash your commits into a single commit. `git rebase -i`. It's okay to force update your pull request. - Squash your commits into a single commit. `git rebase -i`. It's okay to force update your pull request.
- Make sure `go test -race ./...` and `go build` completes. - Make sure `go test -race ./...` and `go build` completes.
* Read [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments) article from Golang project * Read [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments) article from Golang project

View file

@ -16,6 +16,7 @@ getdeps: checkdeps checkgopath
verifier: getdeps verifier: getdeps
@echo "Checking for offending code" @echo "Checking for offending code"
@go run buildscripts/verifier.go ${PWD} @go run buildscripts/verifier.go ${PWD}
@go vet ./...
build-all: verifier build-all: verifier
@echo "Building Libraries" @echo "Building Libraries"

View file

@ -14,7 +14,7 @@
* limitations under the License. * limitations under the License.
*/ */
package minioapi package minioapi_test
import ( import (
"bytes" "bytes"
@ -28,8 +28,10 @@ import (
"testing" "testing"
"time" "time"
"github.com/minio-io/minio/pkg/api/minioapi"
mstorage "github.com/minio-io/minio/pkg/storage" mstorage "github.com/minio-io/minio/pkg/storage"
"github.com/minio-io/minio/pkg/storage/inmemory" "github.com/minio-io/minio/pkg/storage/inmemory"
. "gopkg.in/check.v1" . "gopkg.in/check.v1"
) )
@ -41,7 +43,7 @@ var _ = Suite(&MySuite{})
func (s *MySuite) TestNonExistantObject(c *C) { func (s *MySuite) TestNonExistantObject(c *C) {
_, _, storage := inmemory.Start() _, _, storage := inmemory.Start()
httpHandler := HTTPHandler("", storage) httpHandler := minioapi.HTTPHandler("", storage)
testServer := httptest.NewServer(httpHandler) testServer := httptest.NewServer(httpHandler)
defer testServer.Close() defer testServer.Close()
@ -53,7 +55,7 @@ func (s *MySuite) TestNonExistantObject(c *C) {
func (s *MySuite) TestEmptyObject(c *C) { func (s *MySuite) TestEmptyObject(c *C) {
_, _, storage := inmemory.Start() _, _, storage := inmemory.Start()
httpHandler := HTTPHandler("", storage) httpHandler := minioapi.HTTPHandler("", storage)
testServer := httptest.NewServer(httpHandler) testServer := httptest.NewServer(httpHandler)
defer testServer.Close() defer testServer.Close()
@ -78,7 +80,7 @@ func (s *MySuite) TestEmptyObject(c *C) {
func (s *MySuite) TestObject(c *C) { func (s *MySuite) TestObject(c *C) {
_, _, storage := inmemory.Start() _, _, storage := inmemory.Start()
httpHandler := HTTPHandler("", storage) httpHandler := minioapi.HTTPHandler("", storage)
testServer := httptest.NewServer(httpHandler) testServer := httptest.NewServer(httpHandler)
defer testServer.Close() defer testServer.Close()
@ -101,7 +103,7 @@ func (s *MySuite) TestObject(c *C) {
func (s *MySuite) TestMultipleObjects(c *C) { func (s *MySuite) TestMultipleObjects(c *C) {
_, _, storage := inmemory.Start() _, _, storage := inmemory.Start()
httpHandler := HTTPHandler("", storage) httpHandler := minioapi.HTTPHandler("", storage)
testServer := httptest.NewServer(httpHandler) testServer := httptest.NewServer(httpHandler)
defer testServer.Close() defer testServer.Close()
@ -181,7 +183,7 @@ func (s *MySuite) TestMultipleObjects(c *C) {
func (s *MySuite) TestNotImplemented(c *C) { func (s *MySuite) TestNotImplemented(c *C) {
_, _, storage := inmemory.Start() _, _, storage := inmemory.Start()
httpHandler := HTTPHandler("", storage) httpHandler := minioapi.HTTPHandler("", storage)
testServer := httptest.NewServer(httpHandler) testServer := httptest.NewServer(httpHandler)
defer testServer.Close() defer testServer.Close()
@ -192,7 +194,7 @@ func (s *MySuite) TestNotImplemented(c *C) {
func (s *MySuite) TestHeader(c *C) { func (s *MySuite) TestHeader(c *C) {
_, _, storage := inmemory.Start() _, _, storage := inmemory.Start()
httpHandler := HTTPHandler("", storage) httpHandler := minioapi.HTTPHandler("", storage)
testServer := httptest.NewServer(httpHandler) testServer := httptest.NewServer(httpHandler)
defer testServer.Close() defer testServer.Close()
@ -215,7 +217,7 @@ func (s *MySuite) TestHeader(c *C) {
func (s *MySuite) TestPutBucket(c *C) { func (s *MySuite) TestPutBucket(c *C) {
_, _, storage := inmemory.Start() _, _, storage := inmemory.Start()
httpHandler := HTTPHandler("", storage) httpHandler := minioapi.HTTPHandler("", storage)
testServer := httptest.NewServer(httpHandler) testServer := httptest.NewServer(httpHandler)
defer testServer.Close() defer testServer.Close()
@ -240,7 +242,7 @@ func (s *MySuite) TestPutBucket(c *C) {
func (s *MySuite) TestPutObject(c *C) { func (s *MySuite) TestPutObject(c *C) {
_, _, storage := inmemory.Start() _, _, storage := inmemory.Start()
httpHandler := HTTPHandler("", storage) httpHandler := minioapi.HTTPHandler("", storage)
testServer := httptest.NewServer(httpHandler) testServer := httptest.NewServer(httpHandler)
defer testServer.Close() defer testServer.Close()
@ -297,7 +299,7 @@ func (s *MySuite) TestPutObject(c *C) {
func (s *MySuite) TestListBuckets(c *C) { func (s *MySuite) TestListBuckets(c *C) {
_, _, storage := inmemory.Start() _, _, storage := inmemory.Start()
httpHandler := HTTPHandler("", storage) httpHandler := minioapi.HTTPHandler("", storage)
testServer := httptest.NewServer(httpHandler) testServer := httptest.NewServer(httpHandler)
defer testServer.Close() defer testServer.Close()
@ -337,8 +339,8 @@ func (s *MySuite) TestListBuckets(c *C) {
c.Assert(listResponse.Buckets.Bucket[1].Name, Equals, "foo") c.Assert(listResponse.Buckets.Bucket[1].Name, Equals, "foo")
} }
func readListBucket(reader io.Reader) (BucketListResponse, error) { func readListBucket(reader io.Reader) (minioapi.BucketListResponse, error) {
var results BucketListResponse var results minioapi.BucketListResponse
decoder := xml.NewDecoder(reader) decoder := xml.NewDecoder(reader)
err := decoder.Decode(&results) err := decoder.Decode(&results)
return results, err return results, err
@ -376,7 +378,7 @@ func verifyHeaders(c *C, header http.Header, date time.Time, size int, contentTy
func (s *MySuite) TestXMLNameNotInBucketListJson(c *C) { func (s *MySuite) TestXMLNameNotInBucketListJson(c *C) {
_, _, storage := inmemory.Start() _, _, storage := inmemory.Start()
httpHandler := HTTPHandler("", storage) httpHandler := minioapi.HTTPHandler("", storage)
testServer := httptest.NewServer(httpHandler) testServer := httptest.NewServer(httpHandler)
defer testServer.Close() defer testServer.Close()
@ -401,7 +403,7 @@ func (s *MySuite) TestXMLNameNotInBucketListJson(c *C) {
func (s *MySuite) TestXMLNameNotInObjectListJson(c *C) { func (s *MySuite) TestXMLNameNotInObjectListJson(c *C) {
_, _, storage := inmemory.Start() _, _, storage := inmemory.Start()
httpHandler := HTTPHandler("", storage) httpHandler := minioapi.HTTPHandler("", storage)
testServer := httptest.NewServer(httpHandler) testServer := httptest.NewServer(httpHandler)
defer testServer.Close() defer testServer.Close()
@ -426,7 +428,7 @@ func (s *MySuite) TestXMLNameNotInObjectListJson(c *C) {
func (s *MySuite) TestContentTypePersists(c *C) { func (s *MySuite) TestContentTypePersists(c *C) {
_, _, storage := inmemory.Start() _, _, storage := inmemory.Start()
httpHandler := HTTPHandler("", storage) httpHandler := minioapi.HTTPHandler("", storage)
testServer := httptest.NewServer(httpHandler) testServer := httptest.NewServer(httpHandler)
defer testServer.Close() defer testServer.Close()

View file

@ -14,7 +14,7 @@
* limitations under the License. * limitations under the License.
*/ */
package erasure_v1 package erasure
import ( import (
"bytes" "bytes"

View file

@ -14,7 +14,7 @@
* limitations under the License. * limitations under the License.
*/ */
package erasure_v1 package erasure
import ( import (
"bytes" "bytes"

View file

@ -14,7 +14,7 @@
* limitations under the License. * limitations under the License.
*/ */
package fragment_v1 package fragment
import ( import (
"bytes" "bytes"

View file

@ -1,2 +0,0 @@
donut_gen
hello

View file

@ -1,59 +0,0 @@
package main
import (
"bytes"
"fmt"
"os"
"reflect"
"github.com/minio-io/minio/pkg/storage/donut/fragment/fragment_v1"
)
// main is a small end-to-end demo of the donut fragment frame format:
// it writes a single frame to a local file ("newfile") with
// fragment_v1.WriteFrame, then re-opens the file and verifies the 4-byte
// start marker ("MINI") and the 4-byte end marker ("INIM").
func main() {
fmt.Println("--start")
// Create (or truncate) the file that will hold exactly one frame.
file, err := os.OpenFile("newfile", os.O_WRONLY|os.O_CREATE, 0666)
if err != nil {
panic(err)
}
data := []byte("Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. It was popularised in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of Lorem Ipsum.")
dataBuffer := bytes.NewBuffer(data)
// Frame the payload; the declared length must match the buffer length.
err = fragment_v1.WriteFrame(file, dataBuffer, uint64(dataBuffer.Len()))
if err != nil {
panic(err)
}
file.Close()
fmt.Println("--closed")
fmt.Println("--verify")
// Re-open read-only and check both markers independently of WriteFrame.
// NOTE(review): errors from Stat/OpenFile/ReadAt are ignored below; fine
// for a throwaway demo, but any failure would surface as a confusing
// marker mismatch rather than a clear error.
stat, _ := os.Stat("newfile")
fileSize := stat.Size()
rfile, _ := os.OpenFile("newfile", os.O_RDONLY, 0666)
// First 4 bytes of the file must be the start marker "MINI".
blockStart := make([]byte, 4)
blockStartCheck := []byte{'M', 'I', 'N', 'I'}
_, err = rfile.Read(blockStart)
if err != nil {
panic(err)
}
// Last 4 bytes of the file must be the end marker "INIM".
blockEnd := make([]byte, 4)
start := fileSize - 4
blockEndCheck := []byte{'I', 'N', 'I', 'M'}
rfile.ReadAt(blockEnd, start)
rfile.Close()
if !reflect.DeepEqual(blockStart, blockStartCheck) {
panic("Corrupted donut file")
}
if !reflect.DeepEqual(blockEnd, blockEndCheck) {
panic("Corrupted donut file")
}
fmt.Println("--verified")
fmt.Println("--end")
}

View file

@ -14,7 +14,7 @@
* limitations under the License. * limitations under the License.
*/ */
package fragment_v1 package fragment_test
import ( import (
"bytes" "bytes"
@ -22,8 +22,8 @@ import (
"encoding/binary" "encoding/binary"
"testing" "testing"
"github.com/minio-io/minio/pkg/storage/donut/fragment/fragment_v1"
"github.com/minio-io/minio/pkg/utils/checksum/crc32c" "github.com/minio-io/minio/pkg/utils/checksum/crc32c"
. "gopkg.in/check.v1" . "gopkg.in/check.v1"
) )
@ -39,7 +39,7 @@ func (s *MySuite) TestSingleWrite(c *C) {
testData := "Hello, World" testData := "Hello, World"
testLength := uint64(len(testData)) testLength := uint64(len(testData))
err := WriteFrame(&testBuffer, bytes.NewBufferString(testData), testLength) err := fragment.WriteFrame(&testBuffer, bytes.NewBufferString(testData), testLength)
c.Assert(err, IsNil) c.Assert(err, IsNil)
testBufferLength := uint64(testBuffer.Len()) testBufferLength := uint64(testBuffer.Len())
@ -112,7 +112,7 @@ func (s *MySuite) TestSingleWrite(c *C) {
func (s *MySuite) TestLengthMismatchInWrite(c *C) { func (s *MySuite) TestLengthMismatchInWrite(c *C) {
var testData bytes.Buffer var testData bytes.Buffer
err := WriteFrame(&testData, bytes.NewBufferString("hello, world"), 5) err := fragment.WriteFrame(&testData, bytes.NewBufferString("hello, world"), 5)
c.Assert(err, Not(IsNil)) c.Assert(err, Not(IsNil))
} }
@ -122,7 +122,7 @@ func benchmarkSize(b *testing.B, size int) {
b.SetBytes(int64(size)) b.SetBytes(int64(size))
target := new(bytes.Buffer) target := new(bytes.Buffer)
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
WriteFrame(target, bytes.NewReader(buf[:size]), uint64(size)) fragment.WriteFrame(target, bytes.NewReader(buf[:size]), uint64(size))
} }
} }

View file

@ -14,12 +14,14 @@
* limitations under the License. * limitations under the License.
*/ */
package erasure package erasure_test
import ( import (
"bytes" "bytes"
. "gopkg.in/check.v1"
"testing" "testing"
"github.com/minio-io/minio/pkg/storage/erasure"
. "gopkg.in/check.v1"
) )
type MySuite struct{} type MySuite struct{}
@ -29,11 +31,11 @@ var _ = Suite(&MySuite{})
func Test(t *testing.T) { TestingT(t) } func Test(t *testing.T) { TestingT(t) }
func (s *MySuite) TestCauchyDecode(c *C) { func (s *MySuite) TestCauchyDecode(c *C) {
ep, _ := ParseEncoderParams(10, 5, Cauchy) ep, _ := erasure.ParseEncoderParams(10, 5, erasure.Cauchy)
data := []byte("Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. It was popularised in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of Lorem Ipsum.") data := []byte("Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. It was popularised in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of Lorem Ipsum.")
e := NewEncoder(ep) e := erasure.NewEncoder(ep)
chunks, length := e.Encode(data) chunks, length := e.Encode(data)
c.Assert(length, Equals, len(data)) c.Assert(length, Equals, len(data))

View file

@ -14,19 +14,21 @@
* limitations under the License. * limitations under the License.
*/ */
package erasure package erasure_test
import ( import (
"bytes" "bytes"
"github.com/minio-io/minio/pkg/storage/erasure"
. "gopkg.in/check.v1" . "gopkg.in/check.v1"
) )
func (s *MySuite) TestVanderMondeDecode(c *C) { func (s *MySuite) TestVanderMondeDecode(c *C) {
ep, _ := ParseEncoderParams(10, 5, Vandermonde) ep, _ := erasure.ParseEncoderParams(10, 5, erasure.Vandermonde)
data := []byte("Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. It was popularised in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of Lorem Ipsum.") data := []byte("Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. It was popularised in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of Lorem Ipsum.")
e := NewEncoder(ep) e := erasure.NewEncoder(ep)
chunks, length := e.Encode(data) chunks, length := e.Encode(data)
c.Logf("chunks length: %d", len(chunks)) c.Logf("chunks length: %d", len(chunks))
c.Logf("length: %d", length) c.Logf("length: %d", length)

View file

@ -33,7 +33,8 @@ import (
"github.com/minio-io/minio/pkg/utils/policy" "github.com/minio-io/minio/pkg/utils/policy"
) )
type storage struct { // Storage - fs local variables
type Storage struct {
root string root string
lock *sync.Mutex lock *sync.Mutex
} }
@ -44,17 +45,17 @@ type SerializedMetadata struct {
} }
// Start filesystem channel // Start filesystem channel
func Start(root string) (chan<- string, <-chan error, *storage) { func Start(root string) (chan<- string, <-chan error, *Storage) {
ctrlChannel := make(chan string) ctrlChannel := make(chan string)
errorChannel := make(chan error) errorChannel := make(chan error)
s := storage{} s := Storage{}
s.root = root s.root = root
s.lock = new(sync.Mutex) s.lock = new(sync.Mutex)
go start(ctrlChannel, errorChannel, &s) go start(ctrlChannel, errorChannel, &s)
return ctrlChannel, errorChannel, &s return ctrlChannel, errorChannel, &s
} }
func start(ctrlChannel <-chan string, errorChannel chan<- error, s *storage) { func start(ctrlChannel <-chan string, errorChannel chan<- error, s *Storage) {
err := os.MkdirAll(s.root, 0700) err := os.MkdirAll(s.root, 0700)
errorChannel <- err errorChannel <- err
close(errorChannel) close(errorChannel)
@ -71,8 +72,8 @@ func appendUniq(slice []string, i string) []string {
/// Bucket Operations /// Bucket Operations
// GET - Service // ListBuckets - Get service
func (storage *storage) ListBuckets() ([]mstorage.BucketMetadata, error) { func (storage *Storage) ListBuckets() ([]mstorage.BucketMetadata, error) {
files, err := ioutil.ReadDir(storage.root) files, err := ioutil.ReadDir(storage.root)
if err != nil { if err != nil {
return []mstorage.BucketMetadata{}, mstorage.EmbedError("bucket", "", err) return []mstorage.BucketMetadata{}, mstorage.EmbedError("bucket", "", err)
@ -96,8 +97,8 @@ func (storage *storage) ListBuckets() ([]mstorage.BucketMetadata, error) {
return metadataList, nil return metadataList, nil
} }
// PUT - Bucket // StoreBucket - PUT Bucket
func (storage *storage) StoreBucket(bucket string) error { func (storage *Storage) StoreBucket(bucket string) error {
storage.lock.Lock() storage.lock.Lock()
defer storage.lock.Unlock() defer storage.lock.Unlock()
@ -124,8 +125,8 @@ func (storage *storage) StoreBucket(bucket string) error {
return nil return nil
} }
// GET - Bucket policy // GetBucketPolicy - GET bucket policy
func (storage *storage) GetBucketPolicy(bucket string) (interface{}, error) { func (storage *Storage) GetBucketPolicy(bucket string) (interface{}, error) {
storage.lock.Lock() storage.lock.Lock()
defer storage.lock.Unlock() defer storage.lock.Unlock()
@ -169,8 +170,8 @@ func (storage *storage) GetBucketPolicy(bucket string) (interface{}, error) {
} }
// PUT - Bucket policy // StoreBucketPolicy - PUT bucket policy
func (storage *storage) StoreBucketPolicy(bucket string, policy interface{}) error { func (storage *Storage) StoreBucketPolicy(bucket string, policy interface{}) error {
storage.lock.Lock() storage.lock.Lock()
defer storage.lock.Unlock() defer storage.lock.Unlock()
@ -212,8 +213,8 @@ func (storage *storage) StoreBucketPolicy(bucket string, policy interface{}) err
/// Object Operations /// Object Operations
// GET Object // CopyObjectToWriter - GET object
func (storage *storage) CopyObjectToWriter(w io.Writer, bucket string, object string) (int64, error) { func (storage *Storage) CopyObjectToWriter(w io.Writer, bucket string, object string) (int64, error) {
// validate bucket // validate bucket
if mstorage.IsValidBucket(bucket) == false { if mstorage.IsValidBucket(bucket) == false {
return 0, mstorage.BucketNameInvalid{Bucket: bucket} return 0, mstorage.BucketNameInvalid{Bucket: bucket}
@ -254,8 +255,8 @@ func (storage *storage) CopyObjectToWriter(w io.Writer, bucket string, object st
return count, nil return count, nil
} }
// HEAD Object // GetObjectMetadata - HEAD object
func (storage *storage) GetObjectMetadata(bucket string, object string) (mstorage.ObjectMetadata, error) { func (storage *Storage) GetObjectMetadata(bucket string, object string) (mstorage.ObjectMetadata, error) {
if mstorage.IsValidBucket(bucket) == false { if mstorage.IsValidBucket(bucket) == false {
return mstorage.ObjectMetadata{}, mstorage.BucketNameInvalid{Bucket: bucket} return mstorage.ObjectMetadata{}, mstorage.BucketNameInvalid{Bucket: bucket}
} }
@ -352,8 +353,8 @@ func (b byObjectKey) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
// Less // Less
func (b byObjectKey) Less(i, j int) bool { return b[i].Key < b[j].Key } func (b byObjectKey) Less(i, j int) bool { return b[i].Key < b[j].Key }
// GET bucket (list objects) // ListObjects - GET bucket (list objects)
func (storage *storage) ListObjects(bucket string, resources mstorage.BucketResourcesMetadata) ([]mstorage.ObjectMetadata, mstorage.BucketResourcesMetadata, error) { func (storage *Storage) ListObjects(bucket string, resources mstorage.BucketResourcesMetadata) ([]mstorage.ObjectMetadata, mstorage.BucketResourcesMetadata, error) {
p := bucketDir{} p := bucketDir{}
p.files = make(map[string]os.FileInfo) p.files = make(map[string]os.FileInfo)
@ -454,8 +455,8 @@ ret:
return metadataList, resources, nil return metadataList, resources, nil
} }
// PUT object // StoreObject - PUT object
func (storage *storage) StoreObject(bucket, key, contentType string, data io.Reader) error { func (storage *Storage) StoreObject(bucket, key, contentType string, data io.Reader) error {
// TODO Commits should stage then move instead of writing directly // TODO Commits should stage then move instead of writing directly
storage.lock.Lock() storage.lock.Lock()
defer storage.lock.Unlock() defer storage.lock.Unlock()

View file

@ -14,7 +14,7 @@
* limitations under the License. * limitations under the License.
*/ */
package fs package fs_test
import ( import (
"io/ioutil" "io/ioutil"
@ -22,6 +22,7 @@ import (
"testing" "testing"
mstorage "github.com/minio-io/minio/pkg/storage" mstorage "github.com/minio-io/minio/pkg/storage"
"github.com/minio-io/minio/pkg/storage/fs"
. "gopkg.in/check.v1" . "gopkg.in/check.v1"
) )
@ -38,7 +39,7 @@ func (s *MySuite) TestAPISuite(c *C) {
path, err := ioutil.TempDir(os.TempDir(), "minio-fs-") path, err := ioutil.TempDir(os.TempDir(), "minio-fs-")
c.Check(err, IsNil) c.Check(err, IsNil)
storageList = append(storageList, path) storageList = append(storageList, path)
_, _, store := Start(path) _, _, store := fs.Start(path)
return store return store
} }
mstorage.APITestSuite(c, create) mstorage.APITestSuite(c, create)

View file

@ -30,7 +30,8 @@ import (
"github.com/minio-io/minio/pkg/utils/policy" "github.com/minio-io/minio/pkg/utils/policy"
) )
type storage struct { // Storage - local variables
type Storage struct {
bucketdata map[string]storedBucket bucketdata map[string]storedBucket
objectdata map[string]storedObject objectdata map[string]storedObject
lock *sync.RWMutex lock *sync.RWMutex
@ -48,11 +49,11 @@ type storedObject struct {
} }
// Start inmemory object server // Start inmemory object server
func Start() (chan<- string, <-chan error, *storage) { func Start() (chan<- string, <-chan error, *Storage) {
ctrlChannel := make(chan string) ctrlChannel := make(chan string)
errorChannel := make(chan error) errorChannel := make(chan error)
go start(ctrlChannel, errorChannel) go start(ctrlChannel, errorChannel)
return ctrlChannel, errorChannel, &storage{ return ctrlChannel, errorChannel, &Storage{
bucketdata: make(map[string]storedBucket), bucketdata: make(map[string]storedBucket),
objectdata: make(map[string]storedObject), objectdata: make(map[string]storedObject),
lock: new(sync.RWMutex), lock: new(sync.RWMutex),
@ -63,8 +64,8 @@ func start(ctrlChannel <-chan string, errorChannel chan<- error) {
close(errorChannel) close(errorChannel)
} }
// GET object from memory buffer // CopyObjectToWriter - GET object from memory buffer
func (storage *storage) CopyObjectToWriter(w io.Writer, bucket string, object string) (int64, error) { func (storage *Storage) CopyObjectToWriter(w io.Writer, bucket string, object string) (int64, error) {
// TODO synchronize access // TODO synchronize access
// get object // get object
key := bucket + ":" + object key := bucket + ":" + object
@ -76,18 +77,18 @@ func (storage *storage) CopyObjectToWriter(w io.Writer, bucket string, object st
return 0, mstorage.ObjectNotFound{Bucket: bucket, Object: object} return 0, mstorage.ObjectNotFound{Bucket: bucket, Object: object}
} }
// Not implemented // StoreBucketPolicy - Not implemented
func (storage *storage) StoreBucketPolicy(bucket string, policy interface{}) error { func (storage *Storage) StoreBucketPolicy(bucket string, policy interface{}) error {
return mstorage.APINotImplemented{API: "PutBucketPolicy"} return mstorage.APINotImplemented{API: "PutBucketPolicy"}
} }
// Not implemented // GetBucketPolicy - Not implemented
func (storage *storage) GetBucketPolicy(bucket string) (interface{}, error) { func (storage *Storage) GetBucketPolicy(bucket string) (interface{}, error) {
return policy.BucketPolicy{}, mstorage.APINotImplemented{API: "GetBucketPolicy"} return policy.BucketPolicy{}, mstorage.APINotImplemented{API: "GetBucketPolicy"}
} }
// PUT object to memory buffer // StoreObject - PUT object to memory buffer
func (storage *storage) StoreObject(bucket, key, contentType string, data io.Reader) error { func (storage *Storage) StoreObject(bucket, key, contentType string, data io.Reader) error {
storage.lock.Lock() storage.lock.Lock()
defer storage.lock.Unlock() defer storage.lock.Unlock()
@ -127,8 +128,8 @@ func (storage *storage) StoreObject(bucket, key, contentType string, data io.Rea
return nil return nil
} }
// Create Bucket in memory // StoreBucket - create bucket in memory
func (storage *storage) StoreBucket(bucketName string) error { func (storage *Storage) StoreBucket(bucketName string) error {
storage.lock.Lock() storage.lock.Lock()
defer storage.lock.Unlock() defer storage.lock.Unlock()
if !mstorage.IsValidBucket(bucketName) { if !mstorage.IsValidBucket(bucketName) {
@ -148,8 +149,8 @@ func (storage *storage) StoreBucket(bucketName string) error {
return nil return nil
} }
// List objects in memory // ListObjects - list objects from memory
func (storage *storage) ListObjects(bucket string, resources mstorage.BucketResourcesMetadata) ([]mstorage.ObjectMetadata, mstorage.BucketResourcesMetadata, error) { func (storage *Storage) ListObjects(bucket string, resources mstorage.BucketResourcesMetadata) ([]mstorage.ObjectMetadata, mstorage.BucketResourcesMetadata, error) {
if _, ok := storage.bucketdata[bucket]; ok == false { if _, ok := storage.bucketdata[bucket]; ok == false {
return []mstorage.ObjectMetadata{}, mstorage.BucketResourcesMetadata{IsTruncated: false}, mstorage.BucketNotFound{Bucket: bucket} return []mstorage.ObjectMetadata{}, mstorage.BucketResourcesMetadata{IsTruncated: false}, mstorage.BucketNotFound{Bucket: bucket}
} }
@ -186,8 +187,8 @@ func (b byBucketName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
// Less // Less
func (b byBucketName) Less(i, j int) bool { return b[i].Name < b[j].Name } func (b byBucketName) Less(i, j int) bool { return b[i].Name < b[j].Name }
// List buckets // ListBuckets - List buckets from memory
func (storage *storage) ListBuckets() ([]mstorage.BucketMetadata, error) { func (storage *Storage) ListBuckets() ([]mstorage.BucketMetadata, error) {
var results []mstorage.BucketMetadata var results []mstorage.BucketMetadata
for _, bucket := range storage.bucketdata { for _, bucket := range storage.bucketdata {
results = append(results, bucket.metadata) results = append(results, bucket.metadata)
@ -196,8 +197,8 @@ func (storage *storage) ListBuckets() ([]mstorage.BucketMetadata, error) {
return results, nil return results, nil
} }
// HEAD object // GetObjectMetadata - get object metadata from memory
func (storage *storage) GetObjectMetadata(bucket, key string) (mstorage.ObjectMetadata, error) { func (storage *Storage) GetObjectMetadata(bucket, key string) (mstorage.ObjectMetadata, error) {
objectKey := bucket + ":" + key objectKey := bucket + ":" + key
if object, ok := storage.objectdata[objectKey]; ok == true { if object, ok := storage.objectdata[objectKey]; ok == true {

View file

@ -14,12 +14,13 @@
* limitations under the License. * limitations under the License.
*/ */
package inmemory package inmemory_test
import ( import (
"testing" "testing"
mstorage "github.com/minio-io/minio/pkg/storage" mstorage "github.com/minio-io/minio/pkg/storage"
"github.com/minio-io/minio/pkg/storage/inmemory"
. "gopkg.in/check.v1" . "gopkg.in/check.v1"
) )
@ -32,7 +33,7 @@ var _ = Suite(&MySuite{})
func (s *MySuite) TestAPISuite(c *C) { func (s *MySuite) TestAPISuite(c *C) {
create := func() mstorage.Storage { create := func() mstorage.Storage {
_, _, store := Start() _, _, store := inmemory.Start()
return store return store
} }

View file

@ -21,11 +21,11 @@ import (
"math/rand" "math/rand"
"strconv" "strconv"
. "gopkg.in/check.v1" "gopkg.in/check.v1"
) )
// APITestSuite - collection of API tests // APITestSuite - collection of API tests
func APITestSuite(c *C, create func() Storage) { func APITestSuite(c *check.C, create func() Storage) {
testCreateBucket(c, create) testCreateBucket(c, create)
testMultipleObjectCreation(c, create) testMultipleObjectCreation(c, create)
testPaging(c, create) testPaging(c, create)
@ -41,15 +41,15 @@ func APITestSuite(c *C, create func() Storage) {
testDefaultContentType(c, create) testDefaultContentType(c, create)
} }
func testCreateBucket(c *C, create func() Storage) { func testCreateBucket(c *check.C, create func() Storage) {
// TODO // TODO
} }
func testMultipleObjectCreation(c *C, create func() Storage) { func testMultipleObjectCreation(c *check.C, create func() Storage) {
objects := make(map[string][]byte) objects := make(map[string][]byte)
storage := create() storage := create()
err := storage.StoreBucket("bucket") err := storage.StoreBucket("bucket")
c.Assert(err, IsNil) c.Assert(err, check.IsNil)
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
randomPerm := rand.Perm(10) randomPerm := rand.Perm(10)
randomString := "" randomString := ""
@ -59,7 +59,7 @@ func testMultipleObjectCreation(c *C, create func() Storage) {
key := "obj" + strconv.Itoa(i) key := "obj" + strconv.Itoa(i)
objects[key] = []byte(randomString) objects[key] = []byte(randomString)
err := storage.StoreObject("bucket", key, "", bytes.NewBufferString(randomString)) err := storage.StoreObject("bucket", key, "", bytes.NewBufferString(randomString))
c.Assert(err, IsNil) c.Assert(err, check.IsNil)
} }
// ensure no duplicate etags // ensure no duplicate etags
@ -67,162 +67,162 @@ func testMultipleObjectCreation(c *C, create func() Storage) {
for key, value := range objects { for key, value := range objects {
var byteBuffer bytes.Buffer var byteBuffer bytes.Buffer
storage.CopyObjectToWriter(&byteBuffer, "bucket", key) storage.CopyObjectToWriter(&byteBuffer, "bucket", key)
c.Assert(bytes.Equal(value, byteBuffer.Bytes()), Equals, true) c.Assert(bytes.Equal(value, byteBuffer.Bytes()), check.Equals, true)
metadata, err := storage.GetObjectMetadata("bucket", key) metadata, err := storage.GetObjectMetadata("bucket", key)
c.Assert(err, IsNil) c.Assert(err, check.IsNil)
c.Assert(metadata.Size, Equals, int64(len(value))) c.Assert(metadata.Size, check.Equals, int64(len(value)))
_, ok := etags[metadata.ETag] _, ok := etags[metadata.ETag]
c.Assert(ok, Equals, false) c.Assert(ok, check.Equals, false)
etags[metadata.ETag] = metadata.ETag etags[metadata.ETag] = metadata.ETag
} }
} }
func testPaging(c *C, create func() Storage) { func testPaging(c *check.C, create func() Storage) {
storage := create() storage := create()
storage.StoreBucket("bucket") storage.StoreBucket("bucket")
resources := BucketResourcesMetadata{} resources := BucketResourcesMetadata{}
objects, resources, err := storage.ListObjects("bucket", resources) objects, resources, err := storage.ListObjects("bucket", resources)
c.Assert(len(objects), Equals, 0) c.Assert(len(objects), check.Equals, 0)
c.Assert(resources.IsTruncated, Equals, false) c.Assert(resources.IsTruncated, check.Equals, false)
c.Assert(err, IsNil) c.Assert(err, check.IsNil)
// check before paging occurs // checheck before paging occurs
for i := 0; i < 5; i++ { for i := 0; i < 5; i++ {
key := "obj" + strconv.Itoa(i) key := "obj" + strconv.Itoa(i)
storage.StoreObject("bucket", key, "", bytes.NewBufferString(key)) storage.StoreObject("bucket", key, "", bytes.NewBufferString(key))
resources.Maxkeys = 5 resources.Maxkeys = 5
objects, resources, err = storage.ListObjects("bucket", resources) objects, resources, err = storage.ListObjects("bucket", resources)
c.Assert(len(objects), Equals, i+1) c.Assert(len(objects), check.Equals, i+1)
c.Assert(resources.IsTruncated, Equals, false) c.Assert(resources.IsTruncated, check.Equals, false)
c.Assert(err, IsNil) c.Assert(err, check.IsNil)
} }
// check after paging occurs pages work // checheck after paging occurs pages work
for i := 6; i <= 10; i++ { for i := 6; i <= 10; i++ {
key := "obj" + strconv.Itoa(i) key := "obj" + strconv.Itoa(i)
storage.StoreObject("bucket", key, "", bytes.NewBufferString(key)) storage.StoreObject("bucket", key, "", bytes.NewBufferString(key))
resources.Maxkeys = 5 resources.Maxkeys = 5
objects, resources, err = storage.ListObjects("bucket", resources) objects, resources, err = storage.ListObjects("bucket", resources)
c.Assert(len(objects), Equals, 5) c.Assert(len(objects), check.Equals, 5)
c.Assert(resources.IsTruncated, Equals, true) c.Assert(resources.IsTruncated, check.Equals, true)
c.Assert(err, IsNil) c.Assert(err, check.IsNil)
} }
// check paging with prefix at end returns less objects // checheck paging with prefix at end returns less objects
{ {
storage.StoreObject("bucket", "newPrefix", "", bytes.NewBufferString("prefix1")) storage.StoreObject("bucket", "newPrefix", "", bytes.NewBufferString("prefix1"))
storage.StoreObject("bucket", "newPrefix2", "", bytes.NewBufferString("prefix2")) storage.StoreObject("bucket", "newPrefix2", "", bytes.NewBufferString("prefix2"))
resources.Prefix = "new" resources.Prefix = "new"
resources.Maxkeys = 5 resources.Maxkeys = 5
objects, resources, err = storage.ListObjects("bucket", resources) objects, resources, err = storage.ListObjects("bucket", resources)
c.Assert(len(objects), Equals, 2) c.Assert(len(objects), check.Equals, 2)
} }
// check ordering of pages // checheck ordering of pages
{ {
resources.Prefix = "" resources.Prefix = ""
resources.Maxkeys = 1000 resources.Maxkeys = 1000
objects, resources, err = storage.ListObjects("bucket", resources) objects, resources, err = storage.ListObjects("bucket", resources)
c.Assert(objects[0].Key, Equals, "newPrefix") c.Assert(objects[0].Key, check.Equals, "newPrefix")
c.Assert(objects[1].Key, Equals, "newPrefix2") c.Assert(objects[1].Key, check.Equals, "newPrefix2")
c.Assert(objects[2].Key, Equals, "obj0") c.Assert(objects[2].Key, check.Equals, "obj0")
c.Assert(objects[3].Key, Equals, "obj1") c.Assert(objects[3].Key, check.Equals, "obj1")
c.Assert(objects[4].Key, Equals, "obj10") c.Assert(objects[4].Key, check.Equals, "obj10")
} }
// check ordering of results with prefix // checheck ordering of results with prefix
{ {
resources.Prefix = "obj" resources.Prefix = "obj"
resources.Maxkeys = 1000 resources.Maxkeys = 1000
objects, resources, err = storage.ListObjects("bucket", resources) objects, resources, err = storage.ListObjects("bucket", resources)
c.Assert(objects[0].Key, Equals, "obj0") c.Assert(objects[0].Key, check.Equals, "obj0")
c.Assert(objects[1].Key, Equals, "obj1") c.Assert(objects[1].Key, check.Equals, "obj1")
c.Assert(objects[2].Key, Equals, "obj10") c.Assert(objects[2].Key, check.Equals, "obj10")
c.Assert(objects[3].Key, Equals, "obj2") c.Assert(objects[3].Key, check.Equals, "obj2")
c.Assert(objects[4].Key, Equals, "obj3") c.Assert(objects[4].Key, check.Equals, "obj3")
} }
// check ordering of results with prefix and no paging // checheck ordering of results with prefix and no paging
{ {
resources.Prefix = "new" resources.Prefix = "new"
resources.Maxkeys = 5 resources.Maxkeys = 5
objects, resources, err = storage.ListObjects("bucket", resources) objects, resources, err = storage.ListObjects("bucket", resources)
c.Assert(objects[0].Key, Equals, "newPrefix") c.Assert(objects[0].Key, check.Equals, "newPrefix")
c.Assert(objects[1].Key, Equals, "newPrefix2") c.Assert(objects[1].Key, check.Equals, "newPrefix2")
} }
} }
func testObjectOverwriteFails(c *C, create func() Storage) { func testObjectOverwriteFails(c *check.C, create func() Storage) {
storage := create() storage := create()
storage.StoreBucket("bucket") storage.StoreBucket("bucket")
err := storage.StoreObject("bucket", "object", "", bytes.NewBufferString("one")) err := storage.StoreObject("bucket", "object", "", bytes.NewBufferString("one"))
c.Assert(err, IsNil) c.Assert(err, check.IsNil)
err = storage.StoreObject("bucket", "object", "", bytes.NewBufferString("three")) err = storage.StoreObject("bucket", "object", "", bytes.NewBufferString("three"))
c.Assert(err, Not(IsNil)) c.Assert(err, check.Not(check.IsNil))
var bytesBuffer bytes.Buffer var bytesBuffer bytes.Buffer
length, err := storage.CopyObjectToWriter(&bytesBuffer, "bucket", "object") length, err := storage.CopyObjectToWriter(&bytesBuffer, "bucket", "object")
c.Assert(length, Equals, int64(len("one"))) c.Assert(length, check.Equals, int64(len("one")))
c.Assert(err, IsNil) c.Assert(err, check.IsNil)
c.Assert(string(bytesBuffer.Bytes()), Equals, "one") c.Assert(string(bytesBuffer.Bytes()), check.Equals, "one")
} }
func testNonExistantBucketOperations(c *C, create func() Storage) { func testNonExistantBucketOperations(c *check.C, create func() Storage) {
storage := create() storage := create()
err := storage.StoreObject("bucket", "object", "", bytes.NewBufferString("one")) err := storage.StoreObject("bucket", "object", "", bytes.NewBufferString("one"))
c.Assert(err, Not(IsNil)) c.Assert(err, check.Not(check.IsNil))
} }
func testBucketRecreateFails(c *C, create func() Storage) { func testBucketRecreateFails(c *check.C, create func() Storage) {
storage := create() storage := create()
err := storage.StoreBucket("string") err := storage.StoreBucket("string")
c.Assert(err, IsNil) c.Assert(err, check.IsNil)
err = storage.StoreBucket("string") err = storage.StoreBucket("string")
c.Assert(err, Not(IsNil)) c.Assert(err, check.Not(check.IsNil))
} }
func testPutObjectInSubdir(c *C, create func() Storage) { func testPutObjectInSubdir(c *check.C, create func() Storage) {
storage := create() storage := create()
err := storage.StoreBucket("bucket") err := storage.StoreBucket("bucket")
c.Assert(err, IsNil) c.Assert(err, check.IsNil)
err = storage.StoreObject("bucket", "dir1/dir2/object", "", bytes.NewBufferString("hello world")) err = storage.StoreObject("bucket", "dir1/dir2/object", "", bytes.NewBufferString("hello world"))
c.Assert(err, IsNil) c.Assert(err, check.IsNil)
var bytesBuffer bytes.Buffer var bytesBuffer bytes.Buffer
length, err := storage.CopyObjectToWriter(&bytesBuffer, "bucket", "dir1/dir2/object") length, err := storage.CopyObjectToWriter(&bytesBuffer, "bucket", "dir1/dir2/object")
c.Assert(len(bytesBuffer.Bytes()), Equals, len("hello world")) c.Assert(len(bytesBuffer.Bytes()), check.Equals, len("hello world"))
c.Assert(int64(len(bytesBuffer.Bytes())), Equals, length) c.Assert(int64(len(bytesBuffer.Bytes())), check.Equals, length)
c.Assert(err, IsNil) c.Assert(err, check.IsNil)
} }
func testListBuckets(c *C, create func() Storage) { func testListBuckets(c *check.C, create func() Storage) {
storage := create() storage := create()
// test empty list // test empty list
buckets, err := storage.ListBuckets() buckets, err := storage.ListBuckets()
c.Assert(len(buckets), Equals, 0) c.Assert(len(buckets), check.Equals, 0)
c.Assert(err, IsNil) c.Assert(err, check.IsNil)
// add one and test exists // add one and test exists
err = storage.StoreBucket("bucket1") err = storage.StoreBucket("bucket1")
c.Assert(err, IsNil) c.Assert(err, check.IsNil)
buckets, err = storage.ListBuckets() buckets, err = storage.ListBuckets()
c.Assert(len(buckets), Equals, 1) c.Assert(len(buckets), check.Equals, 1)
c.Assert(err, IsNil) c.Assert(err, check.IsNil)
// add two and test exists // add two and test exists
err = storage.StoreBucket("bucket2") err = storage.StoreBucket("bucket2")
c.Assert(err, IsNil) c.Assert(err, check.IsNil)
buckets, err = storage.ListBuckets() buckets, err = storage.ListBuckets()
c.Assert(len(buckets), Equals, 2) c.Assert(len(buckets), check.Equals, 2)
c.Assert(err, IsNil) c.Assert(err, check.IsNil)
// add three and test exists + prefix // add three and test exists + prefix
err = storage.StoreBucket("bucket22") err = storage.StoreBucket("bucket22")
buckets, err = storage.ListBuckets() buckets, err = storage.ListBuckets()
c.Assert(len(buckets), Equals, 3) c.Assert(len(buckets), check.Equals, 3)
c.Assert(err, IsNil) c.Assert(err, check.IsNil)
} }
func testListBucketsOrder(c *C, create func() Storage) { func testListBucketsOrder(c *check.C, create func() Storage) {
// if implementation contains a map, order of map keys will vary. // if implementation contains a map, order of map keys will vary.
// this ensures they return in the same order each time // this ensures they return in the same order each time
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
@ -232,107 +232,107 @@ func testListBucketsOrder(c *C, create func() Storage) {
storage.StoreBucket("bucket2") storage.StoreBucket("bucket2")
buckets, err := storage.ListBuckets() buckets, err := storage.ListBuckets()
c.Assert(len(buckets), Equals, 2) c.Assert(len(buckets), check.Equals, 2)
c.Assert(err, IsNil) c.Assert(err, check.IsNil)
c.Assert(buckets[0].Name, Equals, "bucket1") c.Assert(buckets[0].Name, check.Equals, "bucket1")
c.Assert(buckets[1].Name, Equals, "bucket2") c.Assert(buckets[1].Name, check.Equals, "bucket2")
} }
} }
func testListObjectsTestsForNonExistantBucket(c *C, create func() Storage) { func testListObjectsTestsForNonExistantBucket(c *check.C, create func() Storage) {
storage := create() storage := create()
resources := BucketResourcesMetadata{Prefix: "", Maxkeys: 1000} resources := BucketResourcesMetadata{Prefix: "", Maxkeys: 1000}
objects, resources, err := storage.ListObjects("bucket", resources) objects, resources, err := storage.ListObjects("bucket", resources)
c.Assert(err, Not(IsNil)) c.Assert(err, check.Not(check.IsNil))
c.Assert(resources.IsTruncated, Equals, false) c.Assert(resources.IsTruncated, check.Equals, false)
c.Assert(len(objects), Equals, 0) c.Assert(len(objects), check.Equals, 0)
} }
func testNonExistantObjectInBucket(c *C, create func() Storage) { func testNonExistantObjectInBucket(c *check.C, create func() Storage) {
storage := create() storage := create()
err := storage.StoreBucket("bucket") err := storage.StoreBucket("bucket")
c.Assert(err, IsNil) c.Assert(err, check.IsNil)
var byteBuffer bytes.Buffer var byteBuffer bytes.Buffer
length, err := storage.CopyObjectToWriter(&byteBuffer, "bucket", "dir1") length, err := storage.CopyObjectToWriter(&byteBuffer, "bucket", "dir1")
c.Assert(length, Equals, int64(0)) c.Assert(length, check.Equals, int64(0))
c.Assert(err, Not(IsNil)) c.Assert(err, check.Not(check.IsNil))
c.Assert(len(byteBuffer.Bytes()), Equals, 0) c.Assert(len(byteBuffer.Bytes()), check.Equals, 0)
switch err := err.(type) { switch err := err.(type) {
case ObjectNotFound: case ObjectNotFound:
{ {
c.Assert(err, ErrorMatches, "Object not Found: bucket#dir1") c.Assert(err, check.ErrorMatches, "Object not Found: bucket#dir1")
} }
default: default:
{ {
c.Assert(err, Equals, "fails") c.Assert(err, check.Equals, "fails")
} }
} }
} }
func testGetDirectoryReturnsObjectNotFound(c *C, create func() Storage) { func testGetDirectoryReturnsObjectNotFound(c *check.C, create func() Storage) {
storage := create() storage := create()
err := storage.StoreBucket("bucket") err := storage.StoreBucket("bucket")
c.Assert(err, IsNil) c.Assert(err, check.IsNil)
err = storage.StoreObject("bucket", "dir1/dir2/object", "", bytes.NewBufferString("hello world")) err = storage.StoreObject("bucket", "dir1/dir2/object", "", bytes.NewBufferString("hello world"))
c.Assert(err, IsNil) c.Assert(err, check.IsNil)
var byteBuffer bytes.Buffer var byteBuffer bytes.Buffer
length, err := storage.CopyObjectToWriter(&byteBuffer, "bucket", "dir1") length, err := storage.CopyObjectToWriter(&byteBuffer, "bucket", "dir1")
c.Assert(length, Equals, int64(0)) c.Assert(length, check.Equals, int64(0))
switch err := err.(type) { switch err := err.(type) {
case ObjectNotFound: case ObjectNotFound:
{ {
c.Assert(err.Bucket, Equals, "bucket") c.Assert(err.Bucket, check.Equals, "bucket")
c.Assert(err.Object, Equals, "dir1") c.Assert(err.Object, check.Equals, "dir1")
} }
default: default:
{ {
// force a failure with a line number // force a failure with a line number
c.Assert(err, Equals, "ObjectNotFound") c.Assert(err, check.Equals, "ObjectNotFound")
} }
} }
c.Assert(len(byteBuffer.Bytes()), Equals, 0) c.Assert(len(byteBuffer.Bytes()), check.Equals, 0)
var byteBuffer2 bytes.Buffer var byteBuffer2 bytes.Buffer
length, err = storage.CopyObjectToWriter(&byteBuffer, "bucket", "dir1/") length, err = storage.CopyObjectToWriter(&byteBuffer, "bucket", "dir1/")
c.Assert(length, Equals, int64(0)) c.Assert(length, check.Equals, int64(0))
switch err := err.(type) { switch err := err.(type) {
case ObjectNotFound: case ObjectNotFound:
{ {
c.Assert(err.Bucket, Equals, "bucket") c.Assert(err.Bucket, check.Equals, "bucket")
c.Assert(err.Object, Equals, "dir1/") c.Assert(err.Object, check.Equals, "dir1/")
} }
default: default:
{ {
// force a failure with a line number // force a failure with a line number
c.Assert(err, Equals, "ObjectNotFound") c.Assert(err, check.Equals, "ObjectNotFound")
} }
} }
c.Assert(len(byteBuffer2.Bytes()), Equals, 0) c.Assert(len(byteBuffer2.Bytes()), check.Equals, 0)
} }
func testDefaultContentType(c *C, create func() Storage) { func testDefaultContentType(c *check.C, create func() Storage) {
storage := create() storage := create()
err := storage.StoreBucket("bucket") err := storage.StoreBucket("bucket")
c.Assert(err, IsNil) c.Assert(err, check.IsNil)
// test empty // test empty
err = storage.StoreObject("bucket", "one", "", bytes.NewBufferString("one")) err = storage.StoreObject("bucket", "one", "", bytes.NewBufferString("one"))
metadata, err := storage.GetObjectMetadata("bucket", "one") metadata, err := storage.GetObjectMetadata("bucket", "one")
c.Assert(err, IsNil) c.Assert(err, check.IsNil)
c.Assert(metadata.ContentType, Equals, "application/octet-stream") c.Assert(metadata.ContentType, check.Equals, "application/octet-stream")
// test custom // test custom
storage.StoreObject("bucket", "two", "application/text", bytes.NewBufferString("two")) storage.StoreObject("bucket", "two", "application/text", bytes.NewBufferString("two"))
metadata, err = storage.GetObjectMetadata("bucket", "two") metadata, err = storage.GetObjectMetadata("bucket", "two")
c.Assert(err, IsNil) c.Assert(err, check.IsNil)
c.Assert(metadata.ContentType, Equals, "application/text") c.Assert(metadata.ContentType, check.Equals, "application/text")
// test trim space // test trim space
storage.StoreObject("bucket", "three", "\tapplication/json ", bytes.NewBufferString("three")) storage.StoreObject("bucket", "three", "\tapplication/json ", bytes.NewBufferString("three"))
metadata, err = storage.GetObjectMetadata("bucket", "three") metadata, err = storage.GetObjectMetadata("bucket", "three")
c.Assert(err, IsNil) c.Assert(err, check.IsNil)
c.Assert(metadata.ContentType, Equals, "application/json") c.Assert(metadata.ContentType, check.Equals, "application/json")
} }

View file

@ -25,7 +25,7 @@ var castanagoliTable = crc32.MakeTable(crc32.Castagnoli)
/// Convenience functions /// Convenience functions
// Single caller crc helper // Sum32 - single caller crc helper
func Sum32(buffer []byte) uint32 { func Sum32(buffer []byte) uint32 {
crc := crc32.New(castanagoliTable) crc := crc32.New(castanagoliTable)
crc.Reset() crc.Reset()
@ -33,7 +33,7 @@ func Sum32(buffer []byte) uint32 {
return crc.Sum32() return crc.Sum32()
} }
// Low memory footprint io.Reader based crc helper // Sum - io.Reader based crc helper
func Sum(reader io.Reader) (uint32, error) { func Sum(reader io.Reader) (uint32, error) {
h := New() h := New()
var err error var err error

View file

@ -27,9 +27,9 @@ import (
// Config context // Config context
type Config struct { type Config struct {
configPath string ConfigPath string
configFile string ConfigFile string
configLock *sync.RWMutex ConfigLock *sync.RWMutex
Users map[string]User Users map[string]User
} }
@ -52,22 +52,22 @@ func (c *Config) SetupConfig() error {
return err return err
} }
c.configPath = confPath c.ConfigPath = confPath
c.configFile = path.Join(c.configPath, "config.json") c.ConfigFile = path.Join(c.ConfigPath, "config.json")
if _, err := os.Stat(c.configFile); os.IsNotExist(err) { if _, err := os.Stat(c.ConfigFile); os.IsNotExist(err) {
_, err = os.Create(c.configFile) _, err = os.Create(c.ConfigFile)
if err != nil { if err != nil {
return err return err
} }
} }
c.configLock = new(sync.RWMutex) c.ConfigLock = new(sync.RWMutex)
return nil return nil
} }
// GetConfigPath config file location // GetConfigPath config file location
func (c *Config) GetConfigPath() string { func (c *Config) GetConfigPath() string {
return c.configPath return c.ConfigPath
} }
// IsUserExists verify if user exists // IsUserExists verify if user exists
@ -104,13 +104,13 @@ func (c *Config) AddUser(user User) {
// WriteConfig - write encoded json in config file // WriteConfig - write encoded json in config file
func (c *Config) WriteConfig() error { func (c *Config) WriteConfig() error {
c.configLock.Lock() c.ConfigLock.Lock()
defer c.configLock.Unlock() defer c.ConfigLock.Unlock()
var file *os.File var file *os.File
var err error var err error
file, err = os.OpenFile(c.configFile, os.O_WRONLY, 0666) file, err = os.OpenFile(c.ConfigFile, os.O_WRONLY, 0666)
defer file.Close() defer file.Close()
if err != nil { if err != nil {
return err return err
@ -123,13 +123,13 @@ func (c *Config) WriteConfig() error {
// ReadConfig - read json config file and decode // ReadConfig - read json config file and decode
func (c *Config) ReadConfig() error { func (c *Config) ReadConfig() error {
c.configLock.RLock() c.ConfigLock.RLock()
defer c.configLock.RUnlock() defer c.ConfigLock.RUnlock()
var file *os.File var file *os.File
var err error var err error
file, err = os.OpenFile(c.configFile, os.O_RDONLY, 0666) file, err = os.OpenFile(c.ConfigFile, os.O_RDONLY, 0666)
defer file.Close() defer file.Close()
if err != nil { if err != nil {
return err return err

View file

@ -14,7 +14,7 @@
* limitations under the License. * limitations under the License.
*/ */
package config package config_test
import ( import (
"io/ioutil" "io/ioutil"
@ -23,6 +23,7 @@ import (
"sync" "sync"
"testing" "testing"
"github.com/minio-io/minio/pkg/utils/config"
"github.com/minio-io/minio/pkg/utils/crypto/keys" "github.com/minio-io/minio/pkg/utils/crypto/keys"
. "gopkg.in/check.v1" . "gopkg.in/check.v1"
) )
@ -34,22 +35,22 @@ var _ = Suite(&MySuite{})
func Test(t *testing.T) { TestingT(t) } func Test(t *testing.T) { TestingT(t) }
func (s *MySuite) TestConfig(c *C) { func (s *MySuite) TestConfig(c *C) {
conf := Config{} conf := config.Config{}
conf.configPath, _ = ioutil.TempDir("/tmp", "minio-test-") conf.ConfigPath, _ = ioutil.TempDir("/tmp", "minio-test-")
defer os.RemoveAll(conf.configPath) defer os.RemoveAll(conf.ConfigPath)
conf.configFile = path.Join(conf.configPath, "config.json") conf.ConfigFile = path.Join(conf.ConfigPath, "config.json")
if _, err := os.Stat(conf.configFile); os.IsNotExist(err) { if _, err := os.Stat(conf.ConfigFile); os.IsNotExist(err) {
_, err = os.Create(conf.configFile) _, err = os.Create(conf.ConfigFile)
if err != nil { if err != nil {
c.Fatal(err) c.Fatal(err)
} }
} }
conf.configLock = new(sync.RWMutex) conf.ConfigLock = new(sync.RWMutex)
accesskey, _ := keys.GenerateRandomAlphaNumeric(keys.MinioAccessID) accesskey, _ := keys.GenerateRandomAlphaNumeric(keys.MinioAccessID)
secretkey, _ := keys.GenerateRandomBase64(keys.MinioSecretID) secretkey, _ := keys.GenerateRandomBase64(keys.MinioSecretID)
user := User{ user := config.User{
Name: "gnubot", Name: "gnubot",
AccessKey: string(accesskey), AccessKey: string(accesskey),
SecretKey: string(secretkey), SecretKey: string(secretkey),
@ -64,7 +65,7 @@ func (s *MySuite) TestConfig(c *C) {
accesskey, _ = keys.GenerateRandomAlphaNumeric(keys.MinioAccessID) accesskey, _ = keys.GenerateRandomAlphaNumeric(keys.MinioAccessID)
secretkey, _ = keys.GenerateRandomBase64(keys.MinioSecretID) secretkey, _ = keys.GenerateRandomBase64(keys.MinioSecretID)
user = User{ user = config.User{
Name: "minio", Name: "minio",
AccessKey: string(accesskey), AccessKey: string(accesskey),
SecretKey: string(secretkey), SecretKey: string(secretkey),

View file

@ -14,7 +14,7 @@
* limitations under the License. * limitations under the License.
*/ */
package cpu package cpu_test
import ( import (
"errors" "errors"
@ -23,6 +23,7 @@ import (
"strings" "strings"
"testing" "testing"
"github.com/minio-io/minio/pkg/utils/cpu"
. "gopkg.in/check.v1" . "gopkg.in/check.v1"
) )
@ -49,7 +50,7 @@ func hasCPUFeatureFromOS(feature string) (bool, error) {
func (s *MySuite) TestHasSSE41(c *C) { func (s *MySuite) TestHasSSE41(c *C) {
if runtime.GOOS == "linux" { if runtime.GOOS == "linux" {
var flag = HasSSE41() var flag = cpu.HasSSE41()
osCheck, err := hasCPUFeatureFromOS("sse4_1") osCheck, err := hasCPUFeatureFromOS("sse4_1")
c.Assert(err, IsNil) c.Assert(err, IsNil)
c.Check(flag, Equals, osCheck) c.Check(flag, Equals, osCheck)
@ -58,7 +59,7 @@ func (s *MySuite) TestHasSSE41(c *C) {
func (s *MySuite) TestHasAVX(c *C) { func (s *MySuite) TestHasAVX(c *C) {
if runtime.GOOS == "linux" { if runtime.GOOS == "linux" {
var flag = HasAVX() var flag = cpu.HasAVX()
osFlag, err := hasCPUFeatureFromOS("avx") osFlag, err := hasCPUFeatureFromOS("avx")
c.Assert(err, IsNil) c.Assert(err, IsNil)
c.Check(osFlag, Equals, flag) c.Check(osFlag, Equals, flag)
@ -67,7 +68,7 @@ func (s *MySuite) TestHasAVX(c *C) {
func (s *MySuite) TestHasAVX2(c *C) { func (s *MySuite) TestHasAVX2(c *C) {
if runtime.GOOS == "linux" { if runtime.GOOS == "linux" {
var flag = HasAVX2() var flag = cpu.HasAVX2()
osFlag, err := hasCPUFeatureFromOS("avx2") osFlag, err := hasCPUFeatureFromOS("avx2")
c.Assert(err, IsNil) c.Assert(err, IsNil)
c.Check(osFlag, Equals, flag) c.Check(osFlag, Equals, flag)

View file

@ -14,11 +14,12 @@
* limitations under the License. * limitations under the License.
*/ */
package keys package keys_test
import ( import (
"testing" "testing"
"github.com/minio-io/minio/pkg/utils/crypto/keys"
. "gopkg.in/check.v1" . "gopkg.in/check.v1"
) )
@ -28,11 +29,11 @@ type MySuite struct{}
var _ = Suite(&MySuite{}) var _ = Suite(&MySuite{})
func (s *MySuite) Testing(c *C) { func (s *MySuite) TestingKeys(c *C) {
value, err := GenerateRandomBase64(MinioSecretID) value, err := keys.GenerateRandomBase64(keys.MinioSecretID)
c.Assert(err, IsNil) c.Assert(err, IsNil)
alphanum, err := GenerateRandomAlphaNumeric(MinioAccessID) alphanum, err := keys.GenerateRandomAlphaNumeric(keys.MinioAccessID)
c.Assert(err, IsNil) c.Assert(err, IsNil)
c.Log(string(value)) c.Log(string(value))

View file

@ -1,10 +1,11 @@
package md5 package md5_test
import ( import (
"bytes" "bytes"
"encoding/hex" "encoding/hex"
"testing" "testing"
"github.com/minio-io/minio/pkg/utils/crypto/md5"
. "gopkg.in/check.v1" . "gopkg.in/check.v1"
) )
@ -17,7 +18,7 @@ var _ = Suite(&MySuite{})
func (s *MySuite) TestMd5sum(c *C) { func (s *MySuite) TestMd5sum(c *C) {
testString := []byte("Test string") testString := []byte("Test string")
expectedHash, _ := hex.DecodeString("0fd3dbec9730101bff92acc820befc34") expectedHash, _ := hex.DecodeString("0fd3dbec9730101bff92acc820befc34")
hash, err := Sum(bytes.NewBuffer(testString)) hash, err := md5.Sum(bytes.NewBuffer(testString))
c.Assert(err, IsNil) c.Assert(err, IsNil)
c.Assert(bytes.Equal(expectedHash, hash), Equals, true) c.Assert(bytes.Equal(expectedHash, hash), Equals, true)
} }

View file

@ -14,12 +14,13 @@
* limitations under the License. * limitations under the License.
*/ */
package x509 package x509_test
import ( import (
"testing" "testing"
"time" "time"
"github.com/minio-io/minio/pkg/utils/crypto/x509"
. "gopkg.in/check.v1" . "gopkg.in/check.v1"
) )
@ -30,8 +31,8 @@ type MySuite struct{}
var _ = Suite(&MySuite{}) var _ = Suite(&MySuite{})
func (s *MySuite) Testing(c *C) { func (s *MySuite) Testing(c *C) {
certObj := Certificates{} certObj := x509.Certificates{}
params := Params{ params := x509.Params{
Hostname: "example.com", Hostname: "example.com",
IsCA: false, IsCA: false,
EcdsaCurve: "P224", EcdsaCurve: "P224",

View file

@ -14,7 +14,7 @@
* limitations under the License. * limitations under the License.
*/ */
package split package split_test
import ( import (
"bufio" "bufio"
@ -24,6 +24,7 @@ import (
"strconv" "strconv"
"testing" "testing"
"github.com/minio-io/minio/pkg/utils/split"
. "gopkg.in/check.v1" . "gopkg.in/check.v1"
) )
@ -41,7 +42,7 @@ func (s *MySuite) TestSplitStream(c *C) {
} }
bytesWriter.Flush() bytesWriter.Flush()
reader := bytes.NewReader(bytesBuffer.Bytes()) reader := bytes.NewReader(bytesBuffer.Bytes())
ch := Stream(reader, 25) ch := split.Stream(reader, 25)
var resultsBuffer bytes.Buffer var resultsBuffer bytes.Buffer
resultsWriter := bufio.NewWriter(&resultsBuffer) resultsWriter := bufio.NewWriter(&resultsBuffer)
for chunk := range ch { for chunk := range ch {
@ -52,17 +53,17 @@ func (s *MySuite) TestSplitStream(c *C) {
} }
func (s *MySuite) TestFileSplitJoin(c *C) { func (s *MySuite) TestFileSplitJoin(c *C) {
err := FileWithPrefix("test-data/TESTFILE", 1024, "TESTPREFIX") err := split.FileWithPrefix("test-data/TESTFILE", 1024, "TESTPREFIX")
c.Assert(err, IsNil) c.Assert(err, IsNil)
err = FileWithPrefix("test-data/TESTFILE", 1024, "") err = split.FileWithPrefix("test-data/TESTFILE", 1024, "")
c.Assert(err, Not(IsNil)) c.Assert(err, Not(IsNil))
devnull, err := os.OpenFile(os.DevNull, 2, os.ModeAppend) devnull, err := os.OpenFile(os.DevNull, 2, os.ModeAppend)
defer devnull.Close() defer devnull.Close()
reader := JoinFiles(".", "ERROR") reader := split.JoinFiles(".", "ERROR")
_, err = io.Copy(devnull, reader) _, err = io.Copy(devnull, reader)
c.Assert(err, Not(IsNil)) c.Assert(err, Not(IsNil))
reader = JoinFiles(".", "TESTPREFIX") reader = split.JoinFiles(".", "TESTPREFIX")
_, err = io.Copy(devnull, reader) _, err = io.Copy(devnull, reader)
c.Assert(err, IsNil) c.Assert(err, IsNil)
} }