Migrate this project to minio micro services code

Harshavardhana 2015-10-16 11:26:01 -07:00
parent 8c4119cbeb
commit 762b798767
349 changed files with 3704 additions and 76049 deletions

View file

@ -12,7 +12,8 @@ $ cd minio
```
### Compiling Minio from source
Minio uses ``Makefile`` to wrap around some of the limitations of ``go`` build system. To compile Minio source, simply change to your workspace folder and type ``make``.
Minio uses ``Makefile`` to wrap around some of the redundant checks done through the command line.
```sh
$ make
Checking if proper environment variables are set.. Done

View file

@ -1,10 +0,0 @@
## Contributors
<!-- DO NOT EDIT - CONTRIBUTORS.md is autogenerated from git commit log by contributors.sh script. -->
- Anand Babu (AB) Periasamy <ab@minio.io>
- Anis Elleuch <vadmeste@gmail.com>
- Frederick F. Kautz IV <fkautz@minio.io>
- Harshavardhana <harsha@minio.io>
- Krishna Srinivas <krishna@minio.io>
- Matthew Farrellee <matt@cs.wisc.edu>
- Nate Rosenblum <flander@gmail.com>

View file

@ -16,8 +16,7 @@ RUN apt-get update -y && apt-get install -y -q \
curl \
git \
build-essential \
ca-certificates \
yasm
ca-certificates
RUN curl -O -s https://storage.googleapis.com/golang/${GOLANG_TARBALL} && \
tar -xzf ${GOLANG_TARBALL} -C ${GOROOT%*go*} && \
@ -34,6 +33,6 @@ RUN apt-get remove -y build-essential curl git && \
USER minio
EXPOSE 9000 9001
EXPOSE 9000
CMD ["sh", "-c", "${GOPATH}/bin/minio server"]

View file

@ -2,9 +2,9 @@
### Build Dependencies
This installation document assumes Ubuntu 14.04+ on x86-64 platform.
##### Install Git, GCC, yasm
##### Install Git, GCC
```sh
$ sudo apt-get install git build-essential yasm
$ sudo apt-get install git build-essential
```
##### Install Go 1.5+
@ -39,7 +39,7 @@ $ ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/maste
##### Install Git, Python
```sh
$ brew install git python yasm
$ brew install git python
```
##### Install Go 1.5+

View file

@ -6,12 +6,13 @@ checkdeps:
checkgopath:
@echo "Checking if project is at ${GOPATH}"
@for mcpath in $(echo ${GOPATH} | sed 's/:/\n/g'); do if [ ! -d ${mcpath}/src/github.com/minio/minio ]; then echo "Project not found in ${mcpath}, please follow instructions provided at https://github.com/minio/minio/blob/master/CONTRIBUTING.md#setup-your-minio-github-repository" && exit 1; fi done
@for miniofspath in $(echo ${GOPATH} | sed 's/:/\n/g'); do if [ ! -d ${miniofspath}/src/github.com/minio/minio ]; then echo "Project not found in ${miniofspath}, please follow instructions provided at https://github.com/minio/minio/blob/master/CONTRIBUTING.md#setup-your-minio-github-repository" && exit 1; fi done
getdeps: checkdeps checkgopath
@go get github.com/golang/lint/golint && echo "Installed golint:"
@go get golang.org/x/tools/cmd/vet && echo "Installed vet:"
@go get github.com/fzipp/gocyclo && echo "Installed gocyclo:"
@go get -u github.com/remyoudompheng/go-misc/deadcode && echo "Installed deadcode:"
verifiers: getdeps vet fmt lint cyclo
@ -34,12 +35,11 @@ lint:
cyclo:
@echo "Running $@:"
@GO15VENDOREXPERIMENT=1 gocyclo -over 25 *.go
@GO15VENDOREXPERIMENT=1 gocyclo -over 25 pkg
@GO15VENDOREXPERIMENT=1 gocyclo -over 50 *.go
@GO15VENDOREXPERIMENT=1 gocyclo -over 50 pkg
build: getdeps verifiers
@echo "Installing minio:"
@GO15VENDOREXPERIMENT=1 go generate ./...
@echo "Installing minio:" #@GO15VENDOREXPERIMENT=1 deadcode
test: build
@echo "Running all testing:"
@ -69,4 +69,3 @@ clean:
@echo "Cleaning up all the generated files:"
@rm -fv cover.out
@rm -fv minio
@rm -fv pkg/erasure/*.syso

View file

@ -1,6 +1,6 @@
## Minio Server [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
## Minio [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
Minio is a minimal cloud storage server written in Golang and licensed under [Apache license v2](./LICENSE). Minio is compatible with Amazon S3 APIs. [![Build Status](https://travis-ci.org/minio/minio.svg?branch=master)](https://travis-ci.org/minio/minio)
Minio is a minimal cloud storage server for Micro Services & Magnetic Disks. Written in Golang and licensed under [Apache license v2](./LICENSE). Compatible with Amazon S3 APIs.
## Minio Client
@ -11,64 +11,54 @@ Minio is a minimal cloud storage server written in Golang and licensed under [Ap
- [Java Library](https://github.com/minio/minio-java)
- [Nodejs Library](https://github.com/minio/minio-js)
- [Python Library](https://github.com/minio/minio-py)
- [.Net Library](https://github.com/minio/minio-dotnet)
## Server Roadmap
~~~
Storage Backend:
- Donut: Erasure coded backend.
- Status: Standalone mode complete.
Storage Operations:
- Collective:
- Status: Work in progress.
### Install [![Build Status](https://travis-ci.org/minio/minio.svg?branch=master)](https://travis-ci.org/minio/minio)[![Build status](https://ci.appveyor.com/api/projects/status/k61d0v3ritbwm2su?svg=true)](https://ci.appveyor.com/project/harshavardhana/minio)
Storage Management:
- WebCLI:
- Status: Work in progress.
- Authentication:
- Status: Work in progress.
- Admin Console:
- Status: Work in progress.
- User Console:
- Status: Work in progress.
- Logging:
- Status: Work in progress.
~~~
### Install
<blockquote>
NOTE: If you compile from source, the following options are no longer available. The Minio master branch is going through rapid changes; documentation will be updated subsequently.
</blockquote>
#### GNU/Linux
Download ``minio`` from https://dl.minio.io:9000/updates/2015/Jun/linux-amd64/minio
#### Linux, OS X, Windows
~~~
$ wget https://dl.minio.io:9000/updates/2015/Jun/linux-amd64/minio
$ chmod +x minio
$ ./minio mode memory limit 12GB expire 2h
~~~
#### OS X
Download ``minio`` from https://dl.minio.io:9000/updates/2015/Jun/darwin-amd64/minio
~~~
$ wget https://dl.minio.io:9000/updates/2015/Jun/darwin-amd64/minio
$ chmod +x minio
$ ./minio mode memory limit 12GB expire 2h
$ go get github.com/minio/minio
~~~
### How to use Minio?
[![asciicast](https://asciinema.org/a/21575.png)](https://asciinema.org/a/21575)
~~~
$ minio server
NAME:
minio server - Start Minio cloud storage server.
USAGE:
minio server PATH
EXAMPLES:
1. Start minio server on Linux.
$ minio server /home/shared
2. Start minio server on Windows.
$ minio server C:\MyShare
3. Start minio server bound to a specific IP:PORT, when you have multiple network interfaces.
$ minio --address 192.168.1.101:9000 /home/shared
~~~
~~~
$ minio server ~/Photos
AccessKey: G5GJRH51R2HSUWYPGIX5 SecretKey: uxhBC1Yscut3/u81l5L8Yp636ZUk32N4m/gFASuZ
To configure Minio Client:
$ wget https://dl.minio.io:9000/updates/2015/Oct/darwin-amd64/mc
$ chmod 755 mc
$ ./mc config host add localhost:9000 G5GJRH51R2HSUWYPGIX5 uxhBC1Yscut3/u81l5L8Yp636ZUk32N4m/gFASuZ
$ ./mc mb localhost/photobucket
$ ./mc cp ~/Photos... localhost/photobucket
Starting minio server:
Listening on http://127.0.0.1:9000
Listening on http://172.30.2.17:9000
~~~
### Contribute to Minio Project
Please follow Minio [Contributor's Guide](./CONTRIBUTING.md)
### Jobs
If you think in Lisp or Haskell and hack in go, you would blend right in. Send your github link to callhome@minio.io.

View file

@ -1,5 +1,5 @@
/*
* Minio Cloud Storage, (C) 2014 Minio, Inc.
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View file

@ -65,12 +65,12 @@ const (
NotImplemented
RequestTimeTooSkewed
SignatureDoesNotMatch
TooManyBuckets
MethodNotAllowed
InvalidPart
InvalidPartOrder
AuthorizationHeaderMalformed
MalformedPOSTRequest
BucketNotEmpty
)
// Error codes, non exhaustive list - standard HTTP errors
@ -200,11 +200,6 @@ var errorCodeResponse = map[int]APIError{
Description: "The request signature we calculated does not match the signature you provided.",
HTTPStatusCode: http.StatusForbidden,
},
TooManyBuckets: {
Code: "TooManyBuckets",
Description: "You have attempted to create more buckets than allowed.",
HTTPStatusCode: http.StatusBadRequest,
},
MethodNotAllowed: {
Code: "MethodNotAllowed",
Description: "The specified method is not allowed against this resource.",
@ -235,6 +230,11 @@ var errorCodeResponse = map[int]APIError{
Description: "The body of your POST request is not well-formed multipart/form-data.",
HTTPStatusCode: http.StatusBadRequest,
},
BucketNotEmpty: {
Code: "BucketNotEmpty",
Description: "The bucket you tried to delete is not empty.",
HTTPStatusCode: http.StatusConflict,
},
}
// errorCodeError provides errorCode to Error. It returns empty if the code provided is unknown
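
For context on how these table entries surface to clients: each enum value keys an `APIError` that the response writer serializes as an S3-style XML `<Error>` document. A minimal self-contained sketch of that lookup-and-render step (the `APIError` shape comes from the table above; the enum value, `errorXML` struct, and `main` are illustrative stand-ins):

```go
package main

import (
	"encoding/xml"
	"fmt"
	"net/http"
)

// APIError mirrors the fields used by errorCodeResponse above.
type APIError struct {
	Code           string
	Description    string
	HTTPStatusCode int
}

// BucketNotEmpty stands in for the enum value from the const block.
const BucketNotEmpty = 0

var errorCodeResponse = map[int]APIError{
	BucketNotEmpty: {
		Code:           "BucketNotEmpty",
		Description:    "The bucket you tried to delete is not empty.",
		HTTPStatusCode: http.StatusConflict,
	},
}

// errorXML is the S3-style <Error> document sent back to clients.
type errorXML struct {
	XMLName xml.Name `xml:"Error"`
	Code    string
	Message string
}

func main() {
	e := errorCodeResponse[BucketNotEmpty]
	body, _ := xml.MarshalIndent(errorXML{Code: e.Code, Message: e.Description}, "", "  ")
	fmt.Printf("HTTP %d\n%s\n", e.HTTPStatusCode, body)
}
```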

View file

@ -24,7 +24,7 @@ import (
"runtime"
"strconv"
"github.com/minio/minio/pkg/donut"
"github.com/minio/minio/pkg/fs"
)
//// helpers
@ -63,7 +63,7 @@ func encodeErrorResponse(response interface{}) []byte {
}
// Write object header
func setObjectHeaders(w http.ResponseWriter, metadata donut.ObjectMetadata, contentRange *httpRange) {
func setObjectHeaders(w http.ResponseWriter, metadata fs.ObjectMetadata, contentRange *httpRange) {
// set common headers
if contentRange != nil {
if contentRange.length > 0 {
@ -77,8 +77,8 @@ func setObjectHeaders(w http.ResponseWriter, metadata donut.ObjectMetadata, cont
// set object headers
lastModified := metadata.Created.Format(http.TimeFormat)
// object related headers
w.Header().Set("Content-Type", metadata.Metadata["contentType"])
w.Header().Set("ETag", "\""+metadata.MD5Sum+"\"")
w.Header().Set("Content-Type", "application/octet-stream")
w.Header().Set("ETag", "\""+metadata.Md5+"\"")
w.Header().Set("Last-Modified", lastModified)
// set content range

View file

@ -20,11 +20,11 @@ import (
"net/url"
"strconv"
"github.com/minio/minio/pkg/donut"
"github.com/minio/minio/pkg/fs"
)
// parse bucket url queries
func getBucketResources(values url.Values) (v donut.BucketResourcesMetadata) {
func getBucketResources(values url.Values) (v fs.BucketResourcesMetadata) {
v.Prefix = values.Get("prefix")
v.Marker = values.Get("marker")
v.Maxkeys, _ = strconv.Atoi(values.Get("max-keys"))
@ -34,7 +34,7 @@ func getBucketResources(values url.Values) (v donut.BucketResourcesMetadata) {
}
// parse bucket url queries for ?uploads
func getBucketMultipartResources(values url.Values) (v donut.BucketMultipartResourcesMetadata) {
func getBucketMultipartResources(values url.Values) (v fs.BucketMultipartResourcesMetadata) {
v.Prefix = values.Get("prefix")
v.KeyMarker = values.Get("key-marker")
v.MaxUploads, _ = strconv.Atoi(values.Get("max-uploads"))
@ -45,7 +45,7 @@ func getBucketMultipartResources(values url.Values) (v donut.BucketMultipartReso
}
// parse object url queries
func getObjectResources(values url.Values) (v donut.ObjectResourcesMetadata) {
func getObjectResources(values url.Values) (v fs.ObjectResourcesMetadata) {
v.UploadID = values.Get("uploadId")
v.PartNumberMarker, _ = strconv.Atoi(values.Get("part-number-marker"))
v.MaxParts, _ = strconv.Atoi(values.Get("max-parts"))
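
All three helpers follow one pattern: lift the S3 list parameters off the query string, with `strconv.Atoi` defaulting to 0 on bad numeric input so the handler can reject negatives later. A stand-alone sketch of the same pattern (struct and function names hypothetical):

```go
package main

import (
	"fmt"
	"net/url"
	"strconv"
)

// bucketResources is a hypothetical stand-in for fs.BucketResourcesMetadata.
type bucketResources struct {
	Prefix  string
	Marker  string
	Maxkeys int
}

func parseBucketResources(values url.Values) (v bucketResources) {
	v.Prefix = values.Get("prefix")
	v.Marker = values.Get("marker")
	// Atoi yields 0 on parse failure; the handler rejects negatives later.
	v.Maxkeys, _ = strconv.Atoi(values.Get("max-keys"))
	return v
}

func main() {
	u, _ := url.Parse("http://localhost:9000/bucket?prefix=photos/&max-keys=100")
	fmt.Printf("%+v\n", parseBucketResources(u.Query()))
}
```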

View file

@ -19,7 +19,7 @@ package main
import (
"net/http"
"github.com/minio/minio/pkg/donut"
"github.com/minio/minio/pkg/fs"
)
// Reply date format
@ -33,7 +33,7 @@ const (
//
// output:
// populated struct that can be serialized to match xml and json api spec output
func generateListBucketsResponse(buckets []donut.BucketMetadata) ListBucketsResponse {
func generateListBucketsResponse(buckets []fs.BucketMetadata) ListBucketsResponse {
var listbuckets []*Bucket
var data = ListBucketsResponse{}
var owner = Owner{}
@ -55,7 +55,7 @@ func generateListBucketsResponse(buckets []donut.BucketMetadata) ListBucketsResp
}
// generates an AccessControlPolicy response for the said ACL.
func generateAccessControlPolicyResponse(acl donut.BucketACL) AccessControlPolicyResponse {
func generateAccessControlPolicyResponse(acl fs.BucketACL) AccessControlPolicyResponse {
accessCtrlPolicyResponse := AccessControlPolicyResponse{}
accessCtrlPolicyResponse.Owner = Owner{
ID: "minio",
@ -92,7 +92,7 @@ func generateAccessControlPolicyResponse(acl donut.BucketACL) AccessControlPolic
}
// generates an ListObjects response for the said bucket with other enumerated options.
func generateListObjectsResponse(bucket string, objects []donut.ObjectMetadata, bucketResources donut.BucketResourcesMetadata) ListObjectsResponse {
func generateListObjectsResponse(bucket string, objects []fs.ObjectMetadata, bucketResources fs.BucketResourcesMetadata) ListObjectsResponse {
var contents []*Object
var prefixes []*CommonPrefix
var owner = Owner{}
@ -108,7 +108,7 @@ func generateListObjectsResponse(bucket string, objects []donut.ObjectMetadata,
}
content.Key = object.Object
content.LastModified = object.Created.Format(rfcFormat)
content.ETag = "\"" + object.MD5Sum + "\""
content.ETag = "\"" + object.Md5 + "\""
content.Size = object.Size
content.StorageClass = "STANDARD"
content.Owner = owner
@ -152,11 +152,11 @@ func generateCompleteMultpartUploadResponse(bucket, key, location, etag string)
}
// generateListPartsResponse
func generateListPartsResponse(objectMetadata donut.ObjectResourcesMetadata) ListPartsResponse {
func generateListPartsResponse(objectMetadata fs.ObjectResourcesMetadata) ListPartsResponse {
// TODO - support EncodingType in xml decoding
listPartsResponse := ListPartsResponse{}
listPartsResponse.Bucket = objectMetadata.Bucket
listPartsResponse.Key = objectMetadata.Key
listPartsResponse.Key = objectMetadata.Object
listPartsResponse.UploadID = objectMetadata.UploadID
listPartsResponse.StorageClass = "STANDARD"
listPartsResponse.Initiator.ID = "minio"
@ -182,7 +182,7 @@ func generateListPartsResponse(objectMetadata donut.ObjectResourcesMetadata) Lis
}
// generateListMultipartUploadsResponse
func generateListMultipartUploadsResponse(bucket string, metadata donut.BucketMultipartResourcesMetadata) ListMultipartUploadsResponse {
func generateListMultipartUploadsResponse(bucket string, metadata fs.BucketMultipartResourcesMetadata) ListMultipartUploadsResponse {
listMultipartUploadsResponse := ListMultipartUploadsResponse{}
listMultipartUploadsResponse.Bucket = bucket
listMultipartUploadsResponse.Delimiter = metadata.Delimiter
@ -199,7 +199,7 @@ func generateListMultipartUploadsResponse(bucket string, metadata donut.BucketMu
for _, upload := range metadata.Upload {
newUpload := &Upload{}
newUpload.UploadID = upload.UploadID
newUpload.Key = upload.Key
newUpload.Key = upload.Object
newUpload.Initiated = upload.Initiated.Format(rfcFormat)
listMultipartUploadsResponse.Upload = append(listMultipartUploadsResponse.Upload, newUpload)
}
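
Each generator here only fills a response struct; the actual XML/JSON encoding happens in the top-level writer. A compact illustration of the struct-then-encode flow for bucket listings (field shapes are assumed, not the project's exact types):

```go
package main

import (
	"encoding/xml"
	"fmt"
	"time"
)

// Bucket and ListBucketsResponse approximate the response structs;
// the real types carry more fields (Owner, xmlns, and so on).
type Bucket struct {
	Name         string
	CreationDate string
}

type ListBucketsResponse struct {
	XMLName xml.Name `xml:"ListAllMyBucketsResult"`
	Buckets struct {
		Bucket []Bucket
	}
}

func main() {
	var resp ListBucketsResponse
	resp.Buckets.Bucket = append(resp.Buckets.Bucket, Bucket{
		Name:         "photobucket",
		CreationDate: time.Now().UTC().Format("2006-01-02T15:04:05.000Z"),
	})
	out, _ := xml.MarshalIndent(resp, "", "  ")
	fmt.Println(string(out))
}
```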

View file

@ -26,14 +26,13 @@ import (
"strings"
"time"
"github.com/minio/minio/pkg/probe"
signv4 "github.com/minio/minio/pkg/signature"
"github.com/minio/minio/pkg/fs"
"github.com/minio/minio-xl/pkg/probe"
)
const (
authHeaderPrefix = "AWS4-HMAC-SHA256"
iso8601Format = "20060102T150405Z"
yyyymmdd = "20060102"
)
// getCredentialsFromAuth parse credentials tag from authorization value
@ -92,38 +91,36 @@ func stripAccessKeyID(authHeaderValue string) (string, *probe.Error) {
return "", err.Trace()
}
accessKeyID := credentialElements[0]
if !IsValidAccessKey(accessKeyID) {
if !isValidAccessKey(accessKeyID) {
return "", probe.NewError(errAccessKeyIDInvalid)
}
return accessKeyID, nil
}
// initSignatureV4 initializing signature verification
func initSignatureV4(req *http.Request) (*signv4.Signature, *probe.Error) {
func initSignatureV4(req *http.Request) (*fs.Signature, *probe.Error) {
// strip auth from authorization header
authHeaderValue := req.Header.Get("Authorization")
accessKeyID, err := stripAccessKeyID(authHeaderValue)
if err != nil {
return nil, err.Trace()
}
authConfig, err := LoadConfig()
authConfig, err := loadAuthConfig()
if err != nil {
return nil, err.Trace()
}
authFields := strings.Split(strings.TrimSpace(authHeaderValue), ",")
signedHeaders := strings.Split(strings.Split(strings.TrimSpace(authFields[1]), "=")[1], ";")
signature := strings.Split(strings.TrimSpace(authFields[2]), "=")[1]
for _, user := range authConfig.Users {
if user.AccessKeyID == accessKeyID {
signature := &signv4.Signature{
AccessKeyID: user.AccessKeyID,
SecretAccessKey: user.SecretAccessKey,
Signature: signature,
SignedHeaders: signedHeaders,
Request: req,
}
return signature, nil
if authConfig.AccessKeyID == accessKeyID {
signature := &fs.Signature{
AccessKeyID: authConfig.AccessKeyID,
SecretAccessKey: authConfig.SecretAccessKey,
Signature: signature,
SignedHeaders: signedHeaders,
Request: req,
}
return signature, nil
}
return nil, probe.NewError(errAccessKeyIDInvalid)
}
@ -163,7 +160,7 @@ func applyPolicy(formValues map[string]string) *probe.Error {
if err != nil {
return probe.NewError(err)
}
postPolicyForm, perr := signv4.ParsePostPolicyForm(string(policyBytes))
postPolicyForm, perr := fs.ParsePostPolicyForm(string(policyBytes))
if perr != nil {
return perr.Trace()
}
@ -204,61 +201,57 @@ func applyPolicy(formValues map[string]string) *probe.Error {
}
// initPostPresignedPolicyV4 initializing post policy signature verification
func initPostPresignedPolicyV4(formValues map[string]string) (*signv4.Signature, *probe.Error) {
func initPostPresignedPolicyV4(formValues map[string]string) (*fs.Signature, *probe.Error) {
credentialElements := strings.Split(strings.TrimSpace(formValues["X-Amz-Credential"]), "/")
if len(credentialElements) != 5 {
return nil, probe.NewError(errCredentialTagMalformed)
}
accessKeyID := credentialElements[0]
if !IsValidAccessKey(accessKeyID) {
if !isValidAccessKey(accessKeyID) {
return nil, probe.NewError(errAccessKeyIDInvalid)
}
authConfig, perr := LoadConfig()
authConfig, perr := loadAuthConfig()
if perr != nil {
return nil, perr.Trace()
}
for _, user := range authConfig.Users {
if user.AccessKeyID == accessKeyID {
signature := &signv4.Signature{
AccessKeyID: user.AccessKeyID,
SecretAccessKey: user.SecretAccessKey,
Signature: formValues["X-Amz-Signature"],
PresignedPolicy: formValues["Policy"],
}
return signature, nil
if authConfig.AccessKeyID == accessKeyID {
signature := &fs.Signature{
AccessKeyID: authConfig.AccessKeyID,
SecretAccessKey: authConfig.SecretAccessKey,
Signature: formValues["X-Amz-Signature"],
PresignedPolicy: formValues["Policy"],
}
return signature, nil
}
return nil, probe.NewError(errAccessKeyIDInvalid)
}
// initPresignedSignatureV4 initializing presigned signature verification
func initPresignedSignatureV4(req *http.Request) (*signv4.Signature, *probe.Error) {
func initPresignedSignatureV4(req *http.Request) (*fs.Signature, *probe.Error) {
credentialElements := strings.Split(strings.TrimSpace(req.URL.Query().Get("X-Amz-Credential")), "/")
if len(credentialElements) != 5 {
return nil, probe.NewError(errCredentialTagMalformed)
}
accessKeyID := credentialElements[0]
if !IsValidAccessKey(accessKeyID) {
if !isValidAccessKey(accessKeyID) {
return nil, probe.NewError(errAccessKeyIDInvalid)
}
authConfig, err := LoadConfig()
authConfig, err := loadAuthConfig()
if err != nil {
return nil, err.Trace()
}
signedHeaders := strings.Split(strings.TrimSpace(req.URL.Query().Get("X-Amz-SignedHeaders")), ";")
signature := strings.TrimSpace(req.URL.Query().Get("X-Amz-Signature"))
for _, user := range authConfig.Users {
if user.AccessKeyID == accessKeyID {
signature := &signv4.Signature{
AccessKeyID: user.AccessKeyID,
SecretAccessKey: user.SecretAccessKey,
Signature: signature,
SignedHeaders: signedHeaders,
Presigned: true,
Request: req,
}
return signature, nil
if authConfig.AccessKeyID == accessKeyID {
signature := &fs.Signature{
AccessKeyID: authConfig.AccessKeyID,
SecretAccessKey: authConfig.SecretAccessKey,
Signature: signature,
SignedHeaders: signedHeaders,
Presigned: true,
Request: req,
}
return signature, nil
}
return nil, probe.NewError(errAccessKeyIDInvalid)
}
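
Every variant above starts by splitting the SigV4 credential tag, whose layout is `<access-key>/<date>/<region>/<service>/aws4_request`. A stand-alone sketch of that extraction (helper name hypothetical; the key is AWS's documented example):

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// accessKeyFromCredential pulls the access key out of an
// X-Amz-Credential value, per the AWS Signature V4 layout.
func accessKeyFromCredential(cred string) (string, error) {
	parts := strings.Split(strings.TrimSpace(cred), "/")
	if len(parts) != 5 {
		return "", errors.New("credential tag malformed")
	}
	return parts[0], nil
}

func main() {
	key, err := accessKeyFromCredential("AKIAIOSFODNN7EXAMPLE/20151016/us-east-1/s3/aws4_request")
	fmt.Println(key, err)
}
```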

appveyor.yml (new file, 34 lines)
View file

@ -0,0 +1,34 @@
# version format
version: "{build}"
# Operating system (build VM template)
os: Windows Server 2012 R2
clone_folder: c:\gopath\src\github.com\minio\minio
# environment variables
environment:
GOPATH: c:\gopath
GO15VENDOREXPERIMENT: 1
# scripts that run after cloning repository
install:
- set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
- rd C:\Go /s /q
- appveyor DownloadFile https://storage.googleapis.com/golang/go1.5.1.windows-amd64.zip
- 7z x go1.5.1.windows-amd64.zip -oC:\ >nul
- go version
- go env
# to run your custom scripts instead of automatic MSBuild
build_script:
- go test .
- go test -race .
- go test github.com/minio/minio/pkg...
- go test -race github.com/minio/minio/pkg...
# to disable automatic tests
test: off
# to disable deployment
deploy: off

File diff suppressed because one or more lines are too long

View file

@ -21,21 +21,15 @@ import (
"os/user"
"path/filepath"
"github.com/minio/minio/pkg/probe"
"github.com/minio/minio/pkg/quick"
"github.com/minio/minio-xl/pkg/probe"
"github.com/minio/minio-xl/pkg/quick"
)
// AuthUser container
type AuthUser struct {
Name string `json:"name"`
AccessKeyID string `json:"accessKeyId"`
SecretAccessKey string `json:"secretAccessKey"`
}
// AuthConfig auth keys
type AuthConfig struct {
Version string
Users map[string]*AuthUser
Version string `json:"version"`
AccessKeyID string `json:"accessKeyId"`
SecretAccessKey string `json:"secretAccessKey"`
}
// getAuthConfigPath get users config path
@ -89,19 +83,14 @@ func getAuthConfigFile() (string, *probe.Error) {
if err != nil {
return "", err.Trace()
}
return filepath.Join(authConfigPath, "users.json"), nil
return filepath.Join(authConfigPath, "fsUsers.json"), nil
}
// customConfigPath not accessed from outside only allowed through get/set methods
// customConfigPath for custom config path only for testing purposes
var customConfigPath string
// SetAuthConfigPath - set custom auth config path
func SetAuthConfigPath(configPath string) {
customConfigPath = configPath
}
// SaveConfig save auth config
func SaveConfig(a *AuthConfig) *probe.Error {
// saveAuthConfig saves the auth config
func saveAuthConfig(a *AuthConfig) *probe.Error {
authConfigFile, err := getAuthConfigFile()
if err != nil {
return err.Trace()
@ -116,8 +105,8 @@ func SaveConfig(a *AuthConfig) *probe.Error {
return nil
}
// LoadConfig load auth config
func LoadConfig() (*AuthConfig, *probe.Error) {
// loadAuthConfig loads the auth config
func loadAuthConfig() (*AuthConfig, *probe.Error) {
authConfigFile, err := getAuthConfigFile()
if err != nil {
return nil, err.Trace()
@ -126,8 +115,7 @@ func LoadConfig() (*AuthConfig, *probe.Error) {
return nil, probe.NewError(err)
}
a := &AuthConfig{}
a.Version = "0.0.1"
a.Users = make(map[string]*AuthUser)
a.Version = "1"
qc, err := quick.New(a)
if err != nil {
return nil, err.Trace()
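
The migration flattens the config from a multi-user map to a single version/access-key/secret-key record stored as JSON (now in `fsUsers.json`). A minimal load/save sketch under that shape, using plain `encoding/json` rather than the project's `quick` package:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
)

// AuthConfig matches the flattened shape introduced by this commit.
type AuthConfig struct {
	Version         string `json:"version"`
	AccessKeyID     string `json:"accessKeyId"`
	SecretAccessKey string `json:"secretAccessKey"`
}

func saveAuthConfig(path string, a *AuthConfig) error {
	data, err := json.MarshalIndent(a, "", "  ")
	if err != nil {
		return err
	}
	return os.WriteFile(path, data, 0600)
}

func loadAuthConfig(path string) (*AuthConfig, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	a := &AuthConfig{}
	return a, json.Unmarshal(data, a)
}

func main() {
	path := filepath.Join(os.TempDir(), "fsUsers.json")
	if err := saveAuthConfig(path, &AuthConfig{Version: "1", AccessKeyID: "ACCESS", SecretAccessKey: "SECRET"}); err != nil {
		panic(err)
	}
	cfg, err := loadAuthConfig(path)
	fmt.Println(cfg, err)
}
```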

View file

@ -19,19 +19,34 @@ package main
import (
"crypto/rand"
"encoding/base64"
"regexp"
"github.com/minio/minio/pkg/probe"
"github.com/minio/minio-xl/pkg/probe"
)
const (
minioAccessID = 20
minioSecretID = 40
)
// isValidAccessKey - validate access key
func isValidAccessKey(accessKeyID string) bool {
if accessKeyID == "" {
return true
}
regex := regexp.MustCompile("^[A-Z0-9\\-\\.\\_\\~]{20}$")
return regex.MatchString(accessKeyID)
}
// generateAccessKeyID - generate random alpha numeric value using only uppercase characters
// takes input as size in integer
func generateAccessKeyID() ([]byte, *probe.Error) {
alpha := make([]byte, MinioAccessID)
alpha := make([]byte, minioAccessID)
_, err := rand.Read(alpha)
if err != nil {
return nil, probe.NewError(err)
}
for i := 0; i < MinioAccessID; i++ {
for i := 0; i < minioAccessID; i++ {
alpha[i] = alphaNumericTable[alpha[i]%byte(len(alphaNumericTable))]
}
return alpha, nil
@ -39,32 +54,25 @@ func generateAccessKeyID() ([]byte, *probe.Error) {
// generateSecretAccessKey - generate random base64 numeric value from a random seed.
func generateSecretAccessKey() ([]byte, *probe.Error) {
rb := make([]byte, MinioSecretID)
rb := make([]byte, minioSecretID)
_, err := rand.Read(rb)
if err != nil {
return nil, probe.NewError(err)
}
return []byte(base64.StdEncoding.EncodeToString(rb))[:MinioSecretID], nil
return []byte(base64.StdEncoding.EncodeToString(rb))[:minioSecretID], nil
}
// mustGenerateAccessKeyID - must generate random alpha numeric value using only uppercase characters
// takes input as size in integer
func mustGenerateAccessKeyID() []byte {
alpha := make([]byte, MinioAccessID)
_, err := rand.Read(alpha)
fatalIf(probe.NewError(err), "Unable to get random number from crypto/rand.", nil)
for i := 0; i < MinioAccessID; i++ {
alpha[i] = alphaNumericTable[alpha[i]%byte(len(alphaNumericTable))]
}
alpha, err := generateAccessKeyID()
fatalIf(err.Trace(), "Unable to generate accessKeyID.", nil)
return alpha
}
// mustGenerateSecretAccessKey - generate random base64 numeric value from a random seed.
func mustGenerateSecretAccessKey() []byte {
rb := make([]byte, MinioSecretID)
_, err := rand.Read(rb)
fatalIf(probe.NewError(err), "Unable to get random number from crypto/rand.", nil)
return []byte(base64.StdEncoding.EncodeToString(rb))[:MinioSecretID]
secretKey, err := generateSecretAccessKey()
fatalIf(err.Trace(), "Unable to generate secretAccessKey.", nil)
return secretKey
}
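
Both generators draw bytes from `crypto/rand` and post-process them: access keys map each byte into an alphanumeric table, secret keys are base64-encoded and truncated. A self-contained sketch of the same idea (the table contents here are assumed; the project defines its own):

```go
package main

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
)

const (
	accessKeyLen = 20 // minioAccessID above
	secretKeyLen = 40 // minioSecretID above
)

// alphaNumericTable is assumed for illustration.
var alphaNumericTable = []byte("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ")

func newAccessKey() (string, error) {
	b := make([]byte, accessKeyLen)
	if _, err := rand.Read(b); err != nil {
		return "", err
	}
	// Map each random byte into the table (same modulo trick as above).
	for i := range b {
		b[i] = alphaNumericTable[b[i]%byte(len(alphaNumericTable))]
	}
	return string(b), nil
}

func newSecretKey() (string, error) {
	b := make([]byte, secretKeyLen)
	if _, err := rand.Read(b); err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(b)[:secretKeyLen], nil
}

func main() {
	ak, _ := newAccessKey()
	sk, _ := newSecretKey()
	fmt.Println(ak, sk)
}
```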

View file

@ -17,12 +17,14 @@
package main
import (
"encoding/hex"
"io/ioutil"
"net/http"
"github.com/gorilla/mux"
"github.com/minio/minio/pkg/donut"
"github.com/minio/minio/pkg/probe"
signv4 "github.com/minio/minio/pkg/signature"
"github.com/minio/minio/pkg/fs"
"github.com/minio/minio-xl/pkg/crypto/sha256"
"github.com/minio/minio-xl/pkg/probe"
)
// ListMultipartUploadsHandler - GET Bucket (List Multipart uploads)
@ -33,15 +35,6 @@ import (
// This operation returns at most 1,000 multipart uploads in the response.
//
func (api API) ListMultipartUploadsHandler(w http.ResponseWriter, req *http.Request) {
// Ticket master block
{
op := APIOperation{}
op.ProceedCh = make(chan struct{})
api.OP <- op
// block until ticket master gives us a go
<-op.ProceedCh
}
resources := getBucketMultipartResources(req.URL.Query())
if resources.MaxUploads < 0 {
writeErrorResponse(w, req, InvalidMaxUploads, req.URL.Path)
@ -54,11 +47,11 @@ func (api API) ListMultipartUploadsHandler(w http.ResponseWriter, req *http.Requ
vars := mux.Vars(req)
bucket := vars["bucket"]
resources, err := api.Donut.ListMultipartUploads(bucket, resources)
resources, err := api.Filesystem.ListMultipartUploads(bucket, resources)
if err != nil {
errorIf(err.Trace(), "ListMultipartUploads failed.", nil)
switch err.ToGoError().(type) {
case donut.BucketNotFound:
case fs.BucketNotFound:
writeErrorResponse(w, req, NoSuchBucket, req.URL.Path)
default:
writeErrorResponse(w, req, InternalError, req.URL.Path)
@ -81,15 +74,6 @@ func (api API) ListMultipartUploadsHandler(w http.ResponseWriter, req *http.Requ
// criteria to return a subset of the objects in a bucket.
//
func (api API) ListObjectsHandler(w http.ResponseWriter, req *http.Request) {
// Ticket master block
{
op := APIOperation{}
op.ProceedCh = make(chan struct{})
api.OP <- op
// block until Ticket master gives us a go
<-op.ProceedCh
}
if isRequestUploads(req.URL.Query()) {
api.ListMultipartUploadsHandler(w, req)
return
@ -107,7 +91,7 @@ func (api API) ListObjectsHandler(w http.ResponseWriter, req *http.Request) {
vars := mux.Vars(req)
bucket := vars["bucket"]
objects, resources, err := api.Donut.ListObjects(bucket, resources)
objects, resources, err := api.Filesystem.ListObjects(bucket, resources)
if err == nil {
// generate response
response := generateListObjectsResponse(bucket, objects, resources)
@ -119,13 +103,13 @@ func (api API) ListObjectsHandler(w http.ResponseWriter, req *http.Request) {
return
}
switch err.ToGoError().(type) {
case donut.BucketNameInvalid:
case fs.BucketNameInvalid:
writeErrorResponse(w, req, InvalidBucketName, req.URL.Path)
case donut.BucketNotFound:
case fs.BucketNotFound:
writeErrorResponse(w, req, NoSuchBucket, req.URL.Path)
case donut.ObjectNotFound:
case fs.ObjectNotFound:
writeErrorResponse(w, req, NoSuchKey, req.URL.Path)
case donut.ObjectNameInvalid:
case fs.ObjectNameInvalid:
writeErrorResponse(w, req, NoSuchKey, req.URL.Path)
default:
errorIf(err.Trace(), "ListObjects failed.", nil)
@ -138,23 +122,7 @@ func (api API) ListObjectsHandler(w http.ResponseWriter, req *http.Request) {
// This implementation of the GET operation returns a list of all buckets
// owned by the authenticated sender of the request.
func (api API) ListBucketsHandler(w http.ResponseWriter, req *http.Request) {
// Ticket master block
{
op := APIOperation{}
op.ProceedCh = make(chan struct{})
api.OP <- op
// block until Ticket master gives us a go
<-op.ProceedCh
}
// uncomment this when we have webcli
// without access key credentials one cannot list buckets
// if _, err := StripAccessKeyID(req); err != nil {
// writeErrorResponse(w, req, AccessDenied, req.URL.Path)
// return
// }
buckets, err := api.Donut.ListBuckets()
buckets, err := api.Filesystem.ListBuckets()
if err == nil {
// generate response
response := generateListBucketsResponse(buckets)
@ -173,15 +141,6 @@ func (api API) ListBucketsHandler(w http.ResponseWriter, req *http.Request) {
// ----------
// This implementation of the PUT operation creates a new bucket for authenticated request
func (api API) PutBucketHandler(w http.ResponseWriter, req *http.Request) {
// Ticket master block
{
op := APIOperation{}
op.ProceedCh = make(chan struct{})
api.OP <- op
// block until Ticket master gives us a go
<-op.ProceedCh
}
if _, err := stripAccessKeyID(req.Header.Get("Authorization")); err != nil {
writeErrorResponse(w, req, AccessDenied, req.URL.Path)
return
@ -197,7 +156,7 @@ func (api API) PutBucketHandler(w http.ResponseWriter, req *http.Request) {
vars := mux.Vars(req)
bucket := vars["bucket"]
var signature *signv4.Signature
var signature *fs.Signature
if !api.Anonymous {
if _, ok := req.Header["Authorization"]; ok {
// Init signature V4 verification
@ -214,24 +173,36 @@ func (api API) PutBucketHandler(w http.ResponseWriter, req *http.Request) {
// if body of request is non-nil then check for validity of Content-Length
if req.Body != nil {
/// if Content-Length missing, deny the request
size := req.Header.Get("Content-Length")
if size == "" {
if req.Header.Get("Content-Length") == "" {
writeErrorResponse(w, req, MissingContentLength, req.URL.Path)
return
}
if signature != nil {
locationBytes, err := ioutil.ReadAll(req.Body)
if err == nil {
sh := sha256.New()
sh.Write(locationBytes)
ok, perr := signature.DoesSignatureMatch(hex.EncodeToString(sh.Sum(nil)))
if perr != nil {
errorIf(perr.Trace(), "MakeBucket failed.", nil)
writeErrorResponse(w, req, InternalError, req.URL.Path)
return
}
if !ok {
writeErrorResponse(w, req, SignatureDoesNotMatch, req.URL.Path)
return
}
}
}
}
err := api.Donut.MakeBucket(bucket, getACLTypeString(aclType), req.Body, signature)
err := api.Filesystem.MakeBucket(bucket, getACLTypeString(aclType))
if err != nil {
errorIf(err.Trace(), "MakeBucket failed.", nil)
switch err.ToGoError().(type) {
case signv4.DoesNotMatch:
writeErrorResponse(w, req, SignatureDoesNotMatch, req.URL.Path)
case donut.TooManyBuckets:
writeErrorResponse(w, req, TooManyBuckets, req.URL.Path)
case donut.BucketNameInvalid:
case fs.BucketNameInvalid:
writeErrorResponse(w, req, InvalidBucketName, req.URL.Path)
case donut.BucketExists:
case fs.BucketExists:
writeErrorResponse(w, req, BucketAlreadyExists, req.URL.Path)
default:
writeErrorResponse(w, req, InternalError, req.URL.Path)
@ -248,15 +219,6 @@ func (api API) PutBucketHandler(w http.ResponseWriter, req *http.Request) {
// This implementation of the POST operation handles object creation with a specified
// signature policy in multipart/form-data
func (api API) PostPolicyBucketHandler(w http.ResponseWriter, req *http.Request) {
// Ticket master block
{
op := APIOperation{}
op.ProceedCh = make(chan struct{})
api.OP <- op
// block until Ticket master gives us a go
<-op.ProceedCh
}
// if body of request is non-nil then check for validity of Content-Length
if req.Body != nil {
/// if Content-Length missing, deny the request
@ -307,32 +269,30 @@ func (api API) PostPolicyBucketHandler(w http.ResponseWriter, req *http.Request)
writeErrorResponse(w, req, MalformedPOSTRequest, req.URL.Path)
return
}
metadata, perr := api.Donut.CreateObject(bucket, object, "", 0, fileBody, nil, nil)
metadata, perr := api.Filesystem.CreateObject(bucket, object, "", 0, fileBody, nil)
if perr != nil {
errorIf(perr.Trace(), "CreateObject failed.", nil)
switch perr.ToGoError().(type) {
case donut.BucketNotFound:
case fs.BucketNotFound:
writeErrorResponse(w, req, NoSuchBucket, req.URL.Path)
case donut.BucketNameInvalid:
case fs.BucketNameInvalid:
writeErrorResponse(w, req, InvalidBucketName, req.URL.Path)
case donut.ObjectExists:
writeErrorResponse(w, req, MethodNotAllowed, req.URL.Path)
case donut.BadDigest:
case fs.BadDigest:
writeErrorResponse(w, req, BadDigest, req.URL.Path)
case signv4.DoesNotMatch:
case fs.SignatureDoesNotMatch:
writeErrorResponse(w, req, SignatureDoesNotMatch, req.URL.Path)
case donut.IncompleteBody:
case fs.IncompleteBody:
writeErrorResponse(w, req, IncompleteBody, req.URL.Path)
case donut.EntityTooLarge:
case fs.EntityTooLarge:
writeErrorResponse(w, req, EntityTooLarge, req.URL.Path)
case donut.InvalidDigest:
case fs.InvalidDigest:
writeErrorResponse(w, req, InvalidDigest, req.URL.Path)
default:
writeErrorResponse(w, req, InternalError, req.URL.Path)
}
return
}
w.Header().Set("ETag", metadata.MD5Sum)
w.Header().Set("ETag", "\""+metadata.Md5+"\"")
writeSuccessResponse(w)
}
@ -340,15 +300,6 @@ func (api API) PostPolicyBucketHandler(w http.ResponseWriter, req *http.Request)
// ----------
// This implementation of the PUT operation modifies the bucketACL for authenticated request
func (api API) PutBucketACLHandler(w http.ResponseWriter, req *http.Request) {
// Ticket master block
{
op := APIOperation{}
op.ProceedCh = make(chan struct{})
api.OP <- op
// block until Ticket master gives us a go
<-op.ProceedCh
}
// read from 'x-amz-acl'
aclType := getACLType(req)
if aclType == unsupportedACLType {
@ -359,13 +310,13 @@ func (api API) PutBucketACLHandler(w http.ResponseWriter, req *http.Request) {
vars := mux.Vars(req)
bucket := vars["bucket"]
err := api.Donut.SetBucketMetadata(bucket, map[string]string{"acl": getACLTypeString(aclType)})
err := api.Filesystem.SetBucketMetadata(bucket, map[string]string{"acl": getACLTypeString(aclType)})
if err != nil {
errorIf(err.Trace(), "PutBucketACL failed.", nil)
switch err.ToGoError().(type) {
case donut.BucketNameInvalid:
case fs.BucketNameInvalid:
writeErrorResponse(w, req, InvalidBucketName, req.URL.Path)
case donut.BucketNotFound:
case fs.BucketNotFound:
writeErrorResponse(w, req, NoSuchBucket, req.URL.Path)
default:
writeErrorResponse(w, req, InternalError, req.URL.Path)
@ -382,25 +333,16 @@ func (api API) PutBucketACLHandler(w http.ResponseWriter, req *http.Request) {
// know its ``acl``. This operation will return a 404 response
// if the bucket is not found and 403 for invalid credentials.
func (api API) GetBucketACLHandler(w http.ResponseWriter, req *http.Request) {
// Ticket master block
{
op := APIOperation{}
op.ProceedCh = make(chan struct{})
api.OP <- op
// block until Ticket master gives us a go
<-op.ProceedCh
}
vars := mux.Vars(req)
bucket := vars["bucket"]
bucketMetadata, err := api.Donut.GetBucketMetadata(bucket)
bucketMetadata, err := api.Filesystem.GetBucketMetadata(bucket)
if err != nil {
errorIf(err.Trace(), "GetBucketMetadata failed.", nil)
switch err.ToGoError().(type) {
case donut.BucketNotFound:
case fs.BucketNotFound:
writeErrorResponse(w, req, NoSuchBucket, req.URL.Path)
case donut.BucketNameInvalid:
case fs.BucketNameInvalid:
writeErrorResponse(w, req, InvalidBucketName, req.URL.Path)
default:
writeErrorResponse(w, req, InternalError, req.URL.Path)
@ -423,25 +365,16 @@ func (api API) GetBucketACLHandler(w http.ResponseWriter, req *http.Request) {
// have permission to access it. Otherwise, the operation might
// return responses such as 404 Not Found and 403 Forbidden.
func (api API) HeadBucketHandler(w http.ResponseWriter, req *http.Request) {
// Ticket master block
{
op := APIOperation{}
op.ProceedCh = make(chan struct{})
api.OP <- op
// block until Ticket master gives us a go
<-op.ProceedCh
}
vars := mux.Vars(req)
bucket := vars["bucket"]
_, err := api.Donut.GetBucketMetadata(bucket)
_, err := api.Filesystem.GetBucketMetadata(bucket)
if err != nil {
errorIf(err.Trace(), "GetBucketMetadata failed.", nil)
switch err.ToGoError().(type) {
case donut.BucketNotFound:
case fs.BucketNotFound:
writeErrorResponse(w, req, NoSuchBucket, req.URL.Path)
case donut.BucketNameInvalid:
case fs.BucketNameInvalid:
writeErrorResponse(w, req, InvalidBucketName, req.URL.Path)
default:
writeErrorResponse(w, req, InternalError, req.URL.Path)
@ -450,3 +383,24 @@ func (api API) HeadBucketHandler(w http.ResponseWriter, req *http.Request) {
}
writeSuccessResponse(w)
}
// DeleteBucketHandler - Delete bucket
func (api API) DeleteBucketHandler(w http.ResponseWriter, req *http.Request) {
vars := mux.Vars(req)
bucket := vars["bucket"]
err := api.Filesystem.DeleteBucket(bucket)
if err != nil {
errorIf(err.Trace(), "DeleteBucket failed.", nil)
switch err.ToGoError().(type) {
case fs.BucketNotFound:
writeErrorResponse(w, req, NoSuchBucket, req.URL.Path)
case fs.BucketNotEmpty:
writeErrorResponse(w, req, BucketNotEmpty, req.URL.Path)
default:
writeErrorResponse(w, req, InternalError, req.URL.Path)
}
return
}
writeSuccessResponse(w)
}
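
Like the other handlers, `DeleteBucketHandler` maps typed `fs` errors onto S3 error codes with a type switch on `err.ToGoError()`. The pattern in isolation, with hypothetical stand-ins for the typed errors:

```go
package main

import (
	"errors"
	"fmt"
)

// BucketNotFound and BucketNotEmpty are hypothetical stand-ins for the
// typed errors defined in pkg/fs.
type BucketNotFound struct{ Bucket string }

func (e BucketNotFound) Error() string { return "bucket not found: " + e.Bucket }

type BucketNotEmpty struct{ Bucket string }

func (e BucketNotEmpty) Error() string { return "bucket not empty: " + e.Bucket }

// s3ErrorCode maps a backend error onto an S3 error-code name.
func s3ErrorCode(err error) string {
	switch err.(type) {
	case BucketNotFound:
		return "NoSuchBucket"
	case BucketNotEmpty:
		return "BucketNotEmpty"
	default:
		return "InternalError"
	}
}

func main() {
	fmt.Println(s3ErrorCode(BucketNotFound{"photos"}))   // NoSuchBucket
	fmt.Println(s3ErrorCode(errors.New("disk failure"))) // InternalError
}
```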

View file

@ -20,9 +20,6 @@ _init() {
shopt -s extglob
## Minimum required versions for build dependencies
GCC_VERSION="4.0"
LLVM_VERSION="7.0.0"
YASM_VERSION="1.2.0"
GIT_VERSION="1.0"
GO_VERSION="1.5.1"
OSX_VERSION="10.8"
@ -180,29 +177,6 @@ check_deps() {
if [ $? -ge 2 ]; then
MISSING="${MISSING} git"
fi
case ${UNAME%% *} in
"Linux")
check_version "$(env gcc --version 2>/dev/null | sed 's/^.* \([0-9.]*\).*$/\1/' | head -1)" "${GCC_VERSION}"
if [ $? -ge 2 ]; then
MISSING="${MISSING} build-essential(${GCC_VERSION})"
fi
;;
"Darwin")
check_version "$(env gcc --version 2>/dev/null | awk '{print $4}' | head -1)" "${LLVM_VERSION}"
if [ $? -ge 2 ]; then
MISSING="${MISSING} xcode-cli(${LLVM_VERSION})"
fi
;;
"*")
;;
esac
check_version "$(env yasm --version 2>/dev/null | sed 's/^.* \([0-9.]*\).*$/\1/' | head -1)" "${YASM_VERSION}"
if [ $? -ge 2 ]; then
MISSING="${MISSING} yasm(${YASM_VERSION})"
fi
}
main() {

View file

@ -27,8 +27,7 @@ import (
type logLevel int
const (
levelUnknown logLevel = iota
levelPrint
levelPrint logLevel = iota + 1
levelDebug
levelInfo
levelError

View file

@ -1,15 +0,0 @@
#!/bin/bash
set -e
cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
# see also ".mailmap" for how email addresses and names are deduplicated
{
cat <<-'EOH'
## Contributors
<!-- DO NOT EDIT - CONTRIBUTORS.md is autogenerated from git commit log by contributors.sh script. -->
EOH
echo
git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf | sed 's/^/- /g'
} > CONTRIBUTORS.md

View file

@ -1,229 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"crypto/tls"
"encoding/json"
"fmt"
"net"
"net/http"
"os"
"strings"
"github.com/minio/cli"
"github.com/minio/minio/pkg/minhttp"
"github.com/minio/minio/pkg/probe"
)
var controllerCmd = cli.Command{
Name: "controller",
Usage: "Start minio controller",
Action: controllerMain,
CustomHelpTemplate: `NAME:
minio {{.Name}} - {{.Description}}
USAGE:
minio {{.Name}} [OPTION]
EXAMPLES:
1. Start minio controller
$ minio {{.Name}}
2. Fetch stored access keys
$ minio {{.Name}} keys
`,
}
// configureControllerRPC instance
func configureControllerRPC(conf minioConfig, rpcHandler http.Handler) (*http.Server, *probe.Error) {
// Minio server config
rpcServer := &http.Server{
Addr: conf.ControllerAddress,
Handler: rpcHandler,
MaxHeaderBytes: 1 << 20,
}
if conf.TLS {
var err error
rpcServer.TLSConfig = &tls.Config{}
rpcServer.TLSConfig.Certificates = make([]tls.Certificate, 1)
rpcServer.TLSConfig.Certificates[0], err = tls.LoadX509KeyPair(conf.CertFile, conf.KeyFile)
if err != nil {
return nil, probe.NewError(err)
}
}
host, port, err := net.SplitHostPort(conf.ControllerAddress)
if err != nil {
return nil, probe.NewError(err)
}
var hosts []string
switch {
case host != "":
hosts = append(hosts, host)
default:
addrs, err := net.InterfaceAddrs()
if err != nil {
return nil, probe.NewError(err)
}
for _, addr := range addrs {
if addr.Network() == "ip+net" {
host := strings.Split(addr.String(), "/")[0]
if ip := net.ParseIP(host); ip.To4() != nil {
hosts = append(hosts, host)
}
}
}
}
for _, host := range hosts {
if conf.TLS {
Printf("Starting minio controller on: https://%s:%s, PID: %d\n", host, port, os.Getpid())
} else {
Printf("Starting minio controller on: http://%s:%s, PID: %d\n", host, port, os.Getpid())
}
}
return rpcServer, nil
}
// startController starts a minio controller
func startController(conf minioConfig) *probe.Error {
rpcServer, err := configureControllerRPC(conf, getControllerRPCHandler(conf.Anonymous))
if err != nil {
return err.Trace()
}
// Setting rate limit to 'zero' no ratelimiting implemented
if err := minhttp.ListenAndServeLimited(0, rpcServer); err != nil {
return err.Trace()
}
return nil
}
func genAuthFirstTime() (*AuthConfig, *probe.Error) {
if isAuthConfigFileExists() {
return nil, nil
}
if err := createAuthConfigPath(); err != nil {
return nil, err
}
// Initialize new config, since config file doesn't exist yet
config := &AuthConfig{}
config.Version = "0.0.1"
config.Users = make(map[string]*AuthUser)
config.Users["admin"] = &AuthUser{
Name: "admin",
AccessKeyID: "admin",
SecretAccessKey: string(mustGenerateSecretAccessKey()),
}
config.Users["user"] = &AuthUser{
Name: "user",
AccessKeyID: string(mustGenerateAccessKeyID()),
SecretAccessKey: string(mustGenerateSecretAccessKey()),
}
if err := SaveConfig(config); err != nil {
return nil, err.Trace()
}
return config, nil
}
func getAuth() (*AuthConfig, *probe.Error) {
config, err := LoadConfig()
if err != nil {
return nil, err.Trace()
}
return config, nil
}
type accessKeys struct {
*AuthUser
}
func (a accessKeys) String() string {
return colorizeMessage(fmt.Sprintf("Username: %s, AccessKey: %s, SecretKey: %s", a.Name, a.AccessKeyID, a.SecretAccessKey))
}
// JSON - json formatted output
func (a accessKeys) JSON() string {
b, err := json.Marshal(a)
errorIf(probe.NewError(err), "Unable to marshal json", nil)
return string(b)
}
// firstTimeAuth first time authorization
func firstTimeAuth() *probe.Error {
conf, err := genAuthFirstTime()
if err != nil {
return err.Trace()
}
if conf != nil {
Println("Running for first time, generating access keys.")
for _, user := range conf.Users {
if globalJSONFlag {
Println(accessKeys{user}.JSON())
} else {
Println(accessKeys{user})
}
}
Println("To fetch your keys again.")
Println(" $ minio controller keys")
}
return nil
}
func getControllerConfig(c *cli.Context) minioConfig {
certFile := c.GlobalString("cert")
keyFile := c.GlobalString("key")
if (certFile != "" && keyFile == "") || (certFile == "" && keyFile != "") {
Fatalln("Both certificate and key are required to enable https.")
}
tls := (certFile != "" && keyFile != "")
return minioConfig{
ControllerAddress: c.GlobalString("address-controller"),
TLS: tls,
CertFile: certFile,
KeyFile: keyFile,
RateLimit: c.GlobalInt("ratelimit"),
Anonymous: c.GlobalBool("anonymous"),
}
}
func controllerMain(c *cli.Context) {
if c.Args().Present() && c.Args().First() != "keys" {
cli.ShowCommandHelpAndExit(c, "controller", 1)
}
if c.Args().First() == "keys" {
conf, err := getAuth()
fatalIf(err.Trace(), "Failed to fetch keys for minio controller.", nil)
if conf != nil {
for _, user := range conf.Users {
if globalJSONFlag {
Println(accessKeys{user}.JSON())
} else {
Println(accessKeys{user})
}
}
}
return
}
err := firstTimeAuth()
fatalIf(err.Trace(), "Failed to generate keys for minio.", nil)
err = startController(getControllerConfig(c))
fatalIf(err.Trace(), "Failed to start minio controller.", nil)
}
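
The removed controller prints credentials either as plain text or JSON depending on the global `--json` flag, via a type implementing both `String()` and a `JSON()` method. That dual-render pattern in isolation (colorization omitted):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type accessKeys struct {
	Name            string `json:"name"`
	AccessKeyID     string `json:"accessKeyId"`
	SecretAccessKey string `json:"secretAccessKey"`
}

func (a accessKeys) String() string {
	return fmt.Sprintf("Username: %s, AccessKey: %s, SecretKey: %s",
		a.Name, a.AccessKeyID, a.SecretAccessKey)
}

// JSON renders the same record for --json consumers.
func (a accessKeys) JSON() string {
	b, err := json.Marshal(a)
	if err != nil {
		return "{}"
	}
	return string(b)
}

func main() {
	a := accessKeys{Name: "admin", AccessKeyID: "ACCESS", SecretAccessKey: "SECRET"}
	fmt.Println(a)        // human-readable
	fmt.Println(a.JSON()) // machine-readable
}
```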

View file

@ -1,246 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"bytes"
"encoding/hex"
"io"
"io/ioutil"
"net/http"
"sort"
"strings"
"time"
"github.com/minio/minio/pkg/crypto/sha256"
"github.com/minio/minio/pkg/probe"
)
type rpcSignatureHandler struct {
handler http.Handler
}
// RPCSignatureHandler to validate authorization header for the incoming request.
func RPCSignatureHandler(h http.Handler) http.Handler {
return rpcSignatureHandler{h}
}
type rpcSignature struct {
AccessKeyID string
SecretAccessKey string
Signature string
SignedHeaders []string
Request *http.Request
}
// getCanonicalHeaders generate a list of request headers with their values
func (r *rpcSignature) getCanonicalHeaders(signedHeaders map[string][]string) string {
var headers []string
vals := make(map[string][]string)
for k, vv := range signedHeaders {
headers = append(headers, strings.ToLower(k))
vals[strings.ToLower(k)] = vv
}
headers = append(headers, "host")
sort.Strings(headers)
var buf bytes.Buffer
for _, k := range headers {
buf.WriteString(k)
buf.WriteByte(':')
switch {
case k == "host":
buf.WriteString(r.Request.Host)
fallthrough
default:
for idx, v := range vals[k] {
if idx > 0 {
buf.WriteByte(',')
}
buf.WriteString(v)
}
buf.WriteByte('\n')
}
}
return buf.String()
}
// getSignedHeaders generate a string i.e alphabetically sorted, semicolon-separated list of lowercase request header names
func (r *rpcSignature) getSignedHeaders(signedHeaders map[string][]string) string {
var headers []string
for k := range signedHeaders {
headers = append(headers, strings.ToLower(k))
}
headers = append(headers, "host")
sort.Strings(headers)
return strings.Join(headers, ";")
}
// extractSignedHeaders extract signed headers from Authorization header
func (r rpcSignature) extractSignedHeaders() map[string][]string {
extractedSignedHeadersMap := make(map[string][]string)
for _, header := range r.SignedHeaders {
val, ok := r.Request.Header[http.CanonicalHeaderKey(header)]
if !ok {
// if not found continue, we will fail later
continue
}
extractedSignedHeadersMap[header] = val
}
return extractedSignedHeadersMap
}
// getCanonicalRequest generate a canonical request of style
//
// canonicalRequest =
// <HTTPMethod>\n
// <CanonicalURI>\n
// <CanonicalQueryString>\n
// <CanonicalHeaders>\n
// <SignedHeaders>\n
// <HashedPayload>
//
func (r *rpcSignature) getCanonicalRequest() string {
payload := r.Request.Header.Get(http.CanonicalHeaderKey("x-minio-content-sha256"))
r.Request.URL.RawQuery = strings.Replace(r.Request.URL.Query().Encode(), "+", "%20", -1)
encodedPath := getURLEncodedName(r.Request.URL.Path)
// convert any space strings back to "+"
encodedPath = strings.Replace(encodedPath, "+", "%20", -1)
canonicalRequest := strings.Join([]string{
r.Request.Method,
encodedPath,
r.Request.URL.RawQuery,
r.getCanonicalHeaders(r.extractSignedHeaders()),
r.getSignedHeaders(r.extractSignedHeaders()),
payload,
}, "\n")
return canonicalRequest
}
// getScope generate a string of a specific date, an AWS region, and a service
func (r rpcSignature) getScope(t time.Time) string {
scope := strings.Join([]string{
t.Format(yyyymmdd),
"milkyway",
"rpc",
"rpc_request",
}, "/")
return scope
}
// getStringToSign a string based on selected query values
func (r rpcSignature) getStringToSign(canonicalRequest string, t time.Time) string {
stringToSign := rpcAuthHeaderPrefix + "\n" + t.Format(iso8601Format) + "\n"
stringToSign = stringToSign + r.getScope(t) + "\n"
stringToSign = stringToSign + hex.EncodeToString(sha256.Sum256([]byte(canonicalRequest)))
return stringToSign
}
// getSigningKey hmac seed to calculate final signature
func (r rpcSignature) getSigningKey(t time.Time) []byte {
secret := r.SecretAccessKey
date := sumHMAC([]byte("MINIO"+secret), []byte(t.Format(yyyymmdd)))
region := sumHMAC(date, []byte("milkyway"))
service := sumHMAC(region, []byte("rpc"))
signingKey := sumHMAC(service, []byte("rpc_request"))
return signingKey
}
// getSignature final signature in hexadecimal form
func (r rpcSignature) getSignature(signingKey []byte, stringToSign string) string {
return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))
}
func (r rpcSignature) DoesSignatureMatch(hashedPayload string) (bool, *probe.Error) {
// set the newly calculated payload
r.Request.Header.Set("X-Minio-Content-Sha256", hashedPayload)
// Add date if not present throw error
var date string
if date = r.Request.Header.Get(http.CanonicalHeaderKey("x-minio-date")); date == "" {
if date = r.Request.Header.Get("Date"); date == "" {
return false, probe.NewError(errMissingDateHeader)
}
}
t, err := time.Parse(iso8601Format, date)
if err != nil {
return false, probe.NewError(err)
}
canonicalRequest := r.getCanonicalRequest()
stringToSign := r.getStringToSign(canonicalRequest, t)
signingKey := r.getSigningKey(t)
newSignature := r.getSignature(signingKey, stringToSign)
if newSignature != r.Signature {
return false, nil
}
return true, nil
}
func isRequestSignatureRPC(req *http.Request) bool {
if _, ok := req.Header["Authorization"]; ok {
return ok
}
return false
}
func (s rpcSignatureHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
var signature *rpcSignature
if isRequestSignatureRPC(r) {
// Init signature V4 verification
var err *probe.Error
signature, err = initSignatureRPC(r)
if err != nil {
switch err.ToGoError() {
case errInvalidRegion:
errorIf(err.Trace(), "Unknown region in authorization header.", nil)
writeErrorResponse(w, r, AuthorizationHeaderMalformed, r.URL.Path)
return
case errAccessKeyIDInvalid:
errorIf(err.Trace(), "Invalid access key id.", nil)
writeErrorResponse(w, r, InvalidAccessKeyID, r.URL.Path)
return
default:
errorIf(err.Trace(), "Initializing signature v4 failed.", nil)
writeErrorResponse(w, r, InternalError, r.URL.Path)
return
}
}
buffer := new(bytes.Buffer)
if _, err := io.Copy(buffer, r.Body); err != nil {
errorIf(probe.NewError(err), "Unable to read payload from request body.", nil)
writeErrorResponse(w, r, InternalError, r.URL.Path)
return
}
value := sha256.Sum256(buffer.Bytes())
ok, err := signature.DoesSignatureMatch(hex.EncodeToString(value[:]))
if err != nil {
errorIf(err.Trace(), "Unable to verify signature.", nil)
writeErrorResponse(w, r, InternalError, r.URL.Path)
return
}
if !ok {
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
return
}
// Copy the buffer back into request body to be read by the RPC service callers
r.Body = ioutil.NopCloser(buffer)
s.handler.ServeHTTP(w, r)
} else {
writeErrorResponse(w, r, AccessDenied, r.URL.Path)
}
}
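
The removed RPC signature scheme mirrors AWS SigV4: derive a signing key through a chained HMAC over date, region, service, and request type, then HMAC the string-to-sign. A self-contained sketch of the key derivation, using the `MINIO`/`milkyway`/`rpc`/`rpc_request` constants from `getSigningKey` above:

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"time"
)

func sumHMAC(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

// signingKey mirrors rpcSignature.getSigningKey: a four-step HMAC chain
// seeded with "MINIO"+secret, then date, region, service, request type.
func signingKey(secret string, t time.Time) []byte {
	date := sumHMAC([]byte("MINIO"+secret), []byte(t.Format("20060102")))
	region := sumHMAC(date, []byte("milkyway"))
	service := sumHMAC(region, []byte("rpc"))
	return sumHMAC(service, []byte("rpc_request"))
}

func main() {
	fmt.Println(hex.EncodeToString(signingKey("SECRET", time.Now().UTC())))
}
```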

View file

@ -1,122 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"net/http"
"strings"
"github.com/minio/minio/pkg/probe"
)
const (
rpcAuthHeaderPrefix = "MINIORPC"
)
// getRPCCredentialsFromAuth parse credentials tag from authorization value
// Authorization:
// Authorization: MINIORPC Credential=admin/20130524/milkyway/rpc/rpc_request,
// SignedHeaders=host;x-minio-date, Signature=fe5f80f77d5fa3beca038a248ff027d0445342fe2855ddc963176630326f1024
func getRPCCredentialsFromAuth(authValue string) ([]string, *probe.Error) {
if authValue == "" {
return nil, probe.NewError(errMissingAuthHeaderValue)
}
authFields := strings.Split(strings.TrimSpace(authValue), ",")
if len(authFields) != 3 {
return nil, probe.NewError(errInvalidAuthHeaderValue)
}
authPrefixFields := strings.Fields(authFields[0])
if len(authPrefixFields) != 2 {
return nil, probe.NewError(errMissingFieldsAuthHeader)
}
if authPrefixFields[0] != rpcAuthHeaderPrefix {
return nil, probe.NewError(errInvalidAuthHeaderPrefix)
}
credentials := strings.Split(strings.TrimSpace(authPrefixFields[1]), "=")
if len(credentials) != 2 {
return nil, probe.NewError(errMissingFieldsCredentialTag)
}
if len(strings.Split(strings.TrimSpace(authFields[1]), "=")) != 2 {
return nil, probe.NewError(errMissingFieldsSignedHeadersTag)
}
if len(strings.Split(strings.TrimSpace(authFields[2]), "=")) != 2 {
return nil, probe.NewError(errMissingFieldsSignatureTag)
}
credentialElements := strings.Split(strings.TrimSpace(credentials[1]), "/")
if len(credentialElements) != 5 {
return nil, probe.NewError(errCredentialTagMalformed)
}
return credentialElements, nil
}
// verify if rpcAuthHeader value has valid region
func isValidRPCRegion(authHeaderValue string) *probe.Error {
credentialElements, err := getRPCCredentialsFromAuth(authHeaderValue)
if err != nil {
return err.Trace()
}
region := credentialElements[2]
if region != "milkyway" {
return probe.NewError(errInvalidRegion)
}
return nil
}
// stripRPCAccessKeyID - strip only access key id from auth header
func stripRPCAccessKeyID(authHeaderValue string) (string, *probe.Error) {
if err := isValidRPCRegion(authHeaderValue); err != nil {
return "", err.Trace()
}
credentialElements, err := getRPCCredentialsFromAuth(authHeaderValue)
if err != nil {
return "", err.Trace()
}
if credentialElements[0] != "admin" {
return "", probe.NewError(errAccessKeyIDInvalid)
}
return credentialElements[0], nil
}
// initSignatureRPC initializing rpc signature verification
func initSignatureRPC(req *http.Request) (*rpcSignature, *probe.Error) {
// strip auth from authorization header
authHeaderValue := req.Header.Get("Authorization")
accessKeyID, err := stripRPCAccessKeyID(authHeaderValue)
if err != nil {
return nil, err.Trace()
}
authConfig, err := LoadConfig()
if err != nil {
return nil, err.Trace()
}
authFields := strings.Split(strings.TrimSpace(authHeaderValue), ",")
signedHeaders := strings.Split(strings.Split(strings.TrimSpace(authFields[1]), "=")[1], ";")
signature := strings.Split(strings.TrimSpace(authFields[2]), "=")[1]
for _, user := range authConfig.Users {
if user.AccessKeyID == accessKeyID {
signature := &rpcSignature{
AccessKeyID: user.AccessKeyID,
SecretAccessKey: user.SecretAccessKey,
Signature: signature,
SignedHeaders: signedHeaders,
Request: req,
}
return signature, nil
}
}
return nil, probe.NewError(errAccessKeyIDInvalid)
}
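
Parsing the `MINIORPC` Authorization header, as done above, reduces to splitting three comma-separated `key=value` fields. A stand-alone sketch (types and helper hypothetical):

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

type authFields struct {
	Credential    string
	SignedHeaders []string
	Signature     string
}

// parseAuthHeader splits "MINIORPC Credential=..., SignedHeaders=..., Signature=...".
func parseAuthHeader(v string) (*authFields, error) {
	parts := strings.Split(strings.TrimSpace(v), ",")
	if len(parts) != 3 {
		return nil, errors.New("invalid auth header")
	}
	get := func(s string) string {
		kv := strings.SplitN(strings.TrimSpace(s), "=", 2)
		if len(kv) != 2 {
			return ""
		}
		return kv[1]
	}
	return &authFields{
		Credential:    get(strings.TrimPrefix(strings.TrimSpace(parts[0]), "MINIORPC ")),
		SignedHeaders: strings.Split(get(parts[1]), ";"),
		Signature:     get(parts[2]),
	}, nil
}

func main() {
	f, err := parseAuthHeader("MINIORPC Credential=admin/20130524/milkyway/rpc/rpc_request, SignedHeaders=host;x-minio-date, Signature=fe5f80f7")
	fmt.Printf("%+v %v\n", f, err)
}
```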

View file

@ -1,300 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"errors"
"net"
"net/http"
"net/url"
"os"
"runtime"
"strings"
"github.com/gorilla/rpc/v2/json"
"github.com/minio/minio/pkg/probe"
)
type controllerRPCService struct {
serverList []ServerRep
}
// generateAuth generate new auth keys for a user
func generateAuth(args *AuthArgs, reply *AuthRep) *probe.Error {
config, err := LoadConfig()
if err != nil {
if os.IsNotExist(err.ToGoError()) {
// Initialize new config, since config file doesn't exist yet
config = &AuthConfig{}
config.Version = "0.0.1"
config.Users = make(map[string]*AuthUser)
} else {
return err.Trace()
}
}
if _, ok := config.Users[args.User]; ok {
return probe.NewError(errors.New("Credentials already set, if you wish to change this invoke Reset() method"))
}
accessKeyID, err := generateAccessKeyID()
if err != nil {
return err.Trace()
}
reply.AccessKeyID = string(accessKeyID)
secretAccessKey, err := generateSecretAccessKey()
if err != nil {
return err.Trace()
}
reply.SecretAccessKey = string(secretAccessKey)
reply.Name = args.User
config.Users[args.User] = &AuthUser{
Name: args.User,
AccessKeyID: string(accessKeyID),
SecretAccessKey: string(secretAccessKey),
}
if err := SaveConfig(config); err != nil {
return err.Trace()
}
return nil
}
// fetchAuth fetches auth keys for a user
func fetchAuth(args *AuthArgs, reply *AuthRep) *probe.Error {
config, err := LoadConfig()
if err != nil {
return err.Trace()
}
if _, ok := config.Users[args.User]; !ok {
return probe.NewError(errors.New("User not found"))
}
reply.AccessKeyID = config.Users[args.User].AccessKeyID
reply.SecretAccessKey = config.Users[args.User].SecretAccessKey
reply.Name = args.User
return nil
}
// resetAuth resets auth keys for a user
func resetAuth(args *AuthArgs, reply *AuthRep) *probe.Error {
config, err := LoadConfig()
if err != nil {
return err.Trace()
}
if _, ok := config.Users[args.User]; !ok {
return probe.NewError(errors.New("User not found"))
}
accessKeyID, err := generateAccessKeyID()
if err != nil {
return err.Trace()
}
reply.AccessKeyID = string(accessKeyID)
secretAccessKey, err := generateSecretAccessKey()
if err != nil {
return err.Trace()
}
reply.SecretAccessKey = string(secretAccessKey)
reply.Name = args.User
config.Users[args.User] = &AuthUser{
Name: args.User,
AccessKeyID: string(accessKeyID),
SecretAccessKey: string(secretAccessKey),
}
return SaveConfig(config).Trace()
}
// Generate auth keys
func (s *controllerRPCService) GenerateAuth(r *http.Request, args *AuthArgs, reply *AuthRep) error {
if strings.TrimSpace(args.User) == "" {
return errors.New("Invalid argument")
}
if err := generateAuth(args, reply); err != nil {
return probe.WrapError(err)
}
return nil
}
// Fetch auth keys
func (s *controllerRPCService) FetchAuth(r *http.Request, args *AuthArgs, reply *AuthRep) error {
if strings.TrimSpace(args.User) == "" {
return errors.New("Invalid argument")
}
if err := fetchAuth(args, reply); err != nil {
return probe.WrapError(err)
}
return nil
}
// Reset auth keys, generates new set of auth keys
func (s *controllerRPCService) ResetAuth(r *http.Request, args *AuthArgs, reply *AuthRep) error {
if strings.TrimSpace(args.User) == "" {
return errors.New("Invalid argument")
}
if err := resetAuth(args, reply); err != nil {
return probe.WrapError(err)
}
return nil
}
func readAuthConfig() (*AuthConfig, *probe.Error) {
authConfig, err := LoadConfig()
if err != nil {
return nil, err.Trace()
}
return authConfig, nil
}
func proxyRequest(method, host string, ssl bool, res interface{}) *probe.Error {
u := &url.URL{}
if ssl {
u.Scheme = "https"
} else {
u.Scheme = "http"
}
if _, _, err := net.SplitHostPort(host); err == nil {
u.Host = host
} else {
u.Host = host + ":9002"
}
u.Path = "/rpc"
op := rpcOperation{
Method: method,
Request: ServerArg{},
}
authConfig, err := readAuthConfig()
if err != nil {
return err.Trace()
}
request, err := newRPCRequest(authConfig, u.String(), op, nil)
if err != nil {
return err.Trace()
}
var resp *http.Response
resp, err = request.Do()
if err != nil {
return err.Trace()
}
if err := json.DecodeClientResponse(resp.Body, res); err != nil {
return probe.NewError(err)
}
return nil
}
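
The host defaulting above leans on net.SplitHostPort failing for a bare hostname. A standalone sketch of the same rule (the :9002 default matches the server RPC port used elsewhere in this commit):

```go
package main

import (
	"fmt"
	"net"
)

// withDefaultPort mirrors the defaulting done in proxyRequest: a bare host
// gets the server RPC port appended, an explicit host:port is kept as-is.
func withDefaultPort(host string) string {
	if _, _, err := net.SplitHostPort(host); err == nil {
		return host
	}
	return host + ":9002"
}

func main() {
	fmt.Println(withDefaultPort("localhost"))      // localhost:9002
	fmt.Println(withDefaultPort("localhost:9100")) // localhost:9100
}
```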
// StorageStats returns dummy storage stats
func (s *controllerRPCService) StorageStats(r *http.Request, args *ControllerArgs, reply *StorageStatsRep) error {
err := proxyRequest("Donut.StorageStats", args.Host, args.SSL, reply)
if err != nil {
return probe.WrapError(err)
}
return nil
}
// RebalanceStats returns dummy rebalance stats
func (s *controllerRPCService) RebalanceStats(r *http.Request, args *ControllerArgs, reply *RebalanceStatsRep) error {
err := proxyRequest("Donut.RebalanceStats", args.Host, args.SSL, reply)
if err != nil {
return probe.WrapError(err)
}
return nil
}
func (s *controllerRPCService) AddServer(r *http.Request, args *ControllerArgs, res *ServerRep) error {
err := proxyRequest("Server.Add", args.Host, args.SSL, res)
if err != nil {
return probe.WrapError(err)
}
s.serverList = append(s.serverList, *res)
return nil
}
func (s *controllerRPCService) DiscoverServers(r *http.Request, args *DiscoverArgs, rep *DiscoverRep) error {
c := make(chan DiscoverRepEntry)
defer close(c)
for _, host := range args.Hosts {
go func(c chan DiscoverRepEntry, host string) {
// decode into a local reply; writing into the shared rep from many goroutines would be a data race
var discoverRep DiscoverRep
err := proxyRequest("Server.Version", host, args.SSL, &discoverRep)
if err != nil {
c <- DiscoverRepEntry{host, err.ToGoError().Error()}
return
}
c <- DiscoverRepEntry{host, ""}
}(c, host)
}
for range args.Hosts {
entry := <-c
rep.Entry = append(rep.Entry, entry)
}
return nil
}
func (s *controllerRPCService) GetControllerNetInfo(r *http.Request, args *ServerArg, res *ControllerNetInfoRep) error {
addrs, err := net.InterfaceAddrs()
if err != nil {
return err
}
for _, addr := range addrs {
res.NetInfo = append(res.NetInfo, addr.String())
}
return nil
}
func (s *controllerRPCService) GetServerMemStats(r *http.Request, args *ControllerArgs, res *MemStatsRep) error {
err := proxyRequest("Server.MemStats", args.Host, args.SSL, res)
if err != nil {
return probe.WrapError(err)
}
return nil
}
func (s *controllerRPCService) GetServerDiskStats(r *http.Request, args *ControllerArgs, res *DiskStatsRep) error {
err := proxyRequest("Server.DiskStats", args.Host, args.SSL, res)
if err != nil {
return probe.WrapError(err)
}
return nil
}
func (s *controllerRPCService) GetServerSysInfo(r *http.Request, args *ControllerArgs, res *SysInfoRep) error {
err := proxyRequest("Server.SysInfo", args.Host, args.SSL, res)
if err != nil {
return probe.WrapError(err)
}
return nil
}
func (s *controllerRPCService) ListServers(r *http.Request, args *ControllerArgs, res *ListRep) error {
res.List = s.serverList
return nil
}
func (s *controllerRPCService) GetServerVersion(r *http.Request, args *ControllerArgs, res *VersionRep) error {
err := proxyRequest("Server.Version", args.Host, args.SSL, res)
if err != nil {
return probe.WrapError(err)
}
return nil
}
func (s *controllerRPCService) GetVersion(r *http.Request, args *ControllerArgs, res *VersionRep) error {
res.Version = "0.0.1"
res.BuildDate = minioVersion
res.Architecture = runtime.GOARCH
res.OperatingSystem = runtime.GOOS
return nil
}
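
These controller methods are served as Gorilla JSON-RPC calls over HTTP. A hedged client-side sketch of invoking Controller.GetVersion; the endpoint, port, and the reply field tags are assumptions (the real client in this codebase goes through newRPCRequest, which also attaches the signature headers verified by initSignatureRPC):

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"

	"github.com/gorilla/rpc/v2/json"
)

// versionRep approximates the VersionRep reply used above; field names are assumptions.
type versionRep struct {
	Version         string `json:"version"`
	BuildDate       string `json:"buildDate"`
	Architecture    string `json:"arch"`
	OperatingSystem string `json:"os"`
}

func main() {
	// Encode a JSON-RPC request body for the controller method.
	body, err := json.EncodeClientRequest("Controller.GetVersion", &struct{}{})
	if err != nil {
		panic(err)
	}
	// Placeholder controller endpoint.
	resp, err := http.Post("http://localhost:9001/rpc", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	var reply versionRep
	if err := json.DecodeClientResponse(resp.Body, &reply); err != nil {
		panic(err)
	}
	fmt.Println(reply.Version, reply.Architecture, reply.OperatingSystem)
}
```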

View file

@ -1,257 +0,0 @@
/*
* Minio Cloud Storage, (C) 2014 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"os"
"github.com/gorilla/rpc/v2/json"
. "gopkg.in/check.v1"
)
type ControllerRPCSuite struct {
root string
url *url.URL
req *http.Request
body io.ReadSeeker
config *AuthConfig
}
var _ = Suite(&ControllerRPCSuite{})
var (
testControllerRPC *httptest.Server
testServerRPC *httptest.Server
)
func (s *ControllerRPCSuite) SetUpSuite(c *C) {
root, err := ioutil.TempDir(os.TempDir(), "api-")
c.Assert(err, IsNil)
s.root = root
SetAuthConfigPath(root)
secretAccessKey, perr := generateSecretAccessKey()
c.Assert(perr, IsNil)
authConf := &AuthConfig{}
authConf.Users = make(map[string]*AuthUser)
authConf.Users["admin"] = &AuthUser{
Name: "admin",
AccessKeyID: "admin",
SecretAccessKey: string(secretAccessKey),
}
s.config = authConf
SetAuthConfigPath(root)
perr = SaveConfig(authConf)
c.Assert(perr, IsNil)
testControllerRPC = httptest.NewServer(getControllerRPCHandler(false))
testServerRPC = httptest.NewUnstartedServer(getServerRPCHandler(false))
testServerRPC.Config.Addr = ":9002"
testServerRPC.Start()
url, gerr := url.Parse(testServerRPC.URL)
c.Assert(gerr, IsNil)
s.url = url
}
func (s *ControllerRPCSuite) TearDownSuite(c *C) {
os.RemoveAll(s.root)
testServerRPC.Close()
testControllerRPC.Close()
}
func (s *ControllerRPCSuite) TestMemStats(c *C) {
op := rpcOperation{
Method: "Controller.GetServerMemStats",
Request: ControllerArgs{Host: s.url.Host},
}
req, err := newRPCRequest(s.config, testControllerRPC.URL+"/rpc", op, http.DefaultTransport)
c.Assert(err, IsNil)
c.Assert(req.Get("Content-Type"), Equals, "application/json")
resp, err := req.Do()
c.Assert(err, IsNil)
c.Assert(resp.StatusCode, Equals, http.StatusOK)
var reply MemStatsRep
c.Assert(json.DecodeClientResponse(resp.Body, &reply), IsNil)
resp.Body.Close()
c.Assert(reply, Not(DeepEquals), MemStatsRep{})
}
func (s *ControllerRPCSuite) TestDiskStats(c *C) {
op := rpcOperation{
Method: "Controller.GetServerDiskStats",
Request: ControllerArgs{Host: s.url.Host},
}
req, err := newRPCRequest(s.config, testControllerRPC.URL+"/rpc", op, http.DefaultTransport)
c.Assert(err, IsNil)
c.Assert(req.Get("Content-Type"), Equals, "application/json")
resp, err := req.Do()
c.Assert(err, IsNil)
c.Assert(resp.StatusCode, Equals, http.StatusOK)
var reply DiskStatsRep
c.Assert(json.DecodeClientResponse(resp.Body, &reply), IsNil)
resp.Body.Close()
c.Assert(reply, Not(DeepEquals), DiskStatsRep{})
}
func (s *ControllerRPCSuite) TestSysInfo(c *C) {
op := rpcOperation{
Method: "Controller.GetServerSysInfo",
Request: ControllerArgs{Host: s.url.Host},
}
req, err := newRPCRequest(s.config, testControllerRPC.URL+"/rpc", op, http.DefaultTransport)
c.Assert(err, IsNil)
c.Assert(req.Get("Content-Type"), Equals, "application/json")
resp, err := req.Do()
c.Assert(err, IsNil)
c.Assert(resp.StatusCode, Equals, http.StatusOK)
var reply SysInfoRep
c.Assert(json.DecodeClientResponse(resp.Body, &reply), IsNil)
resp.Body.Close()
c.Assert(reply, Not(DeepEquals), SysInfoRep{})
}
func (s *ControllerRPCSuite) TestServerList(c *C) {
op := rpcOperation{
Method: "Controller.ListServers",
Request: ControllerArgs{Host: s.url.Host},
}
req, err := newRPCRequest(s.config, testControllerRPC.URL+"/rpc", op, http.DefaultTransport)
c.Assert(err, IsNil)
c.Assert(req.Get("Content-Type"), Equals, "application/json")
resp, err := req.Do()
c.Assert(err, IsNil)
c.Assert(resp.StatusCode, Equals, http.StatusOK)
var reply ServerListRep
c.Assert(json.DecodeClientResponse(resp.Body, &reply), IsNil)
resp.Body.Close()
c.Assert(reply, Not(DeepEquals), ServerListRep{List: []ServerRep{}})
}
func (s *ControllerRPCSuite) TestServerAdd(c *C) {
op := rpcOperation{
Method: "Controller.AddServer",
Request: ControllerArgs{Host: s.url.Host},
}
req, err := newRPCRequest(s.config, testControllerRPC.URL+"/rpc", op, http.DefaultTransport)
c.Assert(err, IsNil)
c.Assert(req.Get("Content-Type"), Equals, "application/json")
resp, err := req.Do()
c.Assert(err, IsNil)
c.Assert(resp.StatusCode, Equals, http.StatusOK)
var reply DefaultRep
c.Assert(json.DecodeClientResponse(resp.Body, &reply), IsNil)
resp.Body.Close()
c.Assert(reply, Not(DeepEquals), DefaultRep{nil, "Added"})
}
func (s *ControllerRPCSuite) TestAuth(c *C) {
op := rpcOperation{
Method: "Controller.GenerateAuth",
Request: AuthArgs{User: "newuser"},
}
req, err := newRPCRequest(s.config, testControllerRPC.URL+"/rpc", op, http.DefaultTransport)
c.Assert(err, IsNil)
c.Assert(req.Get("Content-Type"), Equals, "application/json")
resp, err := req.Do()
c.Assert(err, IsNil)
c.Assert(resp.StatusCode, Equals, http.StatusOK)
var reply AuthRep
c.Assert(json.DecodeClientResponse(resp.Body, &reply), IsNil)
resp.Body.Close()
c.Assert(reply, Not(DeepEquals), AuthRep{})
c.Assert(len(reply.AccessKeyID), Equals, 20)
c.Assert(len(reply.SecretAccessKey), Equals, 40)
c.Assert(len(reply.Name), Not(Equals), 0)
op = rpcOperation{
Method: "Controller.FetchAuth",
Request: AuthArgs{User: "newuser"},
}
req, err = newRPCRequest(s.config, testControllerRPC.URL+"/rpc", op, http.DefaultTransport)
c.Assert(err, IsNil)
c.Assert(req.Get("Content-Type"), Equals, "application/json")
resp, err = req.Do()
c.Assert(err, IsNil)
c.Assert(resp.StatusCode, Equals, http.StatusOK)
var newReply AuthRep
c.Assert(json.DecodeClientResponse(resp.Body, &newReply), IsNil)
resp.Body.Close()
c.Assert(newReply, Not(DeepEquals), AuthRep{})
c.Assert(reply.AccessKeyID, Equals, newReply.AccessKeyID)
c.Assert(reply.SecretAccessKey, Equals, newReply.SecretAccessKey)
c.Assert(len(reply.Name), Not(Equals), 0)
op = rpcOperation{
Method: "Controller.ResetAuth",
Request: AuthArgs{User: "newuser"},
}
req, err = newRPCRequest(s.config, testControllerRPC.URL+"/rpc", op, http.DefaultTransport)
c.Assert(err, IsNil)
c.Assert(req.Get("Content-Type"), Equals, "application/json")
resp, err = req.Do()
c.Assert(err, IsNil)
c.Assert(resp.StatusCode, Equals, http.StatusOK)
var resetReply AuthRep
c.Assert(json.DecodeClientResponse(resp.Body, &resetReply), IsNil)
resp.Body.Close()
c.Assert(resetReply, Not(DeepEquals), AuthRep{})
c.Assert(reply.AccessKeyID, Not(Equals), resetReply.AccessKeyID)
c.Assert(reply.SecretAccessKey, Not(Equals), resetReply.SecretAccessKey)
c.Assert(len(reply.Name), Not(Equals), 0)
// these operations should fail
/// generating access for existing user fails
op = rpcOperation{
Method: "Controller.GenerateAuth",
Request: AuthArgs{User: "newuser"},
}
req, err = newRPCRequest(s.config, testControllerRPC.URL+"/rpc", op, http.DefaultTransport)
c.Assert(err, IsNil)
c.Assert(req.Get("Content-Type"), Equals, "application/json")
resp, err = req.Do()
c.Assert(err, IsNil)
c.Assert(resp.StatusCode, Equals, http.StatusBadRequest)
/// null user provided invalid
op = rpcOperation{
Method: "Controller.GenerateAuth",
Request: AuthArgs{User: ""},
}
req, err = newRPCRequest(s.config, testControllerRPC.URL+"/rpc", op, http.DefaultTransport)
c.Assert(err, IsNil)
c.Assert(req.Get("Content-Type"), Equals, "application/json")
resp, err = req.Do()
c.Assert(err, IsNil)
c.Assert(resp.StatusCode, Equals, http.StatusBadRequest)
}

View file

@ -1,47 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"io/ioutil"
"os"
"github.com/minio/minio/pkg/probe"
)
// isUsable checks whether the provided mountPath exists (creating it if necessary) and is writable
func isUsable(mountPath string) (bool, *probe.Error) {
_, e := os.Stat(mountPath)
if e != nil {
e := os.MkdirAll(mountPath, 0700)
if e != nil {
return false, probe.NewError(e)
}
}
testFile, e := ioutil.TempFile(mountPath, "writetest-")
if e != nil {
return false, probe.NewError(e)
}
defer testFile.Close()
testFileName := testFile.Name()
if e := os.Remove(testFileName); e != nil {
return false, probe.NewError(e)
}
return true, nil
}
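
A short usage sketch, assuming it is compiled alongside the file above (the path is a placeholder):

```go
package main

import "fmt"

func main() {
	// Placeholder path; on a real deployment this would be an export mount point.
	if _, err := isUsable("/mnt/export1"); err != nil {
		fmt.Println("mount path not usable:", err.ToGoError())
		return
	}
	fmt.Println("mount path is usable")
}
```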

View file

@ -1,100 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"os"
"path/filepath"
"github.com/minio/cli"
"github.com/minio/minio/pkg/donut"
"github.com/minio/minio/pkg/probe"
)
var (
donutSubCommands = []cli.Command{
{
Name: "make",
Description: "make a donut",
Action: makeDonutMain,
CustomHelpTemplate: `NAME:
minio donut {{.Name}} - {{.Description}}
USAGE:
minio donut {{.Name}} DONUTNAME [DISKS...]
EXAMPLES:
1. Make a donut with 4 exports
$ minio donut {{.Name}} mongodb-backup /mnt/export1 /mnt/export2 /mnt/export3 /mnt/export4
2. Make a donut with 16 exports
$ minio donut {{.Name}} operational-data /mnt/export1 /mnt/export2 /mnt/export3 /mnt/export4 /mnt/export5 \
/mnt/export6 /mnt/export7 /mnt/export8 /mnt/export9 /mnt/export10 /mnt/export11 \
/mnt/export12 /mnt/export13 /mnt/export14 /mnt/export15 /mnt/export16
`,
},
}
donutCmd = cli.Command{
Name: "donut",
Usage: "Create and manage a donut configuration",
Subcommands: donutSubCommands,
}
)
func makeDonutMain(c *cli.Context) {
if !c.Args().Present() || c.Args().First() == "help" {
cli.ShowCommandHelpAndExit(c, "make", 1)
}
donutName := c.Args().First()
if c.Args().First() != "" {
if !donut.IsValidDonut(donutName) {
Fatalf("Invalid donutname %s\n", donutName)
}
}
var disks []string
for _, disk := range c.Args().Tail() {
if _, err := isUsable(disk); err != nil {
Fatalln(err.Trace())
}
disks = append(disks, disk)
}
for _, disk := range disks {
if err := os.MkdirAll(filepath.Join(disk, donutName), 0700); err != nil {
Fatalln(probe.NewError(err))
}
}
hostname, err := os.Hostname()
if err != nil {
Fatalln(probe.NewError(err))
}
donutConfig := &donut.Config{}
donutConfig.Version = "0.0.1"
donutConfig.DonutName = donutName
donutConfig.NodeDiskMap = make(map[string][]string)
// keep the disks in the exact order they were specified, do not try to sort them
donutConfig.NodeDiskMap[hostname] = disks
// default cache size of 512MB
donutConfig.MaxSize = 512000000
if err := donut.SaveConfig(donutConfig); err != nil {
Fatalln(err.Trace())
}
Infoln("Success!")
}
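
For illustration, running `minio donut make backup /mnt/export1 /mnt/export2` on a host named node1 would persist a config equivalent to the sketch below (hostname and paths are placeholders; field names follow the code above):

```go
package main

import "github.com/minio/minio/pkg/donut"

func main() {
	donutConfig := &donut.Config{}
	donutConfig.Version = "0.0.1"
	donutConfig.DonutName = "backup"
	// disks stay in the exact order given on the command line
	donutConfig.NodeDiskMap = map[string][]string{
		"node1": {"/mnt/export1", "/mnt/export2"},
	}
	donutConfig.MaxSize = 512000000 // 512MB cache ceiling
	if err := donut.SaveConfig(donutConfig); err != nil {
		panic(err)
	}
}
```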

View file

@ -1,67 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"net/http"
"runtime"
)
type donutRPCService struct{}
func (s *donutRPCService) ListNodes(r *http.Request, arg *DonutArg, rep *ListNodesRep) error {
rep.Nodes = []struct {
Hostname string `json:"hostname"`
Address string `json:"address"`
ID string `json:"id"`
}{
{
Hostname: "localhost",
Address: "192.168.1.102:9000",
ID: "6F27CB16-493D-40FA-B035-2A2E5646066A",
},
}
return nil
}
// Usage bytes
const (
PB = 1024 * 1024 * 1024 * 1024 * 1024
TB = 1024 * 1024 * 1024 * 1024
GB = 1024 * 1024 * 1024
)
func (s *donutRPCService) StorageStats(r *http.Request, arg *DonutArg, rep *StorageStatsRep) error {
rep.Buckets = []BucketStats{{"bucket1", 4 * TB}, {"bucket2", 120 * TB}, {"bucket3", 45 * TB}}
return nil
}
func (s *donutRPCService) RebalanceStats(r *http.Request, arg *DonutArg, rep *RebalanceStatsRep) error {
rep.State = make(map[string]string)
rep.State["bucket1/obj1"] = "inProgress"
rep.State["bucket2/obj2"] = "finished"
rep.State["bucket3/obj3"] = "errored"
rep.State["bucket4/obj4"] = "unknownState"
return nil
}
func (s *donutRPCService) Version(r *http.Request, arg *ServerArg, rep *DonutVersionRep) error {
rep.Version = "0.1.0"
rep.Architecture = runtime.GOARCH
rep.OperatingSystem = runtime.GOOS
return nil
}

View file

@ -28,20 +28,6 @@ var (
Usage: "ADDRESS:PORT for cloud storage access.",
}
addressControllerFlag = cli.StringFlag{
Name: "address-controller",
Hide: true,
Value: ":9001",
Usage: "ADDRESS:PORT for management console access.",
}
addressServerRPCFlag = cli.StringFlag{
Name: "address-server-rpc",
Hide: true,
Value: ":9002",
Usage: "ADDRESS:PORT for management console access.",
}
ratelimitFlag = cli.IntFlag{
Name: "ratelimit",
Hide: true,

View file

@ -17,6 +17,5 @@
package main
var (
globalJSONFlag = false // Json flag set via command line
globalDebugFlag = false // Debug flag set via command line
globalJSONFlag = false // Json flag set via command line
)

View file

@ -22,8 +22,8 @@ import (
"strconv"
"strings"
"github.com/minio/minio/pkg/donut"
"github.com/minio/minio/pkg/probe"
"github.com/minio/minio/pkg/fs"
"github.com/minio/minio-xl/pkg/probe"
)
const (
@ -60,7 +60,7 @@ func getRequestedRange(hrange string, size int64) (*httpRange, *probe.Error) {
func (r *httpRange) parse(ra string) *probe.Error {
i := strings.Index(ra, "-")
if i < 0 {
return probe.NewError(donut.InvalidRange{})
return probe.NewError(fs.InvalidRange{})
}
start, end := strings.TrimSpace(ra[:i]), strings.TrimSpace(ra[i+1:])
if start == "" {
@ -68,7 +68,7 @@ func (r *httpRange) parse(ra string) *probe.Error {
// range start relative to the end of the file.
i, err := strconv.ParseInt(end, 10, 64)
if err != nil {
return probe.NewError(donut.InvalidRange{})
return probe.NewError(fs.InvalidRange{})
}
if i > r.size {
i = r.size
@ -78,7 +78,7 @@ func (r *httpRange) parse(ra string) *probe.Error {
} else {
i, err := strconv.ParseInt(start, 10, 64)
if err != nil || i > r.size || i < 0 {
return probe.NewError(donut.InvalidRange{})
return probe.NewError(fs.InvalidRange{})
}
r.start = i
if end == "" {
@ -87,7 +87,7 @@ func (r *httpRange) parse(ra string) *probe.Error {
} else {
i, err := strconv.ParseInt(end, 10, 64)
if err != nil || r.start > i {
return probe.NewError(donut.InvalidRange{})
return probe.NewError(fs.InvalidRange{})
}
if i >= r.size {
i = r.size - 1
@ -104,7 +104,7 @@ func (r *httpRange) parseRange(s string) *probe.Error {
return probe.NewError(errors.New("header not present"))
}
if !strings.HasPrefix(s, b) {
return probe.NewError(donut.InvalidRange{})
return probe.NewError(fs.InvalidRange{})
}
ras := strings.Split(s[len(b):], ",")
@ -118,7 +118,7 @@ func (r *httpRange) parseRange(s string) *probe.Error {
ra := strings.TrimSpace(ras[0])
if ra == "" {
return probe.NewError(donut.InvalidRange{})
return probe.NewError(fs.InvalidRange{})
}
return r.parse(ra)
}
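
The parser above handles the three single-range forms of the Range header. A sketch of the expected results for a 1000-byte object, assuming it is compiled alongside the file above:

```go
package main

import "fmt"

func main() {
	for _, hrange := range []string{
		"bytes=0-499", // first 500 bytes
		"bytes=500-",  // everything from offset 500
		"bytes=-500",  // final 500 bytes
	} {
		r, err := getRequestedRange(hrange, 1000)
		if err != nil {
			fmt.Println(hrange, "=> invalid:", err.ToGoError())
			continue
		}
		fmt.Printf("%s => start=%d length=%d\n", hrange, r.start, r.length)
	}
}
```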

View file

@ -21,27 +21,13 @@ import (
"reflect"
"github.com/Sirupsen/logrus"
"github.com/minio/minio/pkg/probe"
"github.com/weekface/mgorus"
"github.com/minio/minio-xl/pkg/probe"
)
type fields map[string]interface{}
var log = logrus.New() // Default console logger.
// log2Mongo enables logging to mongodb. Use a capped collection to keep the log size bounded.
func log2Mongo(url, db, collection string) *probe.Error {
hooker, e := mgorus.NewHooker(url, db, collection)
if e != nil {
return probe.NewError(e)
}
log.Hooks.Add(hooker) // Add mongodb hook.
log.Formatter = &logrus.JSONFormatter{} // JSON formatted log.
log.Level = logrus.InfoLevel // Minimum log level.
return nil
}
func errorIf(err *probe.Error, msg string, fields map[string]interface{}) {
if err == nil {
return
@ -77,11 +63,3 @@ func fatalIf(err *probe.Error, msg string, fields map[string]interface{}) {
}
log.WithFields(fields).Fatal(msg)
}
func audit(msg string, fields logrus.Fields) {
if fields == nil {
fields = make(map[string]interface{})
}
log.WithFields(fields).Info(msg)
}

View file

@ -22,7 +22,7 @@ import (
"errors"
"github.com/Sirupsen/logrus"
"github.com/minio/minio/pkg/probe"
"github.com/minio/minio-xl/pkg/probe"
. "gopkg.in/check.v1"
)

38
main.go
View file

@ -27,16 +27,15 @@ import (
"github.com/minio/cli"
)
// minioConfig - http server config
type minioConfig struct {
Address string
ControllerAddress string
RPCAddress string
Anonymous bool
TLS bool
CertFile string
KeyFile string
RateLimit int
// fsConfig - fs http server config
type fsConfig struct {
Address string
Path string
Anonymous bool
TLS bool
CertFile string
KeyFile string
RateLimit int
}
func init() {
@ -90,15 +89,11 @@ func findClosestCommands(command string) []string {
func registerApp() *cli.App {
// register all commands
registerCommand(donutCmd)
registerCommand(serverCmd)
registerCommand(controllerCmd)
registerCommand(versionCmd)
// register all flags
registerFlag(addressFlag)
registerFlag(addressControllerFlag)
registerFlag(addressServerRPCFlag)
registerFlag(ratelimitFlag)
registerFlag(anonymousFlag)
registerFlag(certFlag)
@ -107,11 +102,12 @@ func registerApp() *cli.App {
// set up app
app := cli.NewApp()
app.Name = "minio"
app.Name = "Minio"
// hide --version flag, version is a command
app.HideVersion = true
app.Author = "Minio.io"
app.Usage = "Minio Cloud Storage"
app.Usage = "Cloud Storage Server for Micro Services & Magnetic Disks."
app.Description = `Micro services environment provisions one Minio server per application instance. Scalability is achieved through a large number of smaller personalized instances. This version of the Minio binary is built using the Filesystem storage backend for magnetic disks. It is ideal for storing large objects with sizes ranging from a few MBs to GBs. The Minio binary is small enough to be bundled along with the application stack.`
app.Flags = flags
app.Commands = commands
@ -119,17 +115,21 @@ func registerApp() *cli.App {
{{.Name}} - {{.Usage}}
USAGE:
{{.Name}} {{if .Flags}}[global flags] {{end}}command{{if .Flags}} [command flags]{{end}} [arguments...]
minio {{if .Flags}}[flags] {{end}}command{{if .Flags}}{{end}} [arguments...]
DESCRIPTION:
{{.Description}}
COMMANDS:
{{range .Commands}}{{join .Names ", "}}{{ "\t" }}{{.Usage}}
{{end}}{{if .Flags}}
GLOBAL FLAGS:
FLAGS:
{{range .Flags}}{{.}}
{{end}}{{end}}
VERSION:
` + minioVersion +
`{{range $key, $value := ExtraInfo}}
`
{{range $key, $value := ExtraInfo}}
{{$key}}:
{{$value}}
{{end}}

View file

@ -1,72 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"fmt"
"runtime"
"strings"
"github.com/fatih/color"
"github.com/olekukonko/ts"
)
// colorizeMessage - inspired by the Yeoman project npm package https://github.com/yeoman/update-notifier
func colorizeMessage(message string) string {
// initialize coloring
cyan := color.New(color.FgCyan, color.Bold).SprintFunc()
yellow := color.New(color.FgYellow, color.Bold).SprintfFunc()
// calculate the length without color coding; the ANSI escape characters padded onto the
// string make the colored length longer than the original string length
lineStr := fmt.Sprintf(" \"%s\" . ", message)
lineLength := len(lineStr)
// populate lines with color coding
lineInColor := fmt.Sprintf(" \"%s\" . ", cyan(message))
maxContentWidth := lineLength
terminal, err := ts.GetSize()
if err != nil {
// terminal size unknown, no coloring needed, just send the message as is
return message
}
var msg string
switch {
case len(lineStr) > terminal.Col():
msg = lineInColor
default:
// on windows terminal turn off unicode characters
var top, bottom, sideBar string
if runtime.GOOS == "windows" {
top = yellow("*" + strings.Repeat("*", maxContentWidth) + "*")
bottom = yellow("*" + strings.Repeat("*", maxContentWidth) + "*")
sideBar = yellow("|")
} else {
// color the rectangular box, use unicode characters here
top = yellow("┏" + strings.Repeat("━", maxContentWidth) + "┓")
bottom = yellow("┗" + strings.Repeat("━", maxContentWidth) + "┛")
sideBar = yellow("┃")
}
// construct the final message
msg = top + "\n" +
sideBar + lineInColor + sideBar + "\n" +
bottom
}
return msg
}
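
Usage sketch, assuming compilation alongside the file above. On a terminal wide enough for the message, the output is a colored box; otherwise just the colorized line:

```go
package main

import "fmt"

func main() {
	fmt.Println(colorizeMessage("Update available"))
	// Roughly renders as (colors omitted):
	// ┏━━━━━━━━━━━━━━━━━━━━━━┓
	// ┃ "Update available" . ┃
	// ┗━━━━━━━━━━━━━━━━━━━━━━┛
}
```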

View file

@ -9,7 +9,7 @@
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@ -21,9 +21,8 @@ import (
"strconv"
"github.com/gorilla/mux"
"github.com/minio/minio/pkg/donut"
"github.com/minio/minio/pkg/probe"
signv4 "github.com/minio/minio/pkg/signature"
"github.com/minio/minio/pkg/fs"
"github.com/minio/minio-xl/pkg/probe"
)
const (
@ -35,31 +34,22 @@ const (
// This implementation of the GET operation retrieves object. To use GET,
// you must have READ access to the object.
func (api API) GetObjectHandler(w http.ResponseWriter, req *http.Request) {
// ticket master block
{
op := APIOperation{}
op.ProceedCh = make(chan struct{})
api.OP <- op
// block until Ticket master gives us a go
<-op.ProceedCh
}
var object, bucket string
vars := mux.Vars(req)
bucket = vars["bucket"]
object = vars["object"]
metadata, err := api.Donut.GetObjectMetadata(bucket, object)
metadata, err := api.Filesystem.GetObjectMetadata(bucket, object)
if err != nil {
errorIf(err.Trace(), "GetObject failed.", nil)
switch err.ToGoError().(type) {
case donut.BucketNameInvalid:
case fs.BucketNameInvalid:
writeErrorResponse(w, req, InvalidBucketName, req.URL.Path)
case donut.BucketNotFound:
case fs.BucketNotFound:
writeErrorResponse(w, req, NoSuchBucket, req.URL.Path)
case donut.ObjectNotFound:
case fs.ObjectNotFound:
writeErrorResponse(w, req, NoSuchKey, req.URL.Path)
case donut.ObjectNameInvalid:
case fs.ObjectNameInvalid:
writeErrorResponse(w, req, NoSuchKey, req.URL.Path)
default:
writeErrorResponse(w, req, InternalError, req.URL.Path)
@ -73,7 +63,7 @@ func (api API) GetObjectHandler(w http.ResponseWriter, req *http.Request) {
return
}
setObjectHeaders(w, metadata, hrange)
if _, err = api.Donut.GetObject(w, bucket, object, hrange.start, hrange.length); err != nil {
if _, err = api.Filesystem.GetObject(w, bucket, object, hrange.start, hrange.length); err != nil {
errorIf(err.Trace(), "GetObject failed.", nil)
return
}
@ -83,31 +73,21 @@ func (api API) GetObjectHandler(w http.ResponseWriter, req *http.Request) {
// -----------
// The HEAD operation retrieves metadata from an object without returning the object itself.
func (api API) HeadObjectHandler(w http.ResponseWriter, req *http.Request) {
// ticket master block
{
op := APIOperation{}
op.ProceedCh = make(chan struct{})
api.OP <- op
// block until Ticket master gives us a go
<-op.ProceedCh
}
var object, bucket string
vars := mux.Vars(req)
bucket = vars["bucket"]
object = vars["object"]
metadata, err := api.Donut.GetObjectMetadata(bucket, object)
metadata, err := api.Filesystem.GetObjectMetadata(bucket, object)
if err != nil {
errorIf(err.Trace(), "GetObjectMetadata failed.", nil)
switch err.ToGoError().(type) {
case donut.BucketNameInvalid:
case fs.BucketNameInvalid:
writeErrorResponse(w, req, InvalidBucketName, req.URL.Path)
case donut.BucketNotFound:
case fs.BucketNotFound:
writeErrorResponse(w, req, NoSuchBucket, req.URL.Path)
case donut.ObjectNotFound:
case fs.ObjectNotFound:
writeErrorResponse(w, req, NoSuchKey, req.URL.Path)
case donut.ObjectNameInvalid:
case fs.ObjectNameInvalid:
writeErrorResponse(w, req, NoSuchKey, req.URL.Path)
default:
writeErrorResponse(w, req, InternalError, req.URL.Path)
@ -122,15 +102,6 @@ func (api API) HeadObjectHandler(w http.ResponseWriter, req *http.Request) {
// ----------
// This implementation of the PUT operation adds an object to a bucket.
func (api API) PutObjectHandler(w http.ResponseWriter, req *http.Request) {
// Ticket master block
{
op := APIOperation{}
op.ProceedCh = make(chan struct{})
api.OP <- op
// block until Ticket master gives us a go
<-op.ProceedCh
}
var object, bucket string
vars := mux.Vars(req)
bucket = vars["bucket"]
@ -153,16 +124,6 @@ func (api API) PutObjectHandler(w http.ResponseWriter, req *http.Request) {
writeErrorResponse(w, req, EntityTooLarge, req.URL.Path)
return
}
/// minimum Upload size for objects in a single operation
//
// Surprisingly, while Amazon's documentation states that S3 objects have 1 byte
// as the minimum limit, they do not seem to enforce it: one can successfully
// create a 0 byte file using a regular putObject() operation
//
// if isMinObjectSize(size) {
// writeErrorResponse(w, req, EntityTooSmall, req.URL.Path)
// return
// }
var sizeInt64 int64
{
var err error
@ -173,7 +134,7 @@ func (api API) PutObjectHandler(w http.ResponseWriter, req *http.Request) {
}
}
var signature *signv4.Signature
var signature *fs.Signature
if !api.Anonymous {
if _, ok := req.Header["Authorization"]; ok {
// Init signature V4 verification
@ -187,34 +148,32 @@ func (api API) PutObjectHandler(w http.ResponseWriter, req *http.Request) {
}
}
metadata, err := api.Donut.CreateObject(bucket, object, md5, sizeInt64, req.Body, nil, signature)
metadata, err := api.Filesystem.CreateObject(bucket, object, md5, sizeInt64, req.Body, signature)
if err != nil {
errorIf(err.Trace(), "CreateObject failed.", nil)
switch err.ToGoError().(type) {
case donut.BucketNotFound:
case fs.BucketNotFound:
writeErrorResponse(w, req, NoSuchBucket, req.URL.Path)
case donut.BucketNameInvalid:
case fs.BucketNameInvalid:
writeErrorResponse(w, req, InvalidBucketName, req.URL.Path)
case donut.ObjectExists:
writeErrorResponse(w, req, MethodNotAllowed, req.URL.Path)
case donut.BadDigest:
case fs.BadDigest:
writeErrorResponse(w, req, BadDigest, req.URL.Path)
case signv4.MissingDateHeader:
case fs.MissingDateHeader:
writeErrorResponse(w, req, RequestTimeTooSkewed, req.URL.Path)
case signv4.DoesNotMatch:
case fs.SignatureDoesNotMatch:
writeErrorResponse(w, req, SignatureDoesNotMatch, req.URL.Path)
case donut.IncompleteBody:
case fs.IncompleteBody:
writeErrorResponse(w, req, IncompleteBody, req.URL.Path)
case donut.EntityTooLarge:
case fs.EntityTooLarge:
writeErrorResponse(w, req, EntityTooLarge, req.URL.Path)
case donut.InvalidDigest:
case fs.InvalidDigest:
writeErrorResponse(w, req, InvalidDigest, req.URL.Path)
default:
writeErrorResponse(w, req, InternalError, req.URL.Path)
}
return
}
w.Header().Set("ETag", metadata.MD5Sum)
w.Header().Set("ETag", "\""+metadata.Md5+"\"")
writeSuccessResponse(w)
}
@ -222,15 +181,6 @@ func (api API) PutObjectHandler(w http.ResponseWriter, req *http.Request) {
// NewMultipartUploadHandler - New multipart upload
func (api API) NewMultipartUploadHandler(w http.ResponseWriter, req *http.Request) {
// Ticket master block
{
op := APIOperation{}
op.ProceedCh = make(chan struct{})
api.OP <- op
// block until Ticket master gives us a go
<-op.ProceedCh
}
if !isRequestUploads(req.URL.Query()) {
writeErrorResponse(w, req, MethodNotAllowed, req.URL.Path)
return
@ -241,12 +191,18 @@ func (api API) NewMultipartUploadHandler(w http.ResponseWriter, req *http.Reques
bucket = vars["bucket"]
object = vars["object"]
uploadID, err := api.Donut.NewMultipartUpload(bucket, object, req.Header.Get("Content-Type"))
uploadID, err := api.Filesystem.NewMultipartUpload(bucket, object)
if err != nil {
errorIf(err.Trace(), "NewMultipartUpload failed.", nil)
switch err.ToGoError().(type) {
case donut.ObjectExists:
writeErrorResponse(w, req, MethodNotAllowed, req.URL.Path)
case fs.BucketNameInvalid:
writeErrorResponse(w, req, InvalidBucketName, req.URL.Path)
case fs.BucketNotFound:
writeErrorResponse(w, req, NoSuchBucket, req.URL.Path)
case fs.ObjectNotFound:
writeErrorResponse(w, req, NoSuchKey, req.URL.Path)
case fs.ObjectNameInvalid:
writeErrorResponse(w, req, NoSuchKey, req.URL.Path)
default:
writeErrorResponse(w, req, InternalError, req.URL.Path)
}
@ -263,15 +219,6 @@ func (api API) NewMultipartUploadHandler(w http.ResponseWriter, req *http.Reques
// PutObjectPartHandler - Upload part
func (api API) PutObjectPartHandler(w http.ResponseWriter, req *http.Request) {
// Ticket master block
{
op := APIOperation{}
op.ProceedCh = make(chan struct{})
api.OP <- op
// block until Ticket master gives us a go
<-op.ProceedCh
}
// get Content-MD5 sent by client and verify if valid
md5 := req.Header.Get("Content-MD5")
if !isValidMD5(md5) {
@ -319,7 +266,7 @@ func (api API) PutObjectPartHandler(w http.ResponseWriter, req *http.Request) {
}
}
var signature *signv4.Signature
var signature *fs.Signature
if !api.Anonymous {
if _, ok := req.Header["Authorization"]; ok {
// Init signature V4 verification
@ -333,55 +280,52 @@ func (api API) PutObjectPartHandler(w http.ResponseWriter, req *http.Request) {
}
}
calculatedMD5, err := api.Donut.CreateObjectPart(bucket, object, uploadID, partID, "", md5, sizeInt64, req.Body, signature)
calculatedMD5, err := api.Filesystem.CreateObjectPart(bucket, object, uploadID, md5, partID, sizeInt64, req.Body, signature)
if err != nil {
errorIf(err.Trace(), "CreateObjectPart failed.", nil)
switch err.ToGoError().(type) {
case donut.InvalidUploadID:
case fs.InvalidUploadID:
writeErrorResponse(w, req, NoSuchUpload, req.URL.Path)
case donut.ObjectExists:
writeErrorResponse(w, req, MethodNotAllowed, req.URL.Path)
case donut.BadDigest:
case fs.BadDigest:
writeErrorResponse(w, req, BadDigest, req.URL.Path)
case signv4.DoesNotMatch:
case fs.SignatureDoesNotMatch:
writeErrorResponse(w, req, SignatureDoesNotMatch, req.URL.Path)
case donut.IncompleteBody:
case fs.IncompleteBody:
writeErrorResponse(w, req, IncompleteBody, req.URL.Path)
case donut.EntityTooLarge:
case fs.EntityTooLarge:
writeErrorResponse(w, req, EntityTooLarge, req.URL.Path)
case donut.InvalidDigest:
case fs.InvalidDigest:
writeErrorResponse(w, req, InvalidDigest, req.URL.Path)
default:
writeErrorResponse(w, req, InternalError, req.URL.Path)
}
return
}
w.Header().Set("ETag", calculatedMD5)
w.Header().Set("ETag", "\""+calculatedMD5+"\"")
writeSuccessResponse(w)
}
// AbortMultipartUploadHandler - Abort multipart upload
func (api API) AbortMultipartUploadHandler(w http.ResponseWriter, req *http.Request) {
// Ticket master block
{
op := APIOperation{}
op.ProceedCh = make(chan struct{})
api.OP <- op
// block until Ticket master gives us a go
<-op.ProceedCh
}
vars := mux.Vars(req)
bucket := vars["bucket"]
object := vars["object"]
objectResourcesMetadata := getObjectResources(req.URL.Query())
err := api.Donut.AbortMultipartUpload(bucket, object, objectResourcesMetadata.UploadID)
err := api.Filesystem.AbortMultipartUpload(bucket, object, objectResourcesMetadata.UploadID)
if err != nil {
errorIf(err.Trace(), "AbortMutlipartUpload failed.", nil)
switch err.ToGoError().(type) {
case donut.InvalidUploadID:
case fs.BucketNameInvalid:
writeErrorResponse(w, req, InvalidBucketName, req.URL.Path)
case fs.BucketNotFound:
writeErrorResponse(w, req, NoSuchBucket, req.URL.Path)
case fs.ObjectNotFound:
writeErrorResponse(w, req, NoSuchKey, req.URL.Path)
case fs.ObjectNameInvalid:
writeErrorResponse(w, req, NoSuchKey, req.URL.Path)
case fs.InvalidUploadID:
writeErrorResponse(w, req, NoSuchUpload, req.URL.Path)
default:
writeErrorResponse(w, req, InternalError, req.URL.Path)
@ -394,15 +338,6 @@ func (api API) AbortMultipartUploadHandler(w http.ResponseWriter, req *http.Requ
// ListObjectPartsHandler - List object parts
func (api API) ListObjectPartsHandler(w http.ResponseWriter, req *http.Request) {
// Ticket master block
{
op := APIOperation{}
op.ProceedCh = make(chan struct{})
api.OP <- op
// block until Ticket master gives us a go
<-op.ProceedCh
}
objectResourcesMetadata := getObjectResources(req.URL.Query())
if objectResourcesMetadata.PartNumberMarker < 0 {
writeErrorResponse(w, req, InvalidPartNumberMarker, req.URL.Path)
@ -420,11 +355,19 @@ func (api API) ListObjectPartsHandler(w http.ResponseWriter, req *http.Request)
bucket := vars["bucket"]
object := vars["object"]
objectResourcesMetadata, err := api.Donut.ListObjectParts(bucket, object, objectResourcesMetadata)
objectResourcesMetadata, err := api.Filesystem.ListObjectParts(bucket, object, objectResourcesMetadata)
if err != nil {
errorIf(err.Trace(), "ListObjectParts failed.", nil)
switch err.ToGoError().(type) {
case donut.InvalidUploadID:
case fs.BucketNameInvalid:
writeErrorResponse(w, req, InvalidBucketName, req.URL.Path)
case fs.BucketNotFound:
writeErrorResponse(w, req, NoSuchBucket, req.URL.Path)
case fs.ObjectNotFound:
writeErrorResponse(w, req, NoSuchKey, req.URL.Path)
case fs.ObjectNameInvalid:
writeErrorResponse(w, req, NoSuchKey, req.URL.Path)
case fs.InvalidUploadID:
writeErrorResponse(w, req, NoSuchUpload, req.URL.Path)
default:
writeErrorResponse(w, req, InternalError, req.URL.Path)
@ -441,22 +384,13 @@ func (api API) ListObjectPartsHandler(w http.ResponseWriter, req *http.Request)
// CompleteMultipartUploadHandler - Complete multipart upload
func (api API) CompleteMultipartUploadHandler(w http.ResponseWriter, req *http.Request) {
// Ticket master block
{
op := APIOperation{}
op.ProceedCh = make(chan struct{})
api.OP <- op
// block until Ticket master gives us a go
<-op.ProceedCh
}
vars := mux.Vars(req)
bucket := vars["bucket"]
object := vars["object"]
objectResourcesMetadata := getObjectResources(req.URL.Query())
var signature *signv4.Signature
var signature *fs.Signature
if !api.Anonymous {
if _, ok := req.Header["Authorization"]; ok {
// Init signature V4 verification
@ -470,30 +404,36 @@ func (api API) CompleteMultipartUploadHandler(w http.ResponseWriter, req *http.R
}
}
metadata, err := api.Donut.CompleteMultipartUpload(bucket, object, objectResourcesMetadata.UploadID, req.Body, signature)
metadata, err := api.Filesystem.CompleteMultipartUpload(bucket, object, objectResourcesMetadata.UploadID, req.Body, signature)
if err != nil {
errorIf(err.Trace(), "CompleteMultipartUpload failed.", nil)
switch err.ToGoError().(type) {
case donut.InvalidUploadID:
case fs.BucketNameInvalid:
writeErrorResponse(w, req, InvalidBucketName, req.URL.Path)
case fs.BucketNotFound:
writeErrorResponse(w, req, NoSuchBucket, req.URL.Path)
case fs.ObjectNotFound:
writeErrorResponse(w, req, NoSuchKey, req.URL.Path)
case fs.ObjectNameInvalid:
writeErrorResponse(w, req, NoSuchKey, req.URL.Path)
case fs.InvalidUploadID:
writeErrorResponse(w, req, NoSuchUpload, req.URL.Path)
case donut.InvalidPart:
case fs.InvalidPart:
writeErrorResponse(w, req, InvalidPart, req.URL.Path)
case donut.InvalidPartOrder:
case fs.InvalidPartOrder:
writeErrorResponse(w, req, InvalidPartOrder, req.URL.Path)
case signv4.MissingDateHeader:
writeErrorResponse(w, req, RequestTimeTooSkewed, req.URL.Path)
case signv4.DoesNotMatch:
case fs.SignatureDoesNotMatch:
writeErrorResponse(w, req, SignatureDoesNotMatch, req.URL.Path)
case donut.IncompleteBody:
case fs.IncompleteBody:
writeErrorResponse(w, req, IncompleteBody, req.URL.Path)
case donut.MalformedXML:
case fs.MalformedXML:
writeErrorResponse(w, req, MalformedXML, req.URL.Path)
default:
writeErrorResponse(w, req, InternalError, req.URL.Path)
}
return
}
response := generateCompleteMultpartUploadResponse(bucket, object, "", metadata.MD5Sum)
response := generateCompleteMultpartUploadResponse(bucket, object, "", metadata.Md5)
encodedSuccessResponse := encodeSuccessResponse(response)
// write headers
setCommonHeaders(w, len(encodedSuccessResponse))
@ -503,14 +443,26 @@ func (api API) CompleteMultipartUploadHandler(w http.ResponseWriter, req *http.R
/// Delete API
// DeleteBucketHandler - Delete bucket
func (api API) DeleteBucketHandler(w http.ResponseWriter, req *http.Request) {
error := getErrorCode(MethodNotAllowed)
w.WriteHeader(error.HTTPStatusCode)
}
// DeleteObjectHandler - Delete object
func (api API) DeleteObjectHandler(w http.ResponseWriter, req *http.Request) {
error := getErrorCode(MethodNotAllowed)
w.WriteHeader(error.HTTPStatusCode)
vars := mux.Vars(req)
bucket := vars["bucket"]
object := vars["object"]
err := api.Filesystem.DeleteObject(bucket, object)
if err != nil {
errorIf(err.Trace(), "DeleteObject failed.", nil)
switch err.ToGoError().(type) {
case fs.BucketNameInvalid:
writeErrorResponse(w, req, InvalidBucketName, req.URL.Path)
case fs.BucketNotFound:
writeErrorResponse(w, req, NoSuchBucket, req.URL.Path)
case fs.ObjectNotFound:
writeErrorResponse(w, req, NoSuchKey, req.URL.Path)
case fs.ObjectNameInvalid:
writeErrorResponse(w, req, NoSuchKey, req.URL.Path)
default:
writeErrorResponse(w, req, InternalError, req.URL.Path)
}
}
}
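
One visible effect of these handler changes is that ETag values are now returned quoted, matching what S3 clients expect. A hedged client sketch (endpoint and bucket are placeholders and assume a server running with anonymous access; a signed deployment would also need the V4 signature headers):

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// Placeholder endpoint and bucket.
	req, err := http.NewRequest("PUT", "http://localhost:9000/mybucket/hello.txt",
		strings.NewReader("hello, world"))
	if err != nil {
		panic(err)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	// With the change above, the ETag header arrives wrapped in double quotes.
	fmt.Println(resp.StatusCode, resp.Header.Get("ETag"))
}
```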

View file

@ -1,44 +0,0 @@
/*
* Minio Cloud Storage, (C) 2014 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package md5
import (
"crypto/md5"
"io"
)
// Sum - low memory footprint io.Reader based md5sum helper
func Sum(reader io.Reader) ([]byte, error) {
hash := md5.New()
var err error
var length int
for err == nil {
byteBuffer := make([]byte, 1024*1024)
length, err = reader.Read(byteBuffer)
// While hash.Write() wouldn't mind a nil byteBuffer,
// it is necessary for us to verify the read length and break
if length == 0 {
break
}
byteBuffer = byteBuffer[0:length]
hash.Write(byteBuffer)
}
if err != nil && err != io.EOF {
return nil, err
}
return hash.Sum(nil), nil
}

View file

@ -1,24 +0,0 @@
package md5_test
import (
"bytes"
"encoding/hex"
"testing"
"github.com/minio/minio/pkg/crypto/md5"
. "gopkg.in/check.v1"
)
func Test(t *testing.T) { TestingT(t) }
type MySuite struct{}
var _ = Suite(&MySuite{})
func (s *MySuite) TestMd5sum(c *C) {
testString := []byte("Test string")
expectedHash, _ := hex.DecodeString("0fd3dbec9730101bff92acc820befc34")
hash, err := md5.Sum(bytes.NewBuffer(testString))
c.Assert(err, IsNil)
c.Assert(bytes.Equal(expectedHash, hash), Equals, true)
}

View file

@ -1 +0,0 @@
*.syso

View file

@ -1,167 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file of
// Golang project:
// https://github.com/golang/go/blob/master/LICENSE
// Using this part of Minio codebase under the license
// Apache License Version 2.0 with modifications
// Package sha1 implements the SHA1 hash algorithm as defined in RFC 3174.
package sha1
import (
"hash"
"io"
"github.com/minio/minio/pkg/cpu"
)
// The size of a SHA1 checksum in bytes.
const Size = 20
// The blocksize of SHA1 in bytes.
const BlockSize = 64
const (
chunk = 64
init0 = 0x67452301
init1 = 0xEFCDAB89
init2 = 0x98BADCFE
init3 = 0x10325476
init4 = 0xC3D2E1F0
)
// digest represents the partial evaluation of a checksum.
type digest struct {
h [5]uint32
x [chunk]byte
nx int
len uint64
}
// Reset digest
func (d *digest) Reset() {
d.h[0] = init0
d.h[1] = init1
d.h[2] = init2
d.h[3] = init3
d.h[4] = init4
d.nx = 0
d.len = 0
}
// New returns a new hash.Hash computing the SHA1 checksum.
func New() hash.Hash {
d := new(digest)
d.Reset()
return d
}
func block(dig *digest, p []byte) {
switch {
case cpu.HasSSE41():
blockSSE3(dig, p)
default:
blockGeneric(dig, p)
}
}
// Return output size
func (d *digest) Size() int { return Size }
// Return checksum blocksize
func (d *digest) BlockSize() int { return BlockSize }
// Write to digest
func (d *digest) Write(p []byte) (nn int, err error) {
nn = len(p)
d.len += uint64(nn)
if d.nx > 0 {
n := copy(d.x[d.nx:], p)
d.nx += n
if d.nx == chunk {
block(d, d.x[:])
d.nx = 0
}
p = p[n:]
}
if len(p) >= chunk {
n := len(p) &^ (chunk - 1)
block(d, p[:n])
p = p[n:]
}
if len(p) > 0 {
d.nx = copy(d.x[:], p)
}
return
}
// Return checksum bytes
func (d *digest) Sum(in []byte) []byte {
// Make a copy of d0 so that caller can keep writing and summing.
d0 := *d
hash := d0.checkSum()
return append(in, hash[:]...)
}
// Intermediate checksum function
func (d *digest) checkSum() [Size]byte {
len := d.len
// Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
var tmp [64]byte
tmp[0] = 0x80
if len%64 < 56 {
d.Write(tmp[0 : 56-len%64])
} else {
d.Write(tmp[0 : 64+56-len%64])
}
// Length in bits.
len <<= 3
for i := uint(0); i < 8; i++ {
tmp[i] = byte(len >> (56 - 8*i))
}
d.Write(tmp[0:8])
if d.nx != 0 {
panic("d.nx != 0")
}
var digest [Size]byte
for i, s := range d.h {
digest[i*4] = byte(s >> 24)
digest[i*4+1] = byte(s >> 16)
digest[i*4+2] = byte(s >> 8)
digest[i*4+3] = byte(s)
}
return digest
}
/// Convenience functions
// Sum1 - single caller sha1 helper
func Sum1(data []byte) [Size]byte {
var d digest
d.Reset()
d.Write(data)
return d.checkSum()
}
// Sum - io.Reader based streaming sha1 helper
func Sum(reader io.Reader) ([]byte, error) {
h := New()
var err error
for err == nil {
length := 0
byteBuffer := make([]byte, 1024*1024)
length, err = reader.Read(byteBuffer)
byteBuffer = byteBuffer[0:length]
h.Write(byteBuffer)
}
if err != io.EOF {
return nil, err
}
return h.Sum(nil), nil
}
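
Usage sketch for the two convenience helpers above; the import path is an assumption based on this package's location in the tree:

```go
package main

import (
	"encoding/hex"
	"fmt"
	"strings"

	"github.com/minio/minio/pkg/crypto/sha1"
)

func main() {
	// One-shot helper over a byte slice.
	sum := sha1.Sum1([]byte("Test string"))
	fmt.Println(hex.EncodeToString(sum[:]))

	// Streaming helper over any io.Reader.
	streamed, err := sha1.Sum(strings.NewReader("Test string"))
	if err != nil {
		panic(err)
	}
	fmt.Println(hex.EncodeToString(streamed))
}
```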

View file

@ -1,967 +0,0 @@
/*
* Implement fast SHA-1 with AVX2 instructions. (x86_64)
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* Contact Information:
* Ilya Albrekht <ilya.albrekht@intel.com>
* Maxim Locktyukhin <maxim.locktyukhin@intel.com>
* Ronen Zohar <ronen.zohar@intel.com>
* Chandramouli Narayanan <mouli@linux.intel.com>
*
* BSD LICENSE
*
* Copyright(c) 2014 Intel Corporation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
* SHA-1 implementation with Intel(R) AVX2 instruction set extensions.
*
*This implementation is based on the previous SSSE3 release:
* https://software.intel.com/en-us/articles/improving-the-performance-of-the-secure-hash-algorithm-1
*
*Updates 20-byte SHA-1 record in 'hash' for even number of
*'num_blocks' consecutive 64-byte blocks
*
*/
/*
* Using this part of Minio codebase under the license
* Apache License Version 2.0 with modifications
*
*/
#ifdef HAS_AVX2
#ifndef ENTRY
#define ENTRY(name) \
.globl name ; \
.align 4,0x90 ; \
name:
#endif
#ifndef END
#define END(name) \
.size name, .-name
#endif
#ifndef ENDPROC
#define ENDPROC(name) \
.type name, @function ; \
END(name)
#endif
#define NUM_INVALID 100
#define TYPE_R32 0
#define TYPE_R64 1
#define TYPE_XMM 2
#define TYPE_INVALID 100
.macro R32_NUM opd r32
\opd = NUM_INVALID
.ifc \r32,%eax
\opd = 0
.endif
.ifc \r32,%ecx
\opd = 1
.endif
.ifc \r32,%edx
\opd = 2
.endif
.ifc \r32,%ebx
\opd = 3
.endif
.ifc \r32,%esp
\opd = 4
.endif
.ifc \r32,%ebp
\opd = 5
.endif
.ifc \r32,%esi
\opd = 6
.endif
.ifc \r32,%edi
\opd = 7
.endif
#ifdef X86_64
.ifc \r32,%r8d
\opd = 8
.endif
.ifc \r32,%r9d
\opd = 9
.endif
.ifc \r32,%r10d
\opd = 10
.endif
.ifc \r32,%r11d
\opd = 11
.endif
.ifc \r32,%r12d
\opd = 12
.endif
.ifc \r32,%r13d
\opd = 13
.endif
.ifc \r32,%r14d
\opd = 14
.endif
.ifc \r32,%r15d
\opd = 15
.endif
#endif
.endm
.macro R64_NUM opd r64
\opd = NUM_INVALID
#ifdef X86_64
.ifc \r64,%rax
\opd = 0
.endif
.ifc \r64,%rcx
\opd = 1
.endif
.ifc \r64,%rdx
\opd = 2
.endif
.ifc \r64,%rbx
\opd = 3
.endif
.ifc \r64,%rsp
\opd = 4
.endif
.ifc \r64,%rbp
\opd = 5
.endif
.ifc \r64,%rsi
\opd = 6
.endif
.ifc \r64,%rdi
\opd = 7
.endif
.ifc \r64,%r8
\opd = 8
.endif
.ifc \r64,%r9
\opd = 9
.endif
.ifc \r64,%r10
\opd = 10
.endif
.ifc \r64,%r11
\opd = 11
.endif
.ifc \r64,%r12
\opd = 12
.endif
.ifc \r64,%r13
\opd = 13
.endif
.ifc \r64,%r14
\opd = 14
.endif
.ifc \r64,%r15
\opd = 15
.endif
#endif
.endm
.macro XMM_NUM opd xmm
\opd = NUM_INVALID
.ifc \xmm,%xmm0
\opd = 0
.endif
.ifc \xmm,%xmm1
\opd = 1
.endif
.ifc \xmm,%xmm2
\opd = 2
.endif
.ifc \xmm,%xmm3
\opd = 3
.endif
.ifc \xmm,%xmm4
\opd = 4
.endif
.ifc \xmm,%xmm5
\opd = 5
.endif
.ifc \xmm,%xmm6
\opd = 6
.endif
.ifc \xmm,%xmm7
\opd = 7
.endif
.ifc \xmm,%xmm8
\opd = 8
.endif
.ifc \xmm,%xmm9
\opd = 9
.endif
.ifc \xmm,%xmm10
\opd = 10
.endif
.ifc \xmm,%xmm11
\opd = 11
.endif
.ifc \xmm,%xmm12
\opd = 12
.endif
.ifc \xmm,%xmm13
\opd = 13
.endif
.ifc \xmm,%xmm14
\opd = 14
.endif
.ifc \xmm,%xmm15
\opd = 15
.endif
.endm
.macro TYPE type reg
R32_NUM reg_type_r32 \reg
R64_NUM reg_type_r64 \reg
XMM_NUM reg_type_xmm \reg
.if reg_type_r64 <> NUM_INVALID
\type = TYPE_R64
.elseif reg_type_r32 <> NUM_INVALID
\type = TYPE_R32
.elseif reg_type_xmm <> NUM_INVALID
\type = TYPE_XMM
.else
\type = TYPE_INVALID
.endif
.endm
.macro PFX_OPD_SIZE
.byte 0x66
.endm
.macro PFX_REX opd1 opd2 W=0
.if ((\opd1 | \opd2) & 8) || \W
.byte 0x40 | ((\opd1 & 8) >> 3) | ((\opd2 & 8) >> 1) | (\W << 3)
.endif
.endm
.macro MODRM mod opd1 opd2
.byte \mod | (\opd1 & 7) | ((\opd2 & 7) << 3)
.endm
.macro PSHUFB_XMM xmm1 xmm2
XMM_NUM pshufb_opd1 \xmm1
XMM_NUM pshufb_opd2 \xmm2
PFX_OPD_SIZE
PFX_REX pshufb_opd1 pshufb_opd2
.byte 0x0f, 0x38, 0x00
MODRM 0xc0 pshufb_opd1 pshufb_opd2
.endm
.macro PCLMULQDQ imm8 xmm1 xmm2
XMM_NUM clmul_opd1 \xmm1
XMM_NUM clmul_opd2 \xmm2
PFX_OPD_SIZE
PFX_REX clmul_opd1 clmul_opd2
.byte 0x0f, 0x3a, 0x44
MODRM 0xc0 clmul_opd1 clmul_opd2
.byte \imm8
.endm
.macro PEXTRD imm8 xmm gpr
R32_NUM extrd_opd1 \gpr
XMM_NUM extrd_opd2 \xmm
PFX_OPD_SIZE
PFX_REX extrd_opd1 extrd_opd2
.byte 0x0f, 0x3a, 0x16
MODRM 0xc0 extrd_opd1 extrd_opd2
.byte \imm8
.endm
.macro MOVQ_R64_XMM opd1 opd2
TYPE movq_r64_xmm_opd1_type \opd1
.if movq_r64_xmm_opd1_type == TYPE_XMM
XMM_NUM movq_r64_xmm_opd1 \opd1
R64_NUM movq_r64_xmm_opd2 \opd2
.else
R64_NUM movq_r64_xmm_opd1 \opd1
XMM_NUM movq_r64_xmm_opd2 \opd2
.endif
PFX_OPD_SIZE
PFX_REX movq_r64_xmm_opd1 movq_r64_xmm_opd2 1
.if movq_r64_xmm_opd1_type == TYPE_XMM
.byte 0x0f, 0x7e
.else
.byte 0x0f, 0x6e
.endif
MODRM 0xc0 movq_r64_xmm_opd1 movq_r64_xmm_opd2
.endm
#define CTX %rdi /* arg1 */
#define BUF %rsi /* arg2 */
#define CNT %rdx /* arg3 */
#define REG_A %ecx
#define REG_B %esi
#define REG_C %edi
#define REG_D %eax
#define REG_E %edx
#define REG_TB %ebx
#define REG_TA %r12d
#define REG_RA %rcx
#define REG_RB %rsi
#define REG_RC %rdi
#define REG_RD %rax
#define REG_RE %rdx
#define REG_RTA %r12
#define REG_RTB %rbx
#define REG_T1 %ebp
#define xmm_mov vmovups
#define avx2_zeroupper vzeroupper
#define RND_F1 1
#define RND_F2 2
#define RND_F3 3
.macro REGALLOC
.set A, REG_A
.set B, REG_B
.set C, REG_C
.set D, REG_D
.set E, REG_E
.set TB, REG_TB
.set TA, REG_TA
.set RA, REG_RA
.set RB, REG_RB
.set RC, REG_RC
.set RD, REG_RD
.set RE, REG_RE
.set RTA, REG_RTA
.set RTB, REG_RTB
.set T1, REG_T1
.endm
#define K_BASE %r8
#define HASH_PTR %r9
#define BUFFER_PTR %r10
#define BUFFER_PTR2 %r13
#define BUFFER_END %r11
#define PRECALC_BUF %r14
#define WK_BUF %r15
#define W_TMP %xmm0
#define WY_TMP %ymm0
#define WY_TMP2 %ymm9
# AVX2 variables
#define WY0 %ymm3
#define WY4 %ymm5
#define WY08 %ymm7
#define WY12 %ymm8
#define WY16 %ymm12
#define WY20 %ymm13
#define WY24 %ymm14
#define WY28 %ymm15
#define YMM_SHUFB_BSWAP %ymm10
/*
* Keep 2 iterations precalculated at a time:
* - 80 DWORDs per iteration * 2
*/
#define W_SIZE (80*2*2 +16)
#define WK(t) ((((t) % 80) / 4)*32 + ( (t) % 4)*4 + ((t)/80)*16 )(WK_BUF)
#define PRECALC_WK(t) ((t)*2*2)(PRECALC_BUF)
.macro UPDATE_HASH hash, val
add \hash, \val
mov \val, \hash
.endm
.macro PRECALC_RESET_WY
.set WY_00, WY0
.set WY_04, WY4
.set WY_08, WY08
.set WY_12, WY12
.set WY_16, WY16
.set WY_20, WY20
.set WY_24, WY24
.set WY_28, WY28
.set WY_32, WY_00
.endm
.macro PRECALC_ROTATE_WY
/* Rotate macros */
.set WY_32, WY_28
.set WY_28, WY_24
.set WY_24, WY_20
.set WY_20, WY_16
.set WY_16, WY_12
.set WY_12, WY_08
.set WY_08, WY_04
.set WY_04, WY_00
.set WY_00, WY_32
/* Define register aliases */
.set WY, WY_00
.set WY_minus_04, WY_04
.set WY_minus_08, WY_08
.set WY_minus_12, WY_12
.set WY_minus_16, WY_16
.set WY_minus_20, WY_20
.set WY_minus_24, WY_24
.set WY_minus_28, WY_28
.set WY_minus_32, WY
.endm
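PRECALC_RESET_WY and PRECALC_ROTATE_WY move no data; they only rotate which ymm register each window name (WY, WY_minus_04, ...) refers to. A sketch of the same renaming trick in Go (illustrative only):

```go
package main

import "fmt"

func main() {
	// Ring of physical registers; index 0 is the current window "WY",
	// index 7 is "WY_minus_28". Rotation retargets names, not data.
	regs := []string{"ymm3", "ymm5", "ymm7", "ymm8", "ymm12", "ymm13", "ymm14", "ymm15"}
	for i := 0; i < 3; i++ {
		fmt.Printf("WY=%s WY_minus_28=%s\n", regs[0], regs[7])
		regs = append([]string{regs[7]}, regs[:7]...) // oldest register becomes the new WY
	}
}
```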
.macro PRECALC_00_15
.if (i == 0) # Initialize and rotate registers
PRECALC_RESET_WY
PRECALC_ROTATE_WY
.endif
/* message scheduling pre-compute for rounds 0-15 */
.if ((i & 7) == 0)
/*
* blended AVX2 and ALU instruction scheduling
* 1 vector iteration per 8 rounds
*/
vmovdqu ((i * 2) + PRECALC_OFFSET)(BUFFER_PTR), W_TMP
.elseif ((i & 7) == 1)
vinsertf128 $1, (((i-1) * 2)+PRECALC_OFFSET)(BUFFER_PTR2),\
WY_TMP, WY_TMP
.elseif ((i & 7) == 2)
vpshufb YMM_SHUFB_BSWAP, WY_TMP, WY
.elseif ((i & 7) == 4)
vpaddd K_XMM(K_BASE), WY, WY_TMP
.elseif ((i & 7) == 7)
vmovdqu WY_TMP, PRECALC_WK(i&~7)
PRECALC_ROTATE_WY
.endif
.endm
.macro PRECALC_16_31
/*
* message scheduling pre-compute for rounds 16-31
* calculating last 32 w[i] values in 8 XMM registers
* pre-calculate K+w[i] values and store to mem
* for later load by ALU add instruction
*
* "brute force" vectorization for rounds 16-31 only
* due to w[i]->w[i-3] dependency
*/
.if ((i & 7) == 0)
/*
* blended AVX2 and ALU instruction scheduling
* 1 vector iteration per 8 rounds
*/
/* w[i-14] */
vpalignr $8, WY_minus_16, WY_minus_12, WY
vpsrldq $4, WY_minus_04, WY_TMP /* w[i-3] */
.elseif ((i & 7) == 1)
vpxor WY_minus_08, WY, WY
vpxor WY_minus_16, WY_TMP, WY_TMP
.elseif ((i & 7) == 2)
vpxor WY_TMP, WY, WY
vpslldq $12, WY, WY_TMP2
.elseif ((i & 7) == 3)
vpslld $1, WY, WY_TMP
vpsrld $31, WY, WY
.elseif ((i & 7) == 4)
vpor WY, WY_TMP, WY_TMP
vpslld $2, WY_TMP2, WY
.elseif ((i & 7) == 5)
vpsrld $30, WY_TMP2, WY_TMP2
vpxor WY, WY_TMP, WY_TMP
.elseif ((i & 7) == 7)
vpxor WY_TMP2, WY_TMP, WY
vpaddd K_XMM(K_BASE), WY, WY_TMP
vmovdqu WY_TMP, PRECALC_WK(i&~7)
PRECALC_ROTATE_WY
.endif
.endm
.macro PRECALC_32_79
/*
* in SHA-1 specification:
* w[i] = (w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16]) rol 1
* instead we compute the equivalent:
* w[i] = (w[i-6] ^ w[i-16] ^ w[i-28] ^ w[i-32]) rol 2
* allows more efficient vectorization
* since w[i]=>w[i-3] dependency is broken
*/
.if ((i & 7) == 0)
/*
* blended AVX2 and ALU instruction scheduling
* 1 vector iteration per 8 rounds
*/
vpalignr $8, WY_minus_08, WY_minus_04, WY_TMP
.elseif ((i & 7) == 1)
/* W is W_minus_32 before xor */
vpxor WY_minus_28, WY, WY
.elseif ((i & 7) == 2)
vpxor WY_minus_16, WY_TMP, WY_TMP
.elseif ((i & 7) == 3)
vpxor WY_TMP, WY, WY
.elseif ((i & 7) == 4)
vpslld $2, WY, WY_TMP
.elseif ((i & 7) == 5)
vpsrld $30, WY, WY
vpor WY, WY_TMP, WY
.elseif ((i & 7) == 7)
vpaddd K_XMM(K_BASE), WY, WY_TMP
vmovdqu WY_TMP, PRECALC_WK(i&~7)
PRECALC_ROTATE_WY
.endif
.endm
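The rewritten recurrence in the comment above is the key to the vectorization, so it is worth checking independently. A small Go program (editor's addition) verifying that the standard and dependency-broken forms of the message schedule agree for rounds 32-79, here on an arbitrary block:

```go
package main

import (
	"fmt"
	"math/bits"
)

func main() {
	var w [80]uint32
	for i := 0; i < 16; i++ { // arbitrary 16-word message block
		w[i] = uint32(i+1) * 0x9e3779b9
	}
	for i := 16; i < 80; i++ { // standard SHA-1 schedule
		w[i] = bits.RotateLeft32(w[i-3]^w[i-8]^w[i-14]^w[i-16], 1)
	}
	for i := 32; i < 80; i++ { // rewritten, dependency-broken form
		alt := bits.RotateLeft32(w[i-6]^w[i-16]^w[i-28]^w[i-32], 2)
		if alt != w[i] {
			fmt.Println("mismatch at round", i)
			return
		}
	}
	fmt.Println("both recurrences agree for rounds 32-79")
}
```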
.macro PRECALC r, s
.set i, \r
.if (i < 40)
.set K_XMM, 32*0
.elseif (i < 80)
.set K_XMM, 32*1
.elseif (i < 120)
.set K_XMM, 32*2
.else
.set K_XMM, 32*3
.endif
.if (i<32)
PRECALC_00_15 \s
.elseif (i<64)
PRECALC_16_31 \s
.elseif (i < 160)
PRECALC_32_79 \s
.endif
.endm
.macro ROTATE_STATE
.set T_REG, E
.set E, D
.set D, C
.set C, B
.set B, TB
.set TB, A
.set A, T_REG
.set T_REG, RE
.set RE, RD
.set RD, RC
.set RC, RB
.set RB, RTB
.set RTB, RA
.set RA, T_REG
.endm
/* Macro relies on saved ROUND_Fx */
.macro RND_FUN f, r
.if (\f == RND_F1)
ROUND_F1 \r
.elseif (\f == RND_F2)
ROUND_F2 \r
.elseif (\f == RND_F3)
ROUND_F3 \r
.endif
.endm
.macro RR r
.set round_id, (\r % 80)
.if (round_id == 0) /* Precalculate F for first round */
.set ROUND_FUNC, RND_F1
mov B, TB
rorx $(32-30), B, B /* b>>>2 */
andn D, TB, T1
and C, TB
xor T1, TB
.endif
RND_FUN ROUND_FUNC, \r
ROTATE_STATE
.if (round_id == 18)
.set ROUND_FUNC, RND_F2
.elseif (round_id == 38)
.set ROUND_FUNC, RND_F3
.elseif (round_id == 58)
.set ROUND_FUNC, RND_F2
.endif
.set round_id, ( (\r+1) % 80)
RND_FUN ROUND_FUNC, (\r+1)
ROTATE_STATE
.endm
.macro ROUND_F1 r
add WK(\r), E
andn C, A, T1 /* ~b&d */
lea (RE,RTB), E /* Add F from the previous round */
rorx $(32-5), A, TA /* T2 = A >>> 5 */
rorx $(32-30),A, TB /* b>>>2 for next round */
PRECALC (\r) /* msg scheduling for next 2 blocks */
/*
* Calculate F for the next round
* (b & c) ^ andn[b, d]
*/
and B, A /* b&c */
xor T1, A /* F1 = (b&c) ^ (~b&d) */
lea (RE,RTA), E /* E += A >>> 5 */
.endm
.macro ROUND_F2 r
add WK(\r), E
lea (RE,RTB), E /* Add F from the previous round */
/* Calculate F for the next round */
rorx $(32-5), A, TA /* T2 = A >>> 5 */
.if ((round_id) < 79)
rorx $(32-30), A, TB /* b>>>2 for next round */
.endif
PRECALC (\r) /* msg scheduling for next 2 blocks */
.if ((round_id) < 79)
xor B, A
.endif
add TA, E /* E += A >>> 5 */
.if ((round_id) < 79)
xor C, A
.endif
.endm
.macro ROUND_F3 r
add WK(\r), E
PRECALC (\r) /* msg scheduling for next 2 blocks */
lea (RE,RTB), E /* Add F from the previous round */
mov B, T1
or A, T1
rorx $(32-5), A, TA /* T2 = A >>> 5 */
rorx $(32-30), A, TB /* b>>>2 for next round */
/* Calculate F for the next round
* (b and c) or (d and (b or c))
*/
and C, T1
and B, A
or T1, A
add TA, E /* E += A >>> 5 */
.endm
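For reference, the three boolean functions these round macros compute, restated in Go (editor's sketch). ROUND_F1 builds F1 with ANDN and XOR: (b&c) and (^b&d) can never both have the same bit set, so the XOR equals the usual OR form.

```go
package main

import "fmt"

func f1(b, c, d uint32) uint32 { return (b & c) ^ (^b & d) }      // rounds 0-19 (choose)
func f2(b, c, d uint32) uint32 { return b ^ c ^ d }               // rounds 20-39, 60-79 (parity)
func f3(b, c, d uint32) uint32 { return (b & c) | (d & (b | c)) } // rounds 40-59 (majority)

func main() {
	b, c, d := uint32(0xdeadbeef), uint32(0x01234567), uint32(0x89abcdef)
	fmt.Println(f1(b, c, d) == (b&c | ^b&d))          // true: XOR form == OR form
	fmt.Printf("%#x %#x\n", f2(b, c, d), f3(b, c, d)) // parity and majority
}
```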
/*
* macro implements 80 rounds of SHA-1, for multiple blocks with s/w pipelining
*/
.macro SHA1_PIPELINED_MAIN_BODY
REGALLOC
mov (HASH_PTR), A
mov 4(HASH_PTR), B
mov 8(HASH_PTR), C
mov 12(HASH_PTR), D
mov 16(HASH_PTR), E
mov %rsp, PRECALC_BUF
lea (2*4*80+32)(%rsp), WK_BUF
# Precalc WK for first 2 blocks
PRECALC_OFFSET = 0
.set i, 0
.rept 160
PRECALC i
.set i, i + 1
.endr
PRECALC_OFFSET = 128
xchg WK_BUF, PRECALC_BUF
.align 32
_loop:
/*
* code loops through more than one block;
* we use the K_BASE value as a signal of the last block,
* it is set below by: cmovae K_BASE, BUFFER_PTR
*/
cmp K_BASE, BUFFER_PTR
jne _begin
.align 32
jmp _end
.align 32
_begin:
/*
* Do first block
* rounds: 0,2,4,6,8
*/
.set j, 0
.rept 5
RR j
.set j, j+2
.endr
jmp _loop0
_loop0:
/*
* rounds:
* 10,12,14,16,18
* 20,22,24,26,28
* 30,32,34,36,38
* 40,42,44,46,48
* 50,52,54,56,58
*/
.rept 25
RR j
.set j, j+2
.endr
add $(2*64), BUFFER_PTR /* move to next odd-64-byte block */
cmp BUFFER_END, BUFFER_PTR /* is current block the last one? */
cmovae K_BASE, BUFFER_PTR /* signal the last iteration smartly */
/*
* rounds
* 60,62,64,66,68
* 70,72,74,76,78
*/
.rept 10
RR j
.set j, j+2
.endr
UPDATE_HASH (HASH_PTR), A
UPDATE_HASH 4(HASH_PTR), TB
UPDATE_HASH 8(HASH_PTR), C
UPDATE_HASH 12(HASH_PTR), D
UPDATE_HASH 16(HASH_PTR), E
cmp K_BASE, BUFFER_PTR /* is current block the last one? */
je _loop
mov TB, B
/* Process second block */
/*
* rounds
* 0+80, 2+80, 4+80, 6+80, 8+80
* 10+80,12+80,14+80,16+80,18+80
*/
.set j, 0
.rept 10
RR j+80
.set j, j+2
.endr
jmp _loop1
_loop1:
/*
* rounds
* 20+80,22+80,24+80,26+80,28+80
* 30+80,32+80,34+80,36+80,38+80
*/
.rept 10
RR j+80
.set j, j+2
.endr
jmp _loop2
_loop2:
/*
* rounds
* 40+80,42+80,44+80,46+80,48+80
* 50+80,52+80,54+80,56+80,58+80
*/
.rept 10
RR j+80
.set j, j+2
.endr
add $(2*64), BUFFER_PTR2 /* move to next even-64-byte block */
cmp BUFFER_END, BUFFER_PTR2 /* is current block the last one? */
cmovae K_BASE, BUFFER_PTR /* signal the last iteration smartly */
jmp _loop3
_loop3:
/*
* rounds
* 60+80,62+80,64+80,66+80,68+80
* 70+80,72+80,74+80,76+80,78+80
*/
.rept 10
RR j+80
.set j, j+2
.endr
UPDATE_HASH (HASH_PTR), A
UPDATE_HASH 4(HASH_PTR), TB
UPDATE_HASH 8(HASH_PTR), C
UPDATE_HASH 12(HASH_PTR), D
UPDATE_HASH 16(HASH_PTR), E
/* Reset state for AVX2 reg permutation */
mov A, TA
mov TB, A
mov C, TB
mov E, C
mov D, B
mov TA, D
REGALLOC
xchg WK_BUF, PRECALC_BUF
jmp _loop
.align 32
_end:
.endm
.section .rodata
#define K1 0x5a827999
#define K2 0x6ed9eba1
#define K3 0x8f1bbcdc
#define K4 0xca62c1d6
.align 128
K_XMM_AR:
.long K1, K1, K1, K1
.long K1, K1, K1, K1
.long K2, K2, K2, K2
.long K2, K2, K2, K2
.long K3, K3, K3, K3
.long K3, K3, K3, K3
.long K4, K4, K4, K4
.long K4, K4, K4, K4
BSWAP_SHUFB_CTL:
.long 0x00010203
.long 0x04050607
.long 0x08090a0b
.long 0x0c0d0e0f
.long 0x00010203
.long 0x04050607
.long 0x08090a0b
.long 0x0c0d0e0f
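BSWAP_SHUFB_CTL is the vpshufb control that byte-swaps every 32-bit lane; per lane it is the vector counterpart of loading a message word big-endian. In Go terms (illustrative only):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	p := []byte{0x01, 0x02, 0x03, 0x04}
	// What one lane of the vpshufb byte-swap achieves for the schedule:
	fmt.Printf("%#08x\n", binary.BigEndian.Uint32(p)) // 0x1020304
}
```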
# void sha1_transform(int32_t *hash, const char* input, size_t num_blocks);
.text
ENTRY(sha1_transform)
push %rbx
push %rbp
push %r12
push %r13
push %r14
push %r15
RESERVE_STACK = (W_SIZE*4 + 8+24)
/* Align stack */
mov %rsp, %rbx
and $~(0x20-1), %rsp
push %rbx
sub $RESERVE_STACK, %rsp
avx2_zeroupper
lea K_XMM_AR(%rip), K_BASE
mov CTX, HASH_PTR
mov BUF, BUFFER_PTR
lea 64(BUF), BUFFER_PTR2
shl $6, CNT /* mul by 64 */
add BUF, CNT
add $64, CNT
mov CNT, BUFFER_END
cmp BUFFER_END, BUFFER_PTR2
cmovae K_BASE, BUFFER_PTR2
xmm_mov BSWAP_SHUFB_CTL(%rip), YMM_SHUFB_BSWAP
SHA1_PIPELINED_MAIN_BODY
avx2_zeroupper
add $RESERVE_STACK, %rsp
pop %rsp
pop %r15
pop %r14
pop %r13
pop %r12
pop %rbp
pop %rbx
ret
ENDPROC(sha1_transform)
#endif


@ -1,169 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file of
// Golang project:
// https://github.com/golang/go/blob/master/LICENSE
// Using this part of Minio codebase under the license
// Apache License Version 2.0 with modifications
// Package sha1 implements the SHA1 hash algorithm as defined in RFC 3174.
package sha1
import (
"hash"
"io"
"github.com/minio/minio/pkg/cpu"
)
// The size of a SHA1 checksum in bytes.
const Size = 20
// The blocksize of SHA1 in bytes.
const BlockSize = 64
const (
chunk = 64
init0 = 0x67452301
init1 = 0xEFCDAB89
init2 = 0x98BADCFE
init3 = 0x10325476
init4 = 0xC3D2E1F0
)
// digest represents the partial evaluation of a checksum.
type digest struct {
h [5]uint32
x [chunk]byte
nx int
len uint64
}
// Reset digest
func (d *digest) Reset() {
d.h[0] = init0
d.h[1] = init1
d.h[2] = init2
d.h[3] = init3
d.h[4] = init4
d.nx = 0
d.len = 0
}
// New returns a new hash.Hash computing the SHA1 checksum.
func New() hash.Hash {
d := new(digest)
d.Reset()
return d
}
func block(dig *digest, p []byte) {
switch {
case cpu.HasAVX2():
blockAVX2(dig, p)
case cpu.HasSSE41():
blockSSE3(dig, p)
default:
blockGeneric(dig, p)
}
}
// Return output size
func (d *digest) Size() int { return Size }
// Return checksum blocksize
func (d *digest) BlockSize() int { return BlockSize }
// Write to digest
func (d *digest) Write(p []byte) (nn int, err error) {
nn = len(p)
d.len += uint64(nn)
if d.nx > 0 {
n := copy(d.x[d.nx:], p)
d.nx += n
if d.nx == chunk {
block(d, d.x[:])
d.nx = 0
}
p = p[n:]
}
if len(p) >= chunk {
n := len(p) &^ (chunk - 1)
block(d, p[:n])
p = p[n:]
}
if len(p) > 0 {
d.nx = copy(d.x[:], p)
}
return
}
// Return checksum bytes
func (d *digest) Sum(in []byte) []byte {
// Make a copy of d0 so that caller can keep writing and summing.
d0 := *d
hash := d0.checkSum()
return append(in, hash[:]...)
}
// Intermediate checksum function
func (d *digest) checkSum() [Size]byte {
len := d.len
// Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
var tmp [64]byte
tmp[0] = 0x80
if len%64 < 56 {
d.Write(tmp[0 : 56-len%64])
} else {
d.Write(tmp[0 : 64+56-len%64])
}
// Length in bits.
len <<= 3
for i := uint(0); i < 8; i++ {
tmp[i] = byte(len >> (56 - 8*i))
}
d.Write(tmp[0:8])
if d.nx != 0 {
panic("d.nx != 0")
}
var digest [Size]byte
for i, s := range d.h {
digest[i*4] = byte(s >> 24)
digest[i*4+1] = byte(s >> 16)
digest[i*4+2] = byte(s >> 8)
digest[i*4+3] = byte(s)
}
return digest
}
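A worked instance of the padding rule may help: for a 3-byte message, 3 % 64 = 3 < 56, so checkSum writes 0x80 plus 52 zero bytes (53 bytes in all) and then the 8-byte bit length (24), closing out exactly one 64-byte block. A sketch of the pad-length arithmetic (editor's addition, not part of the source):

```go
package sha1

// padLen sketches the arithmetic in checkSum: how many padding bytes are
// written before the 8-byte length field (0x80 is the first of them).
func padLen(msgLen uint64) uint64 {
	if msgLen%64 < 56 {
		return 56 - msgLen%64 // e.g. msgLen=3 -> 53; 3+53+8 = 64
	}
	return 64 + 56 - msgLen%64 // tail too close to the block boundary
}
```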
// Convenience functions
// Sum1 - single caller sha1 helper
func Sum1(data []byte) [Size]byte {
var d digest
d.Reset()
d.Write(data)
return d.checkSum()
}
// Sum - io.Reader based streaming sha1 helper
func Sum(reader io.Reader) ([]byte, error) {
h := New()
byteBuffer := make([]byte, 1024*1024) // reuse one buffer instead of allocating per read
var err error
for err == nil {
var length int
length, err = reader.Read(byteBuffer)
h.Write(byteBuffer[0:length])
}
if err != io.EOF {
return nil, err
}
return h.Sum(nil), nil
}
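A minimal use of the two helpers this file exports, checked against the "abc" golden vector from the package's tests; the import path is an assumption, adjust it to wherever the package lives in the tree:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/minio/minio/pkg/crypto/sha1" // assumed import path
)

func main() {
	sum := sha1.Sum1([]byte("abc"))
	fmt.Printf("%x\n", sum) // a9993e364706816aba3e25717850c26c9cd0d89d

	streamed, err := sha1.Sum(bytes.NewReader([]byte("abc")))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", streamed) // same digest via the io.Reader interface
}
```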


@ -1,579 +0,0 @@
;---------------------
; https://software.intel.com/en-us/articles/improving-the-performance-of-the-secure-hash-algorithm-1
;
; License information:
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; This implementation notably advances the performance of SHA-1 algorithm compared to existing
; implementations. We are encouraging all projects utilizing SHA-1 to integrate this new fast
; implementation and are ready to help if issues or concerns arise (you are welcome to leave
; a comment or write an email to the authors). It is provided 'as is' and free for either
; commercial or non-commercial use.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;
; This code implements two interfaces of SHA-1 update function: 1) working on a single
; 64-byte block and 2) working on a buffer of multiple 64-byte blocks. The multiple-block
; version of the code is software pipelined and faster overall; it is the default. Assemble
; with -DINTEL_SHA1_SINGLEBLOCK to select the single 64-byte block function interface.
;
; C++ prototypes of implemented functions are below:
;
; #ifndef INTEL_SHA1_SINGLEBLOCK
; // Updates 20-byte SHA-1 record in 'hash' for 'num_blocks' consecutive 64-byte blocks
; extern "C" void sha1_update_intel(int *hash, const char* input, size_t num_blocks);
; #else
; // Updates 20-byte SHA-1 record in 'hash' for one 64-byte block pointed by 'input'
; extern "C" void sha1_update_intel(int *hash, const char* input);
; #endif
;
; Function name 'sha1_update_intel' can be changed in the source or via macro:
; -DINTEL_SHA1_UPDATE_FUNCNAME=my_sha1_update_func_name
;
; It implements both the UNIX (default) and Windows ABIs; use -DWIN_ABI on Windows
;
; Code checks CPU for SSSE3 support via CPUID feature flag (CPUID.1.ECX.SSSE3[bit 9]==1),
; and performs dispatch. Since in most cases the functionality on non-SSSE3 supporting CPUs
; is also required, the default (i.e. the one being replaced) function can be provided for
; dispatch on such CPUs, the name of old function can be changed in the source or via macro:
; -DINTEL_SHA1_UPDATE_DEFAULT_DISPATCH=default_sha1_update_function_name
;
; Authors: Maxim Locktyukhin and Ronen Zohar at Intel.com
;
%ifndef INTEL_SHA1_UPDATE_DEFAULT_DISPATCH
;; can be replaced with a default SHA-1 update function name
%define INTEL_SHA1_UPDATE_DEFAULT_DISPATCH sha1_intel_non_ssse3_cpu_stub_
%else
extern INTEL_SHA1_UPDATE_DEFAULT_DISPATCH
%endif
;; provide alternative SHA-1 update function's name here
%ifndef INTEL_SHA1_UPDATE_FUNCNAME
%define INTEL_SHA1_UPDATE_FUNCNAME sha1_update_intel
%endif
global INTEL_SHA1_UPDATE_FUNCNAME
%ifndef INTEL_SHA1_SINGLEBLOCK
%assign multiblock 1
%else
%assign multiblock 0
%endif
bits 64
default rel
%ifdef WIN_ABI
%xdefine arg1 rcx
%xdefine arg2 rdx
%xdefine arg3 r8
%else
%xdefine arg1 rdi
%xdefine arg2 rsi
%xdefine arg3 rdx
%endif
%xdefine ctx arg1
%xdefine buf arg2
%xdefine cnt arg3
%macro REGALLOC 0
%xdefine A ecx
%xdefine B esi
%xdefine C edi
%xdefine D ebp
%xdefine E edx
%xdefine T1 eax
%xdefine T2 ebx
%endmacro
%xdefine K_BASE r8
%xdefine HASH_PTR r9
%xdefine BUFFER_PTR r10
%xdefine BUFFER_END r11
%xdefine W_TMP xmm0
%xdefine W_TMP2 xmm9
%xdefine W0 xmm1
%xdefine W4 xmm2
%xdefine W8 xmm3
%xdefine W12 xmm4
%xdefine W16 xmm5
%xdefine W20 xmm6
%xdefine W24 xmm7
%xdefine W28 xmm8
%xdefine XMM_SHUFB_BSWAP xmm10
;; we keep a window of 16 pre-calculated w[i]+K dwords (64 bytes) in a circular buffer
%xdefine WK(t) (rsp + (t & 15)*4)
;------------------------------------------------------------------------------
;
; macro implements SHA-1 function's body for single or several 64-byte blocks
; first param: function's name
; second param: =0 - function implements single 64-byte block hash
; =1 - function implements multiple 64-byte block hash
; the function's 3rd argument is the number, greater than 0, of 64-byte blocks to calc hash for
;
%macro SHA1_VECTOR_ASM 2
align 4096
%1:
push rbx
push rbp
%ifdef WIN_ABI
push rdi
push rsi
%xdefine stack_size (16*4 + 16*5 + 8)
%else
%xdefine stack_size (16*4 + 8)
%endif
sub rsp, stack_size
%ifdef WIN_ABI
%xdefine xmm_save_base (rsp + 16*4)
xmm_mov [xmm_save_base + 0*16], xmm6
xmm_mov [xmm_save_base + 1*16], xmm7
xmm_mov [xmm_save_base + 2*16], xmm8
xmm_mov [xmm_save_base + 3*16], xmm9
xmm_mov [xmm_save_base + 4*16], xmm10
%endif
mov HASH_PTR, ctx
mov BUFFER_PTR, buf
%if (%2 == 1)
shl cnt, 6 ;; mul by 64
add cnt, buf
mov BUFFER_END, cnt
%endif
lea K_BASE, [K_XMM_AR]
xmm_mov XMM_SHUFB_BSWAP, [bswap_shufb_ctl]
SHA1_PIPELINED_MAIN_BODY %2
%ifdef WIN_ABI
xmm_mov xmm6, [xmm_save_base + 0*16]
xmm_mov xmm7, [xmm_save_base + 1*16]
xmm_mov xmm8, [xmm_save_base + 2*16]
xmm_mov xmm9, [xmm_save_base + 3*16]
xmm_mov xmm10,[xmm_save_base + 4*16]
%endif
add rsp, stack_size
%ifdef WIN_ABI
pop rsi
pop rdi
%endif
pop rbp
pop rbx
ret
%endmacro
;--------------------------------------------
; macro implements 80 rounds of SHA-1, for one 64-byte block or multiple blocks with s/w pipelining
; macro param: =0 - process single 64-byte block
; =1 - multiple blocks
;
%macro SHA1_PIPELINED_MAIN_BODY 1
REGALLOC
mov A, [HASH_PTR ]
mov B, [HASH_PTR+ 4]
mov C, [HASH_PTR+ 8]
mov D, [HASH_PTR+12]
mov E, [HASH_PTR+16]
%assign i 0
%rep W_PRECALC_AHEAD
W_PRECALC i
%assign i i+1
%endrep
%xdefine F F1
%if (%1 == 1) ;; code loops through more than one block
%%_loop:
cmp BUFFER_PTR, K_BASE ;; we use the K_BASE value as a signal of the last block,
jne %%_begin ;; it is set below by: cmovae BUFFER_PTR, K_BASE
jmp %%_end
align 32
%%_begin:
%endif
RR A,B,C,D,E,0
RR D,E,A,B,C,2
RR B,C,D,E,A,4
RR E,A,B,C,D,6
RR C,D,E,A,B,8
RR A,B,C,D,E,10
RR D,E,A,B,C,12
RR B,C,D,E,A,14
RR E,A,B,C,D,16
RR C,D,E,A,B,18
%xdefine F F2
RR A,B,C,D,E,20
RR D,E,A,B,C,22
RR B,C,D,E,A,24
RR E,A,B,C,D,26
RR C,D,E,A,B,28
RR A,B,C,D,E,30
RR D,E,A,B,C,32
RR B,C,D,E,A,34
RR E,A,B,C,D,36
RR C,D,E,A,B,38
%xdefine F F3
RR A,B,C,D,E,40
RR D,E,A,B,C,42
RR B,C,D,E,A,44
RR E,A,B,C,D,46
RR C,D,E,A,B,48
RR A,B,C,D,E,50
RR D,E,A,B,C,52
RR B,C,D,E,A,54
RR E,A,B,C,D,56
RR C,D,E,A,B,58
%xdefine F F4
%if (%1 == 1) ;; if code loops through more than one block
add BUFFER_PTR, 64 ;; move to next 64-byte block
cmp BUFFER_PTR, BUFFER_END ;; check if current block is the last one
cmovae BUFFER_PTR, K_BASE ;; smart way to signal the last iteration
%else
%xdefine W_NO_TAIL_PRECALC 1 ;; no software pipelining for single block interface
%endif
RR A,B,C,D,E,60
RR D,E,A,B,C,62
RR B,C,D,E,A,64
RR E,A,B,C,D,66
RR C,D,E,A,B,68
RR A,B,C,D,E,70
RR D,E,A,B,C,72
RR B,C,D,E,A,74
RR E,A,B,C,D,76
RR C,D,E,A,B,78
UPDATE_HASH [HASH_PTR ],A
UPDATE_HASH [HASH_PTR+ 4],B
UPDATE_HASH [HASH_PTR+ 8],C
UPDATE_HASH [HASH_PTR+12],D
UPDATE_HASH [HASH_PTR+16],E
%if (%1 == 1)
jmp %%_loop
align 32
%%_end:
%endif
%xdefine W_NO_TAIL_PRECALC 0
%xdefine F %error
%endmacro
%macro F1 3
mov T1,%2
xor T1,%3
and T1,%1
xor T1,%3
%endmacro
%macro F2 3
mov T1,%3
xor T1,%2
xor T1,%1
%endmacro
%macro F3 3
mov T1,%2
mov T2,%1
or T1,%1
and T2,%2
and T1,%3
or T1,T2
%endmacro
%define F4 F2
%macro UPDATE_HASH 2
add %2, %1
mov %1, %2
%endmacro
%macro W_PRECALC 1
%xdefine i (%1)
%if (i < 20)
%xdefine K_XMM 0
%elif (i < 40)
%xdefine K_XMM 16
%elif (i < 60)
%xdefine K_XMM 32
%else
%xdefine K_XMM 48
%endif
%if (i<16 || (i>=80 && i<(80 + W_PRECALC_AHEAD)))
%if (W_NO_TAIL_PRECALC == 0)
%xdefine i ((%1) % 80) ;; pre-compute for the next iteration
%if (i == 0)
W_PRECALC_RESET
%endif
W_PRECALC_00_15
%endif
%elif (i < 32)
W_PRECALC_16_31
%elif (i < 80) ;; rounds 32-79
W_PRECALC_32_79
%endif
%endmacro
%macro W_PRECALC_RESET 0
%xdefine W W0
%xdefine W_minus_04 W4
%xdefine W_minus_08 W8
%xdefine W_minus_12 W12
%xdefine W_minus_16 W16
%xdefine W_minus_20 W20
%xdefine W_minus_24 W24
%xdefine W_minus_28 W28
%xdefine W_minus_32 W
%endmacro
%macro W_PRECALC_ROTATE 0
%xdefine W_minus_32 W_minus_28
%xdefine W_minus_28 W_minus_24
%xdefine W_minus_24 W_minus_20
%xdefine W_minus_20 W_minus_16
%xdefine W_minus_16 W_minus_12
%xdefine W_minus_12 W_minus_08
%xdefine W_minus_08 W_minus_04
%xdefine W_minus_04 W
%xdefine W W_minus_32
%endmacro
%xdefine W_PRECALC_AHEAD 16
%xdefine W_NO_TAIL_PRECALC 0
%xdefine xmm_mov movdqa
%macro W_PRECALC_00_15 0
;; message scheduling pre-compute for rounds 0-15
%if ((i & 3) == 0) ;; blended SSE and ALU instruction scheduling, 1 vector iteration per 4 rounds
movdqu W_TMP, [BUFFER_PTR + (i * 4)]
%elif ((i & 3) == 1)
pshufb W_TMP, XMM_SHUFB_BSWAP
movdqa W, W_TMP
%elif ((i & 3) == 2)
paddd W_TMP, [K_BASE]
%elif ((i & 3) == 3)
movdqa [WK(i&~3)], W_TMP
W_PRECALC_ROTATE
%endif
%endmacro
%macro W_PRECALC_16_31 0
;; message scheduling pre-compute for rounds 16-31
;; calculating last 32 w[i] values in 8 XMM registers
;; pre-calculate K+w[i] values and store to mem, for later load by ALU add instruction
;;
;; "brute force" vectorization for rounds 16-31 only due to w[i]->w[i-3] dependency
;;
%if ((i & 3) == 0) ;; blended SSE and ALU instruction scheduling, 1 vector iteration per 4 rounds
movdqa W, W_minus_12
palignr W, W_minus_16, 8 ;; w[i-14]
movdqa W_TMP, W_minus_04
psrldq W_TMP, 4 ;; w[i-3]
pxor W, W_minus_08
%elif ((i & 3) == 1)
pxor W_TMP, W_minus_16
pxor W, W_TMP
movdqa W_TMP2, W
movdqa W_TMP, W
pslldq W_TMP2, 12
%elif ((i & 3) == 2)
psrld W, 31
pslld W_TMP, 1
por W_TMP, W
movdqa W, W_TMP2
psrld W_TMP2, 30
pslld W, 2
%elif ((i & 3) == 3)
pxor W_TMP, W
pxor W_TMP, W_TMP2
movdqa W, W_TMP
paddd W_TMP, [K_BASE + K_XMM]
movdqa [WK(i&~3)],W_TMP
W_PRECALC_ROTATE
%endif
%endmacro
%macro W_PRECALC_32_79 0
;; in SHA-1 specification: w[i] = (w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16]) rol 1
;; instead we compute the equivalent: w[i] = (w[i-6] ^ w[i-16] ^ w[i-28] ^ w[i-32]) rol 2
;; allows more efficient vectorization since w[i]=>w[i-3] dependency is broken
;;
%if ((i & 3) == 0) ;; blended SSE and ALU instruction scheduling, 1 vector iteration per 4 rounds
movdqa W_TMP, W_minus_04
pxor W, W_minus_28 ;; W is W_minus_32 before xor
palignr W_TMP, W_minus_08, 8
%elif ((i & 3) == 1)
pxor W, W_minus_16
pxor W, W_TMP
movdqa W_TMP, W
%elif ((i & 3) == 2)
psrld W, 30
pslld W_TMP, 2
por W_TMP, W
%elif ((i & 3) == 3)
movdqa W, W_TMP
paddd W_TMP, [K_BASE + K_XMM]
movdqa [WK(i&~3)],W_TMP
W_PRECALC_ROTATE
%endif
%endmacro
%macro RR 6 ;; RR does two rounds of SHA-1 back to back with W pre-calculation
;; TEMP = A
;; A = F( i, B, C, D ) + E + ROTATE_LEFT( A, 5 ) + W[i] + K(i)
;; C = ROTATE_LEFT( B, 30 )
;; D = C
;; E = D
;; B = TEMP
W_PRECALC (%6 + W_PRECALC_AHEAD)
F %2, %3, %4 ;; F returns result in T1
add %5, [WK(%6)]
rol %2, 30
mov T2, %1
add %4, [WK(%6 + 1)]
rol T2, 5
add %5, T1
W_PRECALC (%6 + W_PRECALC_AHEAD + 1)
add T2, %5
mov %5, T2
rol T2, 5
add %4, T2
F %1, %2, %3 ;; F returns result in T1
add %4, T1
rol %1, 30
;; write: %1, %2
;; rotate: %1<=%4, %2<=%5, %3<=%1, %4<=%2, %5<=%3
%endmacro
;;----------------------
section .data align=128
%xdefine K1 0x5a827999
%xdefine K2 0x6ed9eba1
%xdefine K3 0x8f1bbcdc
%xdefine K4 0xca62c1d6
align 128
K_XMM_AR:
DD K1, K1, K1, K1
DD K2, K2, K2, K2
DD K3, K3, K3, K3
DD K4, K4, K4, K4
align 16
bswap_shufb_ctl:
DD 00010203h
DD 04050607h
DD 08090a0bh
DD 0c0d0e0fh
;; dispatch pointer, points to the init routine for the first invocation
sha1_update_intel_dispatched:
DQ sha1_update_intel_init_
;;----------------------
section .text align=4096
SHA1_VECTOR_ASM sha1_update_intel_ssse3_, multiblock
align 32
sha1_update_intel_init_: ;; we get here with the first time invocation
call sha1_update_intel_dispatch_init_
INTEL_SHA1_UPDATE_FUNCNAME: ;; we get here after init
jmp qword [sha1_update_intel_dispatched]
;; CPUID feature flag based dispatch
sha1_update_intel_dispatch_init_:
push rax
push rbx
push rcx
push rdx
push rsi
lea rsi, [INTEL_SHA1_UPDATE_DEFAULT_DISPATCH]
mov eax, 1
cpuid
test ecx, 0200h ;; SSSE3 support, CPUID.1.ECX[bit 9]
jz _done
lea rsi, [sha1_update_intel_ssse3_]
_done:
mov [sha1_update_intel_dispatched], rsi
pop rsi
pop rdx
pop rcx
pop rbx
pop rax
ret
;;----------------------
;; in case a default SHA-1 update function implementation was not provided
;; and the code was invoked on a CPU without SSSE3 support, dispatch handles this
;; failure in the safest way - it jumps to the stub function with the UD2 instruction below
sha1_intel_non_ssse3_cpu_stub_:
ud2 ;; when no default SHA-1 was provided, non-SSSE3 CPUs safely fail here
ret
; END
;----------------------
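The same pick-once dispatch could be expressed at package init in Go. A hypothetical sketch using this package's existing pieces; the assembly probes SSSE3 directly, which the cpu package shown in this commit does not expose, so HasSSE41 stands in:

```go
package sha1

import "github.com/minio/minio/pkg/cpu"

// blockFn plays the role of sha1_update_intel_dispatched: selected once,
// every later call goes straight to the chosen implementation.
var blockFn = blockGeneric

func init() {
	if cpu.HasSSE41() { // stand-in for the CPUID.1.ECX[bit 9] SSSE3 test
		blockFn = blockSSE3
	}
}
```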


@ -1,138 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file of
// Golang project:
// https://github.com/golang/go/blob/master/LICENSE
// Using this part of Minio codebase under the license
// Apache License Version 2.0 with modifications
// SHA1 hash algorithm. See RFC 3174.
package sha1
import (
"crypto/rand"
"fmt"
"io"
"testing"
)
type sha1Test struct {
out string
in string
}
var golden = []sha1Test{
{"da39a3ee5e6b4b0d3255bfef95601890afd80709", ""},
{"86f7e437faa5a7fce15d1ddcb9eaeaea377667b8", "a"},
{"da23614e02469a0d7c7bd1bdab5c9c474b1904dc", "ab"},
{"a9993e364706816aba3e25717850c26c9cd0d89d", "abc"},
{"81fe8bfe87576c3ecb22426f8e57847382917acf", "abcd"},
{"03de6c570bfe24bfc328ccd7ca46b76eadaf4334", "abcde"},
{"1f8ac10f23c5b5bc1167bda84b833e5c057a77d2", "abcdef"},
{"2fb5e13419fc89246865e7a324f476ec624e8740", "abcdefg"},
{"425af12a0743502b322e93a015bcf868e324d56a", "abcdefgh"},
{"c63b19f1e4c8b5f76b25c49b8b87f57d8e4872a1", "abcdefghi"},
{"d68c19a0a345b7eab78d5e11e991c026ec60db63", "abcdefghij"},
{"ebf81ddcbe5bf13aaabdc4d65354fdf2044f38a7", "Discard medicine more than two years old."},
{"e5dea09392dd886ca63531aaa00571dc07554bb6", "He who has a shady past knows that nice guys finish last."},
{"45988f7234467b94e3e9494434c96ee3609d8f8f", "I wouldn't marry him with a ten foot pole."},
{"55dee037eb7460d5a692d1ce11330b260e40c988", "Free! Free!/A trip/to Mars/for 900/empty jars/Burma Shave"},
{"b7bc5fb91080c7de6b582ea281f8a396d7c0aee8", "The days of the digital watch are numbered. -Tom Stoppard"},
{"c3aed9358f7c77f523afe86135f06b95b3999797", "Nepal premier won't resign."},
{"6e29d302bf6e3a5e4305ff318d983197d6906bb9", "For every action there is an equal and opposite government program."},
{"597f6a540010f94c15d71806a99a2c8710e747bd", "His money is twice tainted: 'taint yours and 'taint mine."},
{"6859733b2590a8a091cecf50086febc5ceef1e80", "There is no reason for any individual to have a computer in their home. -Ken Olsen, 1977"},
{"514b2630ec089b8aee18795fc0cf1f4860cdacad", "It's a tiny change to the code and not completely disgusting. - Bob Manchek"},
{"c5ca0d4a7b6676fc7aa72caa41cc3d5df567ed69", "size: a.out: bad magic"},
{"74c51fa9a04eadc8c1bbeaa7fc442f834b90a00a", "The major problem is with sendmail. -Mark Horton"},
{"0b4c4ce5f52c3ad2821852a8dc00217fa18b8b66", "Give me a rock, paper and scissors and I will move the world. CCFestoon"},
{"3ae7937dd790315beb0f48330e8642237c61550a", "If the enemy is within range, then so are you."},
{"410a2b296df92b9a47412b13281df8f830a9f44b", "It's well we cannot hear the screams/That we create in others' dreams."},
{"841e7c85ca1adcddbdd0187f1289acb5c642f7f5", "You remind me of a TV show, but that's all right: I watch it anyway."},
{"163173b825d03b952601376b25212df66763e1db", "C is as portable as Stonehedge!!"},
{"32b0377f2687eb88e22106f133c586ab314d5279", "Even if I could be Shakespeare, I think I should still choose to be Faraday. - A. Huxley"},
{"0885aaf99b569542fd165fa44e322718f4a984e0", "The fugacity of a constituent in a mixture of gases at a given temperature is proportional to its mole fraction. Lewis-Randall Rule"},
{"6627d6904d71420b0bf3886ab629623538689f45", "How can you write a big system without C++? -Paul Glick"},
}
func TestGolden(t *testing.T) {
for i := 0; i < len(golden); i++ {
g := golden[i]
s := fmt.Sprintf("%x", Sum1([]byte(g.in)))
if s != g.out {
t.Fatalf("Sum function: sha1(%s) = %s want %s", g.in, s, g.out)
}
c := New()
for j := 0; j < 3; j++ {
if j < 2 {
io.WriteString(c, g.in)
} else {
io.WriteString(c, g.in[0:len(g.in)/2])
c.Sum(nil)
io.WriteString(c, g.in[len(g.in)/2:])
}
s := fmt.Sprintf("%x", c.Sum(nil))
if s != g.out {
t.Fatalf("sha1[%d](%s) = %s want %s", j, g.in, s, g.out)
}
c.Reset()
}
}
}
func TestSize(t *testing.T) {
c := New()
if got := c.Size(); got != Size {
t.Errorf("Size = %d; want %d", got, Size)
}
}
func TestBlockSize(t *testing.T) {
c := New()
if got := c.BlockSize(); got != BlockSize {
t.Errorf("BlockSize = %d; want %d", got, BlockSize)
}
}
// Tests that blockGeneric (pure Go) and block (dispatched to the assembly implementations where available) match.
func TestBlockGeneric(t *testing.T) {
gen, asm := New().(*digest), New().(*digest)
buf := make([]byte, BlockSize*20) // arbitrary factor
rand.Read(buf)
blockGeneric(gen, buf)
block(asm, buf)
if *gen != *asm {
t.Error("block and blockGeneric resulted in different states")
}
}
var bench = New()
var buf = make([]byte, 1024*1024)
func benchmarkSize(b *testing.B, size int) {
b.SetBytes(int64(size))
sum := make([]byte, bench.Size())
for i := 0; i < b.N; i++ {
bench.Reset()
bench.Write(buf[:size])
bench.Sum(sum[:0])
}
}
func BenchmarkHash8Bytes(b *testing.B) {
benchmarkSize(b, 8)
}
func BenchmarkHash1K(b *testing.B) {
benchmarkSize(b, 1024)
}
func BenchmarkHash8K(b *testing.B) {
benchmarkSize(b, 8192)
}
func BenchmarkHash1M(b *testing.B) {
benchmarkSize(b, 1024*1024)
}
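These benchmarks run with `go test -bench=.` from the package directory; whichever block implementation the runtime CPU dispatch selects is what gets measured.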


@ -1,167 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file of
// Golang project:
// https://github.com/golang/go/blob/master/LICENSE
// Using this part of Minio codebase under the license
// Apache License Version 2.0 with modifications
// Package sha1 implements the SHA1 hash algorithm as defined in RFC 3174.
package sha1
import (
"hash"
"io"
"github.com/minio/minio/pkg/cpu"
)
// The size of a SHA1 checksum in bytes.
const Size = 20
// The blocksize of SHA1 in bytes.
const BlockSize = 64
const (
chunk = 64
init0 = 0x67452301
init1 = 0xEFCDAB89
init2 = 0x98BADCFE
init3 = 0x10325476
init4 = 0xC3D2E1F0
)
// digest represents the partial evaluation of a checksum.
type digest struct {
h [5]uint32
x [chunk]byte
nx int
len uint64
}
// Reset digest
func (d *digest) Reset() {
d.h[0] = init0
d.h[1] = init1
d.h[2] = init2
d.h[3] = init3
d.h[4] = init4
d.nx = 0
d.len = 0
}
// New returns a new hash.Hash computing the SHA1 checksum.
func New() hash.Hash {
d := new(digest)
d.Reset()
return d
}
func block(dig *digest, p []byte) {
switch {
case cpu.HasSSE41():
blockSSE3(dig, p)
default:
blockGeneric(dig, p)
}
}
// Return output size
func (d *digest) Size() int { return Size }
// Return checksum blocksize
func (d *digest) BlockSize() int { return BlockSize }
// Write to digest
func (d *digest) Write(p []byte) (nn int, err error) {
nn = len(p)
d.len += uint64(nn)
if d.nx > 0 {
n := copy(d.x[d.nx:], p)
d.nx += n
if d.nx == chunk {
block(d, d.x[:])
d.nx = 0
}
p = p[n:]
}
if len(p) >= chunk {
n := len(p) &^ (chunk - 1)
block(d, p[:n])
p = p[n:]
}
if len(p) > 0 {
d.nx = copy(d.x[:], p)
}
return
}
// Return checksum bytes
func (d *digest) Sum(in []byte) []byte {
// Make a copy of d0 so that caller can keep writing and summing.
d0 := *d
hash := d0.checkSum()
return append(in, hash[:]...)
}
// Intermediate checksum function
func (d *digest) checkSum() [Size]byte {
len := d.len
// Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
var tmp [64]byte
tmp[0] = 0x80
if len%64 < 56 {
d.Write(tmp[0 : 56-len%64])
} else {
d.Write(tmp[0 : 64+56-len%64])
}
// Length in bits.
len <<= 3
for i := uint(0); i < 8; i++ {
tmp[i] = byte(len >> (56 - 8*i))
}
d.Write(tmp[0:8])
if d.nx != 0 {
panic("d.nx != 0")
}
var digest [Size]byte
for i, s := range d.h {
digest[i*4] = byte(s >> 24)
digest[i*4+1] = byte(s >> 16)
digest[i*4+2] = byte(s >> 8)
digest[i*4+3] = byte(s)
}
return digest
}
// Convenience functions
// Sum1 - single caller sha1 helper
func Sum1(data []byte) [Size]byte {
var d digest
d.Reset()
d.Write(data)
return d.checkSum()
}
// Sum - io.Reader based streaming sha1 helper
func Sum(reader io.Reader) ([]byte, error) {
h := New()
byteBuffer := make([]byte, 1024*1024) // reuse one buffer instead of allocating per read
var err error
for err == nil {
var length int
length, err = reader.Read(byteBuffer)
h.Write(byteBuffer[0:length])
}
if err != io.EOF {
return nil, err
}
return h.Sum(nil), nil
}


@ -1,21 +0,0 @@
// +build amd64
/*
* Minio Cloud Storage, (C) 2014 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sha1
//go:generate yasm -f macho64 -DINTEL_SHA1_UPDATE_FUNCNAME=_sha1_update_intel sha1_sse3_amd64.asm -o sha1_sse3_amd64.syso


@ -1,21 +0,0 @@
// +build amd64
/*
* Minio Cloud Storage, (C) 2014 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sha1
//go:generate yasm -f elf64 sha1_sse3_amd64.asm -o sha1_sse3_amd64.syso


@ -1,21 +0,0 @@
// +build amd64
/*
* Minio Cloud Storage, (C) 2014 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sha1
//go:generate yasm -f win64 -DWIN_ABI=1 sha1_sse3_amd64.asm -o sha1_sse3_amd64.syso


@ -1,29 +0,0 @@
// +build amd64
/*
* Minio Cloud Storage, (C) 2014 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sha1
// #include <stdint.h>
// #include <stdlib.h>
// void sha1_update_intel(int32_t *hash, const char* input, size_t num_blocks);
import "C"
import "unsafe"
func blockSSE3(dig *digest, p []byte) {
C.sha1_update_intel((*C.int32_t)(unsafe.Pointer(&dig.h[0])), (*C.char)(unsafe.Pointer(&p[0])), (C.size_t)(len(p)/chunk))
}


@ -1,96 +0,0 @@
// +build amd64
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file of
// Golang project:
// https://github.com/golang/go/blob/master/LICENSE
// Using this part of Minio codebase under the license
// Apache License Version 2.0 with modifications
package sha1
const (
_K0 = 0x5A827999
_K1 = 0x6ED9EBA1
_K2 = 0x8F1BBCDC
_K3 = 0xCA62C1D6
)
// blockGeneric is a portable, pure Go version of the SHA1 block step.
// It is used on CPUs without the assembly implementations, and by tests.
func blockGeneric(dig *digest, p []byte) {
var w [16]uint32
h0, h1, h2, h3, h4 := dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4]
for len(p) >= chunk {
// Can interlace the computation of w with the
// rounds below if needed for speed.
for i := 0; i < 16; i++ {
j := i * 4
w[i] = uint32(p[j])<<24 | uint32(p[j+1])<<16 | uint32(p[j+2])<<8 | uint32(p[j+3])
}
a, b, c, d, e := h0, h1, h2, h3, h4
// Each of the four 20-iteration rounds
// differs only in the computation of f and
// the choice of K (_K0, _K1, etc).
i := 0
for ; i < 16; i++ {
f := b&c | (^b)&d
a5 := a<<5 | a>>(32-5)
b30 := b<<30 | b>>(32-30)
t := a5 + f + e + w[i&0xf] + _K0
a, b, c, d, e = t, a, b30, c, d
}
for ; i < 20; i++ {
tmp := w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf]
w[i&0xf] = tmp<<1 | tmp>>(32-1)
f := b&c | (^b)&d
a5 := a<<5 | a>>(32-5)
b30 := b<<30 | b>>(32-30)
t := a5 + f + e + w[i&0xf] + _K0
a, b, c, d, e = t, a, b30, c, d
}
for ; i < 40; i++ {
tmp := w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf]
w[i&0xf] = tmp<<1 | tmp>>(32-1)
f := b ^ c ^ d
a5 := a<<5 | a>>(32-5)
b30 := b<<30 | b>>(32-30)
t := a5 + f + e + w[i&0xf] + _K1
a, b, c, d, e = t, a, b30, c, d
}
for ; i < 60; i++ {
tmp := w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf]
w[i&0xf] = tmp<<1 | tmp>>(32-1)
f := ((b | c) & d) | (b & c)
a5 := a<<5 | a>>(32-5)
b30 := b<<30 | b>>(32-30)
t := a5 + f + e + w[i&0xf] + _K2
a, b, c, d, e = t, a, b30, c, d
}
for ; i < 80; i++ {
tmp := w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf]
w[i&0xf] = tmp<<1 | tmp>>(32-1)
f := b ^ c ^ d
a5 := a<<5 | a>>(32-5)
b30 := b<<30 | b>>(32-30)
t := a5 + f + e + w[i&0xf] + _K3
a, b, c, d, e = t, a, b30, c, d
}
h0 += a
h1 += b
h2 += c
h3 += d
h4 += e
p = p[chunk:]
}
dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4] = h0, h1, h2, h3, h4
}


@ -1,35 +0,0 @@
// +build amd64
/*
* Minio Cloud Storage, (C) 2014 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sha1
// #cgo CFLAGS: -DHAS_AVX2
// #include <stdint.h>
// #include <stdlib.h>
// void sha1_transform(int32_t *hash, const char* input, size_t num_blocks);
// void sha1_update_intel(int32_t *hash, const char* input, size_t num_blocks);
import "C"
import "unsafe"
func blockAVX2(dig *digest, p []byte) {
C.sha1_transform((*C.int32_t)(unsafe.Pointer(&dig.h[0])), (*C.char)(unsafe.Pointer(&p[0])), (C.size_t)(len(p)/chunk))
}
func blockSSE3(dig *digest, p []byte) {
C.sha1_update_intel((*C.int32_t)(unsafe.Pointer(&dig.h[0])), (*C.char)(unsafe.Pointer(&p[0])), (C.size_t)(len(p)/chunk))
}


@ -1,29 +0,0 @@
// +build amd64
/*
* Minio Cloud Storage, (C) 2014 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sha1
// #include <stdint.h>
// #include <stdlib.h>
// void sha1_update_intel(int32_t *hash, const char* input, size_t num_blocks);
import "C"
import "unsafe"
func blockSSE3(dig *digest, p []byte) {
C.sha1_update_intel((*C.int32_t)(unsafe.Pointer(&dig.h[0])), (*C.char)(unsafe.Pointer(&p[0])), (C.size_t)(len(p)/chunk))
}


@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
View file

@@ -1,3 +0,0 @@
# Donut
donut - Donut (do not delete) on disk format implementation released under [Apache license v2](./LICENSE).
View file

@@ -1,47 +0,0 @@
package donut
// BucketACL - bucket level access control
type BucketACL string
// different types of ACL's currently supported for buckets
const (
BucketPrivate = BucketACL("private")
BucketPublicRead = BucketACL("public-read")
BucketPublicReadWrite = BucketACL("public-read-write")
)
func (b BucketACL) String() string {
return string(b)
}
// IsPrivate - is acl Private
func (b BucketACL) IsPrivate() bool {
return b == BucketACL("private")
}
// IsPublicRead - is acl PublicRead
func (b BucketACL) IsPublicRead() bool {
return b == BucketACL("public-read")
}
// IsPublicReadWrite - is acl PublicReadWrite
func (b BucketACL) IsPublicReadWrite() bool {
return b == BucketACL("public-read-write")
}
// IsValidBucketACL - is provided acl string supported
func IsValidBucketACL(acl string) bool {
switch acl {
case "private":
fallthrough
case "public-read":
fallthrough
case "public-read-write":
return true
case "":
// by default its "private"
return true
default:
return false
}
}
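
For orientation, a minimal sketch (a hypothetical helper, same package) of how the validator and the predicate methods compose when handling a request:

```go
// describeACL is illustrative only and not part of this package.
func describeACL(acl string) string {
	if !IsValidBucketACL(acl) {
		return "unsupported ACL"
	}
	b := BucketACL(acl) // an empty string behaves as "private"
	switch {
	case b.IsPublicReadWrite():
		return "anonymous read/write"
	case b.IsPublicRead():
		return "anonymous read-only"
	default:
		return "owner-only access"
	}
}
```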
View file

@@ -1,639 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package donut
import (
"bytes"
"fmt"
"hash"
"io"
"path/filepath"
"sort"
"strings"
"sync"
"time"
"crypto/md5"
"encoding/hex"
"encoding/json"
"github.com/minio/minio/pkg/crypto/sha256"
"github.com/minio/minio/pkg/crypto/sha512"
"github.com/minio/minio/pkg/donut/disk"
"github.com/minio/minio/pkg/probe"
signv4 "github.com/minio/minio/pkg/signature"
)
const (
blockSize = 10 * 1024 * 1024
)
// internal struct carrying bucket specific information
type bucket struct {
name string
acl string
time time.Time
donutName string
nodes map[string]node
lock *sync.Mutex
}
// newBucket - instantiate a new bucket
func newBucket(bucketName, aclType, donutName string, nodes map[string]node) (bucket, BucketMetadata, *probe.Error) {
if strings.TrimSpace(bucketName) == "" || strings.TrimSpace(donutName) == "" {
return bucket{}, BucketMetadata{}, probe.NewError(InvalidArgument{})
}
b := bucket{}
t := time.Now().UTC()
b.name = bucketName
b.acl = aclType
b.time = t
b.donutName = donutName
b.nodes = nodes
b.lock = new(sync.Mutex)
metadata := BucketMetadata{}
metadata.Version = bucketMetadataVersion
metadata.Name = bucketName
metadata.ACL = BucketACL(aclType)
metadata.Created = t
metadata.Metadata = make(map[string]string)
metadata.BucketObjects = make(map[string]struct{})
return b, metadata, nil
}
// getBucketName - return the name of the bucket
func (b bucket) getBucketName() string {
return b.name
}
// getBucketMetadataReaders - open bucket metadata readers, one per disk
func (b bucket) getBucketMetadataReaders() (map[int]io.ReadCloser, *probe.Error) {
readers := make(map[int]io.ReadCloser)
var disks map[int]disk.Disk
var err *probe.Error
for _, node := range b.nodes {
disks, err = node.ListDisks()
if err != nil {
return nil, err.Trace()
}
}
var bucketMetaDataReader io.ReadCloser
for order, disk := range disks {
bucketMetaDataReader, err = disk.Open(filepath.Join(b.donutName, bucketMetadataConfig))
if err != nil {
continue
}
readers[order] = bucketMetaDataReader
}
if err != nil {
return nil, err.Trace()
}
return readers, nil
}
// getBucketMetadata - decode bucket metadata from the first readable disk
func (b bucket) getBucketMetadata() (*AllBuckets, *probe.Error) {
metadata := new(AllBuckets)
var readers map[int]io.ReadCloser
{
var err *probe.Error
readers, err = b.getBucketMetadataReaders()
if err != nil {
return nil, err.Trace()
}
}
for _, reader := range readers {
defer reader.Close()
}
var err error
for _, reader := range readers {
jenc := json.NewDecoder(reader)
if err = jenc.Decode(metadata); err == nil {
return metadata, nil
}
}
return nil, probe.NewError(err)
}
// GetObjectMetadata - get metadata for an object
func (b bucket) GetObjectMetadata(objectName string) (ObjectMetadata, *probe.Error) {
b.lock.Lock()
defer b.lock.Unlock()
return b.readObjectMetadata(normalizeObjectName(objectName))
}
// ListObjects - list all objects
func (b bucket) ListObjects(prefix, marker, delimiter string, maxkeys int) (ListObjectsResults, *probe.Error) {
b.lock.Lock()
defer b.lock.Unlock()
if maxkeys <= 0 {
maxkeys = 1000
}
var isTruncated bool
var objects []string
bucketMetadata, err := b.getBucketMetadata()
if err != nil {
return ListObjectsResults{}, err.Trace()
}
for objectName := range bucketMetadata.Buckets[b.getBucketName()].Multiparts {
if strings.HasPrefix(objectName, strings.TrimSpace(prefix)) {
if objectName > marker {
objects = append(objects, objectName)
}
}
}
for objectName := range bucketMetadata.Buckets[b.getBucketName()].BucketObjects {
if strings.HasPrefix(objectName, strings.TrimSpace(prefix)) {
if objectName > marker {
objects = append(objects, objectName)
}
}
}
if strings.TrimSpace(prefix) != "" {
objects = TrimPrefix(objects, prefix)
}
var prefixes []string
var filteredObjects []string
filteredObjects = objects
if strings.TrimSpace(delimiter) != "" {
filteredObjects = HasNoDelimiter(objects, delimiter)
prefixes = HasDelimiter(objects, delimiter)
prefixes = SplitDelimiter(prefixes, delimiter)
prefixes = SortUnique(prefixes)
}
var results []string
var commonPrefixes []string
for _, commonPrefix := range prefixes {
commonPrefixes = append(commonPrefixes, prefix+commonPrefix)
}
filteredObjects = RemoveDuplicates(filteredObjects)
sort.Strings(filteredObjects)
for _, objectName := range filteredObjects {
if len(results) >= maxkeys {
isTruncated = true
break
}
results = append(results, prefix+objectName)
}
results = RemoveDuplicates(results)
commonPrefixes = RemoveDuplicates(commonPrefixes)
sort.Strings(commonPrefixes)
listObjects := ListObjectsResults{}
listObjects.Objects = make(map[string]ObjectMetadata)
listObjects.CommonPrefixes = commonPrefixes
listObjects.IsTruncated = isTruncated
for _, objectName := range results {
objMetadata, err := b.readObjectMetadata(normalizeObjectName(objectName))
if err != nil {
return ListObjectsResults{}, err.Trace()
}
listObjects.Objects[objectName] = objMetadata
}
return listObjects, nil
}
// ReadObject - open an object to read
func (b bucket) ReadObject(objectName string) (reader io.ReadCloser, size int64, err *probe.Error) {
b.lock.Lock()
defer b.lock.Unlock()
reader, writer := io.Pipe()
// get list of objects
bucketMetadata, err := b.getBucketMetadata()
if err != nil {
return nil, 0, err.Trace()
}
// check if object exists
if _, ok := bucketMetadata.Buckets[b.getBucketName()].BucketObjects[objectName]; !ok {
return nil, 0, probe.NewError(ObjectNotFound{Object: objectName})
}
objMetadata, err := b.readObjectMetadata(normalizeObjectName(objectName))
if err != nil {
return nil, 0, err.Trace()
}
// read and reply back to GetObject() request in a go-routine
go b.readObjectData(normalizeObjectName(objectName), writer, objMetadata)
return reader, objMetadata.Size, nil
}
// WriteObject - write a new object into bucket
func (b bucket) WriteObject(objectName string, objectData io.Reader, size int64, expectedMD5Sum string, metadata map[string]string, signature *signv4.Signature) (ObjectMetadata, *probe.Error) {
b.lock.Lock()
defer b.lock.Unlock()
if objectName == "" || objectData == nil {
return ObjectMetadata{}, probe.NewError(InvalidArgument{})
}
writers, err := b.getObjectWriters(normalizeObjectName(objectName), "data")
if err != nil {
return ObjectMetadata{}, err.Trace()
}
sumMD5 := md5.New()
sum512 := sha512.New()
var sum256 hash.Hash
var mwriter io.Writer
if signature != nil {
sum256 = sha256.New()
mwriter = io.MultiWriter(sumMD5, sum256, sum512)
} else {
mwriter = io.MultiWriter(sumMD5, sum512)
}
objMetadata := ObjectMetadata{}
objMetadata.Version = objectMetadataVersion
objMetadata.Created = time.Now().UTC()
// if total writers are only '1' do not compute erasure
switch len(writers) == 1 {
case true:
mw := io.MultiWriter(writers[0], mwriter)
totalLength, err := io.Copy(mw, objectData)
if err != nil {
CleanupWritersOnError(writers)
return ObjectMetadata{}, probe.NewError(err)
}
objMetadata.Size = totalLength
case false:
// calculate data and parity dictated by total number of writers
k, m, err := b.getDataAndParity(len(writers))
if err != nil {
CleanupWritersOnError(writers)
return ObjectMetadata{}, err.Trace()
}
// write encoded data with k, m and writers
chunkCount, totalLength, err := b.writeObjectData(k, m, writers, objectData, size, mwriter)
if err != nil {
CleanupWritersOnError(writers)
return ObjectMetadata{}, err.Trace()
}
/// donutMetadata section
objMetadata.BlockSize = blockSize
objMetadata.ChunkCount = chunkCount
objMetadata.DataDisks = k
objMetadata.ParityDisks = m
objMetadata.Size = int64(totalLength)
}
objMetadata.Bucket = b.getBucketName()
objMetadata.Object = objectName
dataMD5sum := sumMD5.Sum(nil)
dataSHA512sum := sum512.Sum(nil)
if signature != nil {
ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sum256.Sum(nil)))
if err != nil {
// error occurred while doing signature calculation, we return and also cleanup any temporary writers.
CleanupWritersOnError(writers)
return ObjectMetadata{}, err.Trace()
}
if !ok {
// purge all writers, when control flow reaches here
//
// a signature mismatch occurred, so all temporary files are removed and all data purged
CleanupWritersOnError(writers)
return ObjectMetadata{}, probe.NewError(signv4.DoesNotMatch{})
}
}
objMetadata.MD5Sum = hex.EncodeToString(dataMD5sum)
objMetadata.SHA512Sum = hex.EncodeToString(dataSHA512sum)
// Verify if the written object is equal to what is expected, only if it is requested as such
if strings.TrimSpace(expectedMD5Sum) != "" {
if err := b.isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), objMetadata.MD5Sum); err != nil {
return ObjectMetadata{}, err.Trace()
}
}
objMetadata.Metadata = metadata
// write object specific metadata
if err := b.writeObjectMetadata(normalizeObjectName(objectName), objMetadata); err != nil {
// purge all writers, when control flow reaches here
CleanupWritersOnError(writers)
return ObjectMetadata{}, err.Trace()
}
// close all writers, when control flow reaches here
for _, writer := range writers {
writer.Close()
}
return objMetadata, nil
}
// isMD5SumEqual - returns error if md5sum mismatches, otherwise returns `nil`
func (b bucket) isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) *probe.Error {
if strings.TrimSpace(expectedMD5Sum) != "" && strings.TrimSpace(actualMD5Sum) != "" {
expectedMD5SumBytes, err := hex.DecodeString(expectedMD5Sum)
if err != nil {
return probe.NewError(err)
}
actualMD5SumBytes, err := hex.DecodeString(actualMD5Sum)
if err != nil {
return probe.NewError(err)
}
if !bytes.Equal(expectedMD5SumBytes, actualMD5SumBytes) {
return probe.NewError(BadDigest{})
}
return nil
}
return probe.NewError(InvalidArgument{})
}
// writeObjectMetadata - write additional object metadata
func (b bucket) writeObjectMetadata(objectName string, objMetadata ObjectMetadata) *probe.Error {
if objMetadata.Object == "" {
return probe.NewError(InvalidArgument{})
}
objMetadataWriters, err := b.getObjectWriters(objectName, objectMetadataConfig)
if err != nil {
return err.Trace()
}
for _, objMetadataWriter := range objMetadataWriters {
jenc := json.NewEncoder(objMetadataWriter)
if err := jenc.Encode(&objMetadata); err != nil {
// Close writers and purge all temporary entries
CleanupWritersOnError(objMetadataWriters)
return probe.NewError(err)
}
}
for _, objMetadataWriter := range objMetadataWriters {
objMetadataWriter.Close()
}
return nil
}
// readObjectMetadata - read object metadata
func (b bucket) readObjectMetadata(objectName string) (ObjectMetadata, *probe.Error) {
if objectName == "" {
return ObjectMetadata{}, probe.NewError(InvalidArgument{})
}
objMetadata := ObjectMetadata{}
objMetadataReaders, err := b.getObjectReaders(objectName, objectMetadataConfig)
if err != nil {
return ObjectMetadata{}, err.Trace()
}
for _, objMetadataReader := range objMetadataReaders {
defer objMetadataReader.Close()
}
{
var err error
for _, objMetadataReader := range objMetadataReaders {
jdec := json.NewDecoder(objMetadataReader)
if err = jdec.Decode(&objMetadata); err == nil {
return objMetadata, nil
}
}
return ObjectMetadata{}, probe.NewError(err)
}
}
// TODO - This is a temporary normalization of objectNames, need to find a better way
//
// normalizeObjectName - all objectNames with "/" get normalized to a simple objectName
//
// example:
// user provided value - "this/is/my/deep/directory/structure"
// donut normalized value - "this-is-my-deep-directory-structure"
//
func normalizeObjectName(objectName string) string {
// replace every '/' with '-'
return strings.Replace(objectName, "/", "-", -1)
}
// getDataAndParity - calculate k, m (data and parity) values from number of disks
func (b bucket) getDataAndParity(totalWriters int) (k uint8, m uint8, err *probe.Error) {
if totalWriters <= 1 {
return 0, 0, probe.NewError(InvalidArgument{})
}
quotient := totalWriters / 2 // not using float or abs to let integer round off to lower value
// quotient cannot be bigger than (255 / 2) = 127
if quotient > 127 {
return 0, 0, probe.NewError(ParityOverflow{})
}
remainder := totalWriters % 2 // will be 1 for odd and 0 for even numbers
k = uint8(quotient + remainder)
m = uint8(quotient)
return k, m, nil
}
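
To make the arithmetic concrete: the writer count is halved, and an odd remainder goes to the data half. A sketch (hypothetical function, same package, `fmt` is already imported in this file):

```go
// getDataAndParity(16) -> quotient 8, remainder 0 -> k=8 data, m=8 parity
// getDataAndParity(5)  -> quotient 2, remainder 1 -> k=3 data, m=2 parity
func exampleDataAndParity(b bucket) {
	k, m, _ := b.getDataAndParity(5)
	fmt.Println(k, m) // prints: 3 2
}
```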
// writeObjectData - erasure encode object data and stripe it across the given writers
func (b bucket) writeObjectData(k, m uint8, writers []io.WriteCloser, objectData io.Reader, size int64, hashWriter io.Writer) (int, int, *probe.Error) {
encoder, err := newEncoder(k, m)
if err != nil {
return 0, 0, err.Trace()
}
chunkSize := int64(blockSize) // 10 MiB, the package-level block size
chunkCount := 0
totalLength := 0
var e error
for e == nil {
var length int
inputData := make([]byte, chunkSize)
length, e = objectData.Read(inputData)
if length != 0 {
encodedBlocks, err := encoder.Encode(inputData[0:length])
if err != nil {
return 0, 0, err.Trace()
}
if _, err := hashWriter.Write(inputData[0:length]); err != nil {
return 0, 0, probe.NewError(err)
}
for blockIndex, block := range encodedBlocks {
errCh := make(chan error, 1)
go func(writer io.Writer, reader io.Reader, errCh chan<- error) {
defer close(errCh)
_, err := io.Copy(writer, reader)
errCh <- err
}(writers[blockIndex], bytes.NewReader(block), errCh)
if err := <-errCh; err != nil {
// Returning error is fine here CleanupErrors() would cleanup writers
return 0, 0, probe.NewError(err)
}
}
totalLength += length
chunkCount = chunkCount + 1
}
}
if e != io.EOF {
return 0, 0, probe.NewError(e)
}
return chunkCount, totalLength, nil
}
// readObjectData - read object data from disks, decode if erasure coded, and verify checksums
func (b bucket) readObjectData(objectName string, writer *io.PipeWriter, objMetadata ObjectMetadata) {
readers, err := b.getObjectReaders(objectName, "data")
if err != nil {
writer.CloseWithError(probe.WrapError(err))
return
}
for _, reader := range readers {
defer reader.Close()
}
var expected512Sum, expectedMd5sum []byte
{
var err error
expectedMd5sum, err = hex.DecodeString(objMetadata.MD5Sum)
if err != nil {
writer.CloseWithError(probe.WrapError(probe.NewError(err)))
return
}
expected512Sum, err = hex.DecodeString(objMetadata.SHA512Sum)
if err != nil {
writer.CloseWithError(probe.WrapError(probe.NewError(err)))
return
}
}
hasher := md5.New()
sum512hasher := sha512.New() // must match the SHA512Sum recorded at write time
mwriter := io.MultiWriter(writer, hasher, sum512hasher)
switch len(readers) > 1 {
case true:
encoder, err := newEncoder(objMetadata.DataDisks, objMetadata.ParityDisks)
if err != nil {
writer.CloseWithError(probe.WrapError(err))
return
}
totalLeft := objMetadata.Size
for i := 0; i < objMetadata.ChunkCount; i++ {
decodedData, err := b.decodeEncodedData(totalLeft, int64(objMetadata.BlockSize), readers, encoder, writer)
if err != nil {
writer.CloseWithError(probe.WrapError(err))
return
}
if _, err := io.Copy(mwriter, bytes.NewReader(decodedData)); err != nil {
writer.CloseWithError(probe.WrapError(probe.NewError(err)))
return
}
totalLeft = totalLeft - int64(objMetadata.BlockSize)
}
case false:
_, err := io.Copy(writer, readers[0])
if err != nil {
writer.CloseWithError(probe.WrapError(probe.NewError(err)))
return
}
}
// check if decodedData md5sum matches
if !bytes.Equal(expectedMd5sum, hasher.Sum(nil)) {
writer.CloseWithError(probe.WrapError(probe.NewError(ChecksumMismatch{})))
return
}
if !bytes.Equal(expected512Sum, sum512hasher.Sum(nil)) {
writer.CloseWithError(probe.WrapError(probe.NewError(ChecksumMismatch{})))
return
}
writer.Close()
return
}
// decodeEncodedData - read one encoded block from each disk and erasure decode it
func (b bucket) decodeEncodedData(totalLeft, blockSize int64, readers map[int]io.ReadCloser, encoder encoder, writer *io.PipeWriter) ([]byte, *probe.Error) {
var curBlockSize int64
if blockSize < totalLeft {
curBlockSize = blockSize
} else {
curBlockSize = totalLeft
}
curChunkSize, err := encoder.GetEncodedBlockLen(int(curBlockSize))
if err != nil {
return nil, err.Trace()
}
encodedBytes := make([][]byte, encoder.k+encoder.m)
errCh := make(chan error, len(readers))
var errRet error
var readCnt int
for i, reader := range readers {
go func(reader io.Reader, i int) {
encodedBytes[i] = make([]byte, curChunkSize)
_, err := io.ReadFull(reader, encodedBytes[i])
if err != nil {
encodedBytes[i] = nil
errCh <- err
return
}
errCh <- nil
}(reader, i)
// read through errCh for any errors
err := <-errCh
if err != nil {
errRet = err
} else {
readCnt++
}
}
if readCnt < int(encoder.k) {
return nil, probe.NewError(errRet)
}
decodedData, err := encoder.Decode(encodedBytes, int(curBlockSize))
if err != nil {
return nil, err.Trace()
}
return decodedData, nil
}
// getObjectReaders - open a reader per disk for the given object entry
func (b bucket) getObjectReaders(objectName, objectMeta string) (map[int]io.ReadCloser, *probe.Error) {
readers := make(map[int]io.ReadCloser)
var disks map[int]disk.Disk
var err *probe.Error
nodeSlice := 0
for _, node := range b.nodes {
disks, err = node.ListDisks()
if err != nil {
return nil, err.Trace()
}
for order, disk := range disks {
var objectSlice io.ReadCloser
bucketSlice := fmt.Sprintf("%s$%d$%d", b.name, nodeSlice, order)
objectPath := filepath.Join(b.donutName, bucketSlice, objectName, objectMeta)
objectSlice, err = disk.Open(objectPath)
if err == nil {
readers[order] = objectSlice
}
}
nodeSlice = nodeSlice + 1
}
if err != nil {
return nil, err.Trace()
}
return readers, nil
}
// getObjectWriters - create a writer per disk for the given object entry
func (b bucket) getObjectWriters(objectName, objectMeta string) ([]io.WriteCloser, *probe.Error) {
var writers []io.WriteCloser
nodeSlice := 0
for _, node := range b.nodes {
disks, err := node.ListDisks()
if err != nil {
return nil, err.Trace()
}
writers = make([]io.WriteCloser, len(disks))
for order, disk := range disks {
bucketSlice := fmt.Sprintf("%s$%d$%d", b.name, nodeSlice, order)
objectPath := filepath.Join(b.donutName, bucketSlice, objectName, objectMeta)
objectSlice, err := disk.CreateFile(objectPath)
if err != nil {
return nil, err.Trace()
}
writers[order] = objectSlice
}
nodeSlice = nodeSlice + 1
}
return writers, nil
}
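
Putting the reader and writer helpers together: every disk carries its own slice of each object, stored under a `bucket$node$disk` directory inside the donut root, so the on-disk layout looks roughly like:

```
<diskPath>/<donutName>/<bucketName>$<nodeIndex>$<diskOrder>/<objectName>/data
<diskPath>/<donutName>/<bucketName>$<nodeIndex>$<diskOrder>/<objectName>/objectMetadata.json
```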
View file

@@ -1,204 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package data implements in memory caching methods for data
package data
import (
"container/list"
"sync"
"time"
)
var noExpiration = time.Duration(0)
// Cache holds the required variables to compose an in memory cache system
// which also provides least-recently-used eviction bounded by maxSize
type Cache struct {
// Mutex is used for handling the concurrent
// read/write requests for cache
sync.Mutex
// items hold the cached objects
items *list.List
// reverseItems maps a key to its element in the items list
reverseItems map[interface{}]*list.Element
// maxSize is a total size for overall cache
maxSize uint64
// currentSize is a current size in memory
currentSize uint64
// OnEvicted - callback function for eviction
OnEvicted func(a ...interface{})
// totalEvicted counter to keep track of total evictions
totalEvicted int
}
// Stats current cache statistics
type Stats struct {
Bytes uint64
Items int
Evicted int
}
type element struct {
key interface{}
value []byte
}
// NewCache creates an inmemory cache
//
// maxSize is used for evicting the oldest entries before we run out of memory
func NewCache(maxSize uint64) *Cache {
return &Cache{
items: list.New(),
reverseItems: make(map[interface{}]*list.Element),
maxSize: maxSize,
}
}
// SetMaxSize sets a new max size
func (r *Cache) SetMaxSize(maxSize uint64) {
r.Lock()
defer r.Unlock()
r.maxSize = maxSize
}
// Stats get current cache statistics
func (r *Cache) Stats() Stats {
return Stats{
Bytes: r.currentSize,
Items: r.items.Len(),
Evicted: r.totalEvicted,
}
}
// Get returns a value of a given key if it exists
func (r *Cache) Get(key interface{}) ([]byte, bool) {
r.Lock()
defer r.Unlock()
ele, hit := r.reverseItems[key]
if !hit {
return nil, false
}
r.items.MoveToFront(ele)
return ele.Value.(*element).value, true
}
// Len returns length of the value of a given key, returns zero if key doesn't exist
func (r *Cache) Len(key interface{}) int {
r.Lock()
defer r.Unlock()
_, ok := r.reverseItems[key]
if !ok {
return 0
}
return len(r.reverseItems[key].Value.(*element).value)
}
// Append will append new data to an existing key,
// if key doesn't exist it behaves like Set()
func (r *Cache) Append(key interface{}, value []byte) bool {
r.Lock()
defer r.Unlock()
valueLen := uint64(len(value))
if r.maxSize > 0 {
// check if the size of the object is not bigger than the
// capacity of the cache
if valueLen > r.maxSize {
return false
}
// evict the oldest entries until there is room for the new bytes
for (r.currentSize + valueLen) > r.maxSize {
r.doDeleteOldest()
}
}
ele, hit := r.reverseItems[key]
if !hit {
ele := r.items.PushFront(&element{key, value})
r.currentSize += valueLen
r.reverseItems[key] = ele
return true
}
r.items.MoveToFront(ele)
r.currentSize += valueLen
ele.Value.(*element).value = append(ele.Value.(*element).value, value...)
return true
}
// Set will persist a value to the cache
func (r *Cache) Set(key interface{}, value []byte) bool {
r.Lock()
defer r.Unlock()
valueLen := uint64(len(value))
if r.maxSize > 0 {
// check if the size of the object is not bigger than the
// capacity of the cache
if valueLen > r.maxSize {
return false
}
// evict the oldest entries until there is room for the new bytes
for (r.currentSize + valueLen) > r.maxSize {
r.doDeleteOldest()
}
}
if _, hit := r.reverseItems[key]; hit {
return false
}
ele := r.items.PushFront(&element{key, value})
r.currentSize += valueLen
r.reverseItems[key] = ele
return true
}
// Delete deletes a given key if exists
func (r *Cache) Delete(key interface{}) {
r.Lock()
defer r.Unlock()
ele, ok := r.reverseItems[key]
if !ok {
return
}
if ele != nil {
r.currentSize -= uint64(len(r.reverseItems[key].Value.(*element).value))
r.items.Remove(ele)
delete(r.reverseItems, key)
r.totalEvicted++
if r.OnEvicted != nil {
r.OnEvicted(key)
}
}
}
func (r *Cache) doDeleteOldest() {
ele := r.items.Back()
if ele != nil {
r.currentSize -= uint64(len(r.reverseItems[ele.Value.(*element).key].Value.(*element).value))
delete(r.reverseItems, ele.Value.(*element).key)
r.items.Remove(ele)
r.totalEvicted++
if r.OnEvicted != nil {
r.OnEvicted(ele.Value.(*element).key)
}
}
}
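
A short usage sketch of this cache (the import path is assumed from this tree); note how a `Set` that would exceed `maxSize` first evicts the least recently used entry:

```go
package main

import (
	"fmt"

	"github.com/minio/minio/pkg/donut/cache/data" // import path assumed
)

func main() {
	c := data.NewCache(16) // cap the cache at 16 bytes in total
	c.Set("a", []byte("hello "))
	c.Append("a", []byte("world")) // key exists, so the bytes are appended
	buf, ok := c.Get("a")
	fmt.Println(ok, string(buf)) // true hello world

	// This Set needs 10 more bytes, pushing past maxSize, so the least
	// recently used entry ("a") is evicted to make room.
	c.Set("b", []byte("0123456789"))
	_, ok = c.Get("a")
	fmt.Println(ok) // false
}
```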
View file

@@ -1,45 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package data
import (
"testing"
. "gopkg.in/check.v1"
)
func Test(t *testing.T) { TestingT(t) }
type MySuite struct{}
var _ = Suite(&MySuite{})
func (s *MySuite) TestCache(c *C) {
cache := NewCache(1000)
data := []byte("Hello, world!")
ok := cache.Set("filename", data)
c.Assert(ok, Equals, true)
storedata, ok := cache.Get("filename")
c.Assert(ok, Equals, true)
c.Assert(data, DeepEquals, storedata)
cache.Delete("filename")
_, ok = cache.Get("filename")
c.Assert(ok, Equals, false)
}
View file

@@ -1,110 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package metadata implements in memory caching methods for metadata information
package metadata
import (
"sync"
"time"
)
var noExpiration = time.Duration(0)
// Cache holds the required variables to compose an in memory cache system
// for metadata information
type Cache struct {
// Mutex is used for handling the concurrent
// read/write requests for cache
sync.Mutex
// items hold the cached objects
items map[string]interface{}
// updatedAt holds the time that related item's updated at
updatedAt map[string]time.Time
}
// Stats current cache statistics
type Stats struct {
Items int
}
// NewCache creates an inmemory cache
func NewCache() *Cache {
return &Cache{
items: make(map[string]interface{}),
updatedAt: map[string]time.Time{},
}
}
// Stats get current cache statistics
func (r *Cache) Stats() Stats {
return Stats{
Items: len(r.items),
}
}
// GetAll returns a copy of all the items
func (r *Cache) GetAll() map[string]interface{} {
r.Lock()
defer r.Unlock()
// copy the map so callers cannot mutate the cache without holding the lock
items := make(map[string]interface{}, len(r.items))
for k, v := range r.items {
items[k] = v
}
return items
}
// Get returns a value of a given key if it exists
func (r *Cache) Get(key string) interface{} {
r.Lock()
defer r.Unlock()
value, ok := r.items[key]
if !ok {
return nil
}
return value
}
// Exists returns true if key exists
func (r *Cache) Exists(key string) bool {
r.Lock()
defer r.Unlock()
_, ok := r.items[key]
return ok
}
// Set will persist a value to the cache
func (r *Cache) Set(key string, value interface{}) bool {
r.Lock()
defer r.Unlock()
r.items[key] = value
return true
}
// Delete deletes a given key if exists
func (r *Cache) Delete(key string) {
r.Lock()
defer r.Unlock()
r.doDelete(key)
}
func (r *Cache) doDelete(key string) {
if _, ok := r.items[key]; ok {
delete(r.items, key)
delete(r.updatedAt, key)
}
}
View file

@@ -1,46 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package metadata
import (
"testing"
. "gopkg.in/check.v1"
)
func Test(t *testing.T) { TestingT(t) }
type MySuite struct{}
var _ = Suite(&MySuite{})
func (s *MySuite) TestCache(c *C) {
cache := NewCache()
data := []byte("Hello, world!")
ok := cache.Set("filename", data)
c.Assert(ok, Equals, true)
storedata := cache.Get("filename")
c.Assert(storedata, NotNil)
c.Assert(data, DeepEquals, storedata)
cache.Delete("filename")
ok = cache.Exists("filename")
c.Assert(ok, Equals, false)
}
View file

@@ -1,190 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package donut
import (
"bufio"
"bytes"
"io"
"regexp"
"sort"
"strings"
"unicode/utf8"
"github.com/minio/minio/pkg/atomic"
)
// IsValidDonut - verify donut name is correct
func IsValidDonut(donutName string) bool {
if len(donutName) < 3 || len(donutName) > 63 {
return false
}
if donutName[0] == '.' || donutName[len(donutName)-1] == '.' {
return false
}
if match, _ := regexp.MatchString("\\.\\.", donutName); match {
return false
}
// We don't support donutNames with '.' in them
match, _ := regexp.MatchString("^[a-zA-Z][a-zA-Z0-9\\-]+[a-zA-Z0-9]$", donutName)
return match
}
// IsValidBucket - verify bucket name in accordance with
// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
func IsValidBucket(bucket string) bool {
if len(bucket) < 3 || len(bucket) > 63 {
return false
}
if bucket[0] == '.' || bucket[len(bucket)-1] == '.' {
return false
}
if match, _ := regexp.MatchString("\\.\\.", bucket); match {
return false
}
// We don't support buckets with '.' in them
match, _ := regexp.MatchString("^[a-zA-Z][a-zA-Z0-9\\-]+[a-zA-Z0-9]$", bucket)
return match
}
// IsValidObjectName - verify object name in accordance with
// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
func IsValidObjectName(object string) bool {
if strings.TrimSpace(object) == "" {
return false
}
if len(object) > 1024 || len(object) == 0 {
return false
}
if !utf8.ValidString(object) {
return false
}
return true
}
// IsValidPrefix - verify prefix name is correct, an empty prefix is valid
func IsValidPrefix(prefix string) bool {
if strings.TrimSpace(prefix) == "" {
return true
}
return IsValidObjectName(prefix)
}
// ProxyWriter implements io.Writer to trap written bytes
type ProxyWriter struct {
writer io.Writer
writtenBytes []byte
}
func (r *ProxyWriter) Write(p []byte) (n int, err error) {
n, err = r.writer.Write(p)
if err != nil {
return
}
r.writtenBytes = append(r.writtenBytes, p[0:n]...)
return
}
// NewProxyWriter - wrap around a given writer with ProxyWriter
func NewProxyWriter(w io.Writer) *ProxyWriter {
return &ProxyWriter{writer: w, writtenBytes: nil}
}
// Delimiter returns the prefix of object up to and including the first occurrence of delimiter
func Delimiter(object, delimiter string) string {
readBuffer := bytes.NewBufferString(object)
reader := bufio.NewReader(readBuffer)
stringReader := strings.NewReader(delimiter)
delimited, _ := stringReader.ReadByte()
delimitedStr, _ := reader.ReadString(delimited)
return delimitedStr
}
// RemoveDuplicates removes duplicate elements from a slice
func RemoveDuplicates(slice []string) []string {
newSlice := []string{}
seen := make(map[string]struct{})
for _, val := range slice {
if _, ok := seen[val]; !ok {
newSlice = append(newSlice, val)
seen[val] = struct{}{} // avoiding byte allocation
}
}
return newSlice
}
// TrimPrefix trims off a prefix string from all the elements in a given slice
func TrimPrefix(objects []string, prefix string) []string {
var results []string
for _, object := range objects {
results = append(results, strings.TrimPrefix(object, prefix))
}
return results
}
// HasNoDelimiter provides a new slice from an input slice which has elements without delimiter
func HasNoDelimiter(objects []string, delim string) []string {
var results []string
for _, object := range objects {
if !strings.Contains(object, delim) {
results = append(results, object)
}
}
return results
}
// HasDelimiter provides a new slice from an input slice which has elements with a delimiter
func HasDelimiter(objects []string, delim string) []string {
var results []string
for _, object := range objects {
if strings.Contains(object, delim) {
results = append(results, object)
}
}
return results
}
// SplitDelimiter provides a new slice from an input slice by splitting a delimiter
func SplitDelimiter(objects []string, delim string) []string {
var results []string
for _, object := range objects {
parts := strings.Split(object, delim)
results = append(results, parts[0]+delim)
}
return results
}
// SortUnique sort a slice in lexical order, removing duplicate elements
func SortUnique(objects []string) []string {
objectMap := make(map[string]string)
for _, v := range objects {
objectMap[v] = v
}
var results []string
for k := range objectMap {
results = append(results, k)
}
sort.Strings(results)
return results
}
// CleanupWritersOnError purge writers on error
func CleanupWritersOnError(writers []io.WriteCloser) {
for _, writer := range writers {
writer.(*atomic.File).CloseAndPurge()
}
}
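
These slice helpers are what `ListObjects` chains together to derive S3-style common prefixes. A sketch (same package, assuming `fmt` is imported):

```go
func exampleCommonPrefixes() {
	objects := []string{"photos/2015/jan.jpg", "photos/2015/feb.jpg", "readme.txt"}
	delim := "/"
	files := HasNoDelimiter(objects, delim)     // [readme.txt]
	prefixed := HasDelimiter(objects, delim)    // the two photos/... keys
	prefixes := SplitDelimiter(prefixed, delim) // [photos/ photos/]
	prefixes = SortUnique(prefixes)             // [photos/]
	fmt.Println(files, prefixes)
}
```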
View file

@@ -1,80 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package donut
import (
"os/user"
"path/filepath"
"github.com/minio/minio/pkg/probe"
"github.com/minio/minio/pkg/quick"
)
// getDonutConfigPath get donut config file path
func getDonutConfigPath() (string, *probe.Error) {
if customConfigPath != "" {
return customConfigPath, nil
}
u, err := user.Current()
if err != nil {
return "", probe.NewError(err)
}
donutConfigPath := filepath.Join(u.HomeDir, ".minio", "donut.json")
return donutConfigPath, nil
}
// internal variable only accessed via get/set methods
var customConfigPath string
// SetDonutConfigPath - set custom donut config path
func SetDonutConfigPath(configPath string) {
customConfigPath = configPath
}
// SaveConfig save donut config
func SaveConfig(a *Config) *probe.Error {
donutConfigPath, err := getDonutConfigPath()
if err != nil {
return err.Trace()
}
qc, err := quick.New(a)
if err != nil {
return err.Trace()
}
if err := qc.Save(donutConfigPath); err != nil {
return err.Trace()
}
return nil
}
// LoadConfig load donut config
func LoadConfig() (*Config, *probe.Error) {
donutConfigPath, err := getDonutConfigPath()
if err != nil {
return nil, err.Trace()
}
a := &Config{}
a.Version = "0.0.1"
qc, err := quick.New(a)
if err != nil {
return nil, err.Trace()
}
if err := qc.Load(donutConfigPath); err != nil {
return nil, err.Trace()
}
return qc.Data().(*Config), nil
}
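
A minimal sketch of the save/load round trip, assuming only the `Version` field on `Config` (its remaining fields are defined elsewhere in this package):

```go
func exampleConfigRoundTrip() *probe.Error {
	// Override the default ~/.minio/donut.json location for this sketch.
	SetDonutConfigPath("/tmp/donut-test.json")
	if err := SaveConfig(&Config{Version: "0.0.1"}); err != nil {
		return err.Trace()
	}
	conf, err := LoadConfig()
	if err != nil {
		return err.Trace()
	}
	_ = conf // use the loaded configuration here
	return nil
}
```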
View file

@@ -1,222 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package disk
import (
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"syscall"
"github.com/minio/minio/pkg/atomic"
"github.com/minio/minio/pkg/probe"
)
// Disk container for disk parameters
type Disk struct {
lock *sync.Mutex
path string
fsInfo map[string]string
}
// New - instantiate new disk
func New(diskPath string) (Disk, *probe.Error) {
if diskPath == "" {
return Disk{}, probe.NewError(InvalidArgument{})
}
st, err := os.Stat(diskPath)
if err != nil {
return Disk{}, probe.NewError(err)
}
if !st.IsDir() {
return Disk{}, probe.NewError(syscall.ENOTDIR)
}
s := syscall.Statfs_t{}
err = syscall.Statfs(diskPath, &s)
if err != nil {
return Disk{}, probe.NewError(err)
}
disk := Disk{
lock: &sync.Mutex{},
path: diskPath,
fsInfo: make(map[string]string),
}
if fsType := getFSType(s.Type); fsType != "UNKNOWN" {
disk.fsInfo["FSType"] = fsType
disk.fsInfo["MountPoint"] = disk.path
return disk, nil
}
return Disk{}, probe.NewError(UnsupportedFilesystem{Type: strconv.FormatInt(int64(s.Type), 10)})
}
// IsUsable - is disk usable, alive
func (disk Disk) IsUsable() bool {
_, err := os.Stat(disk.path)
return err == nil
}
// GetPath - get root disk path
func (disk Disk) GetPath() string {
return disk.path
}
// GetFSInfo - get disk filesystem and its usage information
func (disk Disk) GetFSInfo() map[string]string {
disk.lock.Lock()
defer disk.lock.Unlock()
s := syscall.Statfs_t{}
err := syscall.Statfs(disk.path, &s)
if err != nil {
return nil
}
disk.fsInfo["Total"] = formatBytes(int64(s.Bsize) * int64(s.Blocks))
disk.fsInfo["Free"] = formatBytes(int64(s.Bsize) * int64(s.Bfree))
disk.fsInfo["TotalB"] = strconv.FormatInt(int64(s.Bsize)*int64(s.Blocks), 10)
disk.fsInfo["FreeB"] = strconv.FormatInt(int64(s.Bsize)*int64(s.Bfree), 10)
return disk.fsInfo
}
// MakeDir - make a directory inside disk root path
func (disk Disk) MakeDir(dirname string) *probe.Error {
disk.lock.Lock()
defer disk.lock.Unlock()
if err := os.MkdirAll(filepath.Join(disk.path, dirname), 0700); err != nil {
return probe.NewError(err)
}
return nil
}
// ListDir - list a directory inside disk root path, get only directories
func (disk Disk) ListDir(dirname string) ([]os.FileInfo, *probe.Error) {
disk.lock.Lock()
defer disk.lock.Unlock()
dir, err := os.Open(filepath.Join(disk.path, dirname))
if err != nil {
return nil, probe.NewError(err)
}
defer dir.Close()
contents, err := dir.Readdir(-1)
if err != nil {
return nil, probe.NewError(err)
}
var directories []os.FileInfo
for _, content := range contents {
// Include only directories, ignore everything else
if content.IsDir() {
directories = append(directories, content)
}
}
return directories, nil
}
// ListFiles - list a directory inside disk root path, get only files
func (disk Disk) ListFiles(dirname string) ([]os.FileInfo, *probe.Error) {
disk.lock.Lock()
defer disk.lock.Unlock()
dir, err := os.Open(filepath.Join(disk.path, dirname))
if err != nil {
return nil, probe.NewError(err)
}
defer dir.Close()
contents, err := dir.Readdir(-1)
if err != nil {
return nil, probe.NewError(err)
}
var files []os.FileInfo
for _, content := range contents {
// Include only regular files, ignore everything else
if content.Mode().IsRegular() {
files = append(files, content)
}
}
return files, nil
}
// CreateFile - create a file inside disk root path, returns *atomic.File which provides atomic writes
func (disk Disk) CreateFile(filename string) (*atomic.File, *probe.Error) {
disk.lock.Lock()
defer disk.lock.Unlock()
if filename == "" {
return nil, probe.NewError(InvalidArgument{})
}
f, err := atomic.FileCreate(filepath.Join(disk.path, filename))
if err != nil {
return nil, probe.NewError(err)
}
return f, nil
}
// Open - read a file inside disk root path
func (disk Disk) Open(filename string) (*os.File, *probe.Error) {
disk.lock.Lock()
defer disk.lock.Unlock()
if filename == "" {
return nil, probe.NewError(InvalidArgument{})
}
dataFile, err := os.Open(filepath.Join(disk.path, filename))
if err != nil {
return nil, probe.NewError(err)
}
return dataFile, nil
}
// OpenFile - Use with caution
func (disk Disk) OpenFile(filename string, flags int, perm os.FileMode) (*os.File, *probe.Error) {
disk.lock.Lock()
defer disk.lock.Unlock()
if filename == "" {
return nil, probe.NewError(InvalidArgument{})
}
dataFile, err := os.OpenFile(filepath.Join(disk.path, filename), flags, perm)
if err != nil {
return nil, probe.NewError(err)
}
return dataFile, nil
}
// formatBytes - convert a byte count to a human readable string, e.g. 2 MB, 64.2 KB, 52 B
func formatBytes(i int64) (result string) {
switch {
case i > (1024 * 1024 * 1024 * 1024):
result = fmt.Sprintf("%.02f TB", float64(i)/1024/1024/1024/1024)
case i > (1024 * 1024 * 1024):
result = fmt.Sprintf("%.02f GB", float64(i)/1024/1024/1024)
case i > (1024 * 1024):
result = fmt.Sprintf("%.02f MB", float64(i)/1024/1024)
case i > 1024:
result = fmt.Sprintf("%.02f KB", float64(i)/1024)
default:
result = fmt.Sprintf("%d B", i)
}
result = strings.Trim(result, " ")
return
}
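
A few sample conversions through `formatBytes`; the comparisons are strictly greater-than, so exactly 1024 bytes still prints as `1024 B` (sketch, same package):

```go
func exampleFormatBytes() {
	fmt.Println(formatBytes(512))       // 512 B
	fmt.Println(formatBytes(64 * 1024)) // 64.00 KB
	fmt.Println(formatBytes(3 << 30))   // 3.00 GB
}
```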
View file

@@ -1,34 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package disk
import "strconv"
// fsType2StringMap - list of filesystems supported by donut
var fsType2StringMap = map[string]string{
"11": "HFS",
}
// getFSType - get filesystem type
func getFSType(fsType uint32) string {
fsTypeHex := strconv.FormatUint(uint64(fsType), 16)
fsTypeString, ok := fsType2StringMap[fsTypeHex]
if !ok {
return "UNKNOWN"
}
return fsTypeString
}
View file

@@ -1,45 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package disk
import "strconv"
// fsType2StringMap - list of filesystems supported by donut on linux
var fsType2StringMap = map[string]string{
"1021994": "TMPFS",
"137d": "EXT",
"4244": "HFS",
"4d44": "MSDOS",
"52654973": "REISERFS",
"5346544e": "NTFS",
"58465342": "XFS",
"61756673": "AUFS",
"6969": "NFS",
"ef51": "EXT2OLD",
"ef53": "EXT4",
"f15f": "ecryptfs",
}
// getFSType - get filesystem type
func getFSType(fsType int64) string {
fsTypeHex := strconv.FormatInt(fsType, 16)
fsTypeString, ok := fsType2StringMap[fsTypeHex]
if !ok {
return "UNKNOWN"
}
return fsTypeString
}
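
For example, ext4 reports an `f_type` of 0xef53 from statfs, which hex-encodes to the "ef53" key above; anything not in the map falls through to "UNKNOWN". A sketch (same package, assuming `fmt` is imported):

```go
func exampleGetFSType() {
	fmt.Println(getFSType(0xef53)) // EXT4
	fmt.Println(getFSType(0x1234)) // UNKNOWN, not in fsType2StringMap
}
```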
View file

@@ -1,84 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package disk
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
. "gopkg.in/check.v1"
)
func TestDisk(t *testing.T) { TestingT(t) }
type MyDiskSuite struct {
path string
disk Disk
}
var _ = Suite(&MyDiskSuite{})
func (s *MyDiskSuite) SetUpSuite(c *C) {
path, err := ioutil.TempDir(os.TempDir(), "disk-")
c.Assert(err, IsNil)
s.path = path
d, perr := New(s.path)
c.Assert(perr, IsNil)
s.disk = d
}
func (s *MyDiskSuite) TearDownSuite(c *C) {
os.RemoveAll(s.path)
}
func (s *MyDiskSuite) TestDiskInfo(c *C) {
c.Assert(s.path, Equals, s.disk.GetPath())
fsInfo := s.disk.GetFSInfo()
c.Assert(fsInfo["MountPoint"], Equals, s.disk.GetPath())
c.Assert(fsInfo["FSType"], Not(Equals), "UNKNOWN")
}
func (s *MyDiskSuite) TestDiskCreateDir(c *C) {
c.Assert(s.disk.MakeDir("hello"), IsNil)
}
func (s *MyDiskSuite) TestDiskCreateFile(c *C) {
f, err := s.disk.CreateFile("hello1")
c.Assert(err, IsNil)
c.Assert(f.Name(), Not(Equals), filepath.Join(s.path, "hello1"))
// close renames the file
f.Close()
// Open should be a success
_, err = s.disk.Open("hello1")
c.Assert(err, IsNil)
}
func (s *MyDiskSuite) TestDiskOpen(c *C) {
f1, err := s.disk.CreateFile("hello2")
c.Assert(err, IsNil)
c.Assert(f1.Name(), Not(Equals), filepath.Join(s.path, "hello2"))
// close renames the file
f1.Close()
f2, err := s.disk.Open("hello2")
c.Assert(err, IsNil)
c.Assert(f2.Name(), Equals, filepath.Join(s.path, "hello2"))
defer f2.Close()
}
View file

@@ -1,33 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package disk
// InvalidArgument invalid argument
type InvalidArgument struct{}
func (e InvalidArgument) Error() string {
return "Invalid argument"
}
// UnsupportedFilesystem unsupported filesystem type
type UnsupportedFilesystem struct {
Type string
}
func (e UnsupportedFilesystem) Error() string {
return "Unsupported filesystem: " + e.Type
}
View file

@@ -1,55 +0,0 @@
##### Users Collection
```js
"minio": {
"version": 1,
"users": [{
"secretAccessKey": String,
"accessKeyId": String,
"status": String // enum: ok, disabled, deleted
}],
"hosts": [{
"address": String,
"uuid": String,
"status": String, // enum: ok, disabled, deleted, busy, offline.
"disks": [{
"disk": String,
"uuid": String,
"status": String // ok, offline, disabled, busy.
}]
}]
}
```
##### Bucket Collection
```js
"buckets": {
"bucket": String, // index
"deleted": Boolean,
"permissions": String
}
```
##### Object Collection
```js
"objects": {
"key": String, // index
"createdAt": Date,
"hosts[16]": [{
"host": String,
"disk": String,
}],
"deleted": Boolean
}
```
```js
"meta": {
"key": String, // index
"type": String // content-type
// type specific meta
}
```
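
For concreteness, a hypothetical document matching the users collection schema above (all values are illustrative):

```js
"minio": {
  "version": 1,
  "users": [{
    "secretAccessKey": "EXAMPLE-SECRET-KEY",
    "accessKeyId": "EXAMPLE-ACCESS-KEY",
    "status": "ok"
  }],
  "hosts": [{
    "address": "10.0.0.5:9000",
    "uuid": "EXAMPLE-HOST-UUID",
    "status": "ok",
    "disks": [{
      "disk": "/mnt/disk1",
      "uuid": "EXAMPLE-DISK-UUID",
      "status": "ok"
    }]
  }]
}
```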
View file

@@ -1,680 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package donut
import (
"encoding/base64"
"encoding/hex"
"encoding/json"
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"time"
"github.com/minio/minio/pkg/crypto/sha256"
"github.com/minio/minio/pkg/crypto/sha512"
"github.com/minio/minio/pkg/donut/disk"
"github.com/minio/minio/pkg/probe"
signv4 "github.com/minio/minio/pkg/signature"
)
// config files used inside Donut
const (
// bucket, object metadata
bucketMetadataConfig = "bucketMetadata.json"
objectMetadataConfig = "objectMetadata.json"
// versions
objectMetadataVersion = "1.0.0"
bucketMetadataVersion = "1.0.0"
)
/// v1 API functions
// makeBucket - make a new bucket
func (donut API) makeBucket(bucket string, acl BucketACL) *probe.Error {
if bucket == "" || strings.TrimSpace(bucket) == "" {
return probe.NewError(InvalidArgument{})
}
return donut.makeDonutBucket(bucket, acl.String())
}
// getBucketMetadata - get bucket metadata
func (donut API) getBucketMetadata(bucketName string) (BucketMetadata, *probe.Error) {
if err := donut.listDonutBuckets(); err != nil {
return BucketMetadata{}, err.Trace()
}
if _, ok := donut.buckets[bucketName]; !ok {
return BucketMetadata{}, probe.NewError(BucketNotFound{Bucket: bucketName})
}
metadata, err := donut.getDonutBucketMetadata()
if err != nil {
return BucketMetadata{}, err.Trace()
}
return metadata.Buckets[bucketName], nil
}
// setBucketMetadata - set bucket metadata
func (donut API) setBucketMetadata(bucketName string, bucketMetadata map[string]string) *probe.Error {
if err := donut.listDonutBuckets(); err != nil {
return err.Trace()
}
metadata, err := donut.getDonutBucketMetadata()
if err != nil {
return err.Trace()
}
oldBucketMetadata := metadata.Buckets[bucketName]
acl, ok := bucketMetadata["acl"]
if !ok {
return probe.NewError(InvalidArgument{})
}
oldBucketMetadata.ACL = BucketACL(acl)
metadata.Buckets[bucketName] = oldBucketMetadata
return donut.setDonutBucketMetadata(metadata)
}
// listBuckets - return list of buckets
func (donut API) listBuckets() (map[string]BucketMetadata, *probe.Error) {
if err := donut.listDonutBuckets(); err != nil {
return nil, err.Trace()
}
metadata, err := donut.getDonutBucketMetadata()
if err != nil {
// intentionally left out the error when Donut is empty
// but we need to revisit this area in future - since we need
// to figure out between acceptable and unacceptable errors
return make(map[string]BucketMetadata), nil
}
if metadata == nil {
return make(map[string]BucketMetadata), nil
}
return metadata.Buckets, nil
}
// listObjects - return list of objects
func (donut API) listObjects(bucket, prefix, marker, delimiter string, maxkeys int) (ListObjectsResults, *probe.Error) {
if err := donut.listDonutBuckets(); err != nil {
return ListObjectsResults{}, err.Trace()
}
if _, ok := donut.buckets[bucket]; !ok {
return ListObjectsResults{}, probe.NewError(BucketNotFound{Bucket: bucket})
}
listObjects, err := donut.buckets[bucket].ListObjects(prefix, marker, delimiter, maxkeys)
if err != nil {
return ListObjectsResults{}, err.Trace()
}
return listObjects, nil
}
// putObject - put object
func (donut API) putObject(bucket, object, expectedMD5Sum string, reader io.Reader, size int64, metadata map[string]string, signature *signv4.Signature) (ObjectMetadata, *probe.Error) {
if bucket == "" || strings.TrimSpace(bucket) == "" {
return ObjectMetadata{}, probe.NewError(InvalidArgument{})
}
if object == "" || strings.TrimSpace(object) == "" {
return ObjectMetadata{}, probe.NewError(InvalidArgument{})
}
if err := donut.listDonutBuckets(); err != nil {
return ObjectMetadata{}, err.Trace()
}
if _, ok := donut.buckets[bucket]; !ok {
return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
}
bucketMeta, err := donut.getDonutBucketMetadata()
if err != nil {
return ObjectMetadata{}, err.Trace()
}
if _, ok := bucketMeta.Buckets[bucket].BucketObjects[object]; ok {
return ObjectMetadata{}, probe.NewError(ObjectExists{Object: object})
}
objMetadata, err := donut.buckets[bucket].WriteObject(object, reader, size, expectedMD5Sum, metadata, signature)
if err != nil {
return ObjectMetadata{}, err.Trace()
}
bucketMeta.Buckets[bucket].BucketObjects[object] = struct{}{}
if err := donut.setDonutBucketMetadata(bucketMeta); err != nil {
return ObjectMetadata{}, err.Trace()
}
return objMetadata, nil
}
// putObjectPart - upload a part of an object within a multipart session
func (donut API) putObjectPart(bucket, object, expectedMD5Sum, uploadID string, partID int, reader io.Reader, size int64, metadata map[string]string, signature *signv4.Signature) (PartMetadata, *probe.Error) {
if bucket == "" || strings.TrimSpace(bucket) == "" {
return PartMetadata{}, probe.NewError(InvalidArgument{})
}
if object == "" || strings.TrimSpace(object) == "" {
return PartMetadata{}, probe.NewError(InvalidArgument{})
}
if err := donut.listDonutBuckets(); err != nil {
return PartMetadata{}, err.Trace()
}
if _, ok := donut.buckets[bucket]; !ok {
return PartMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
}
bucketMeta, err := donut.getDonutBucketMetadata()
if err != nil {
return PartMetadata{}, err.Trace()
}
if _, ok := bucketMeta.Buckets[bucket].Multiparts[object]; !ok {
return PartMetadata{}, probe.NewError(InvalidUploadID{UploadID: uploadID})
}
if _, ok := bucketMeta.Buckets[bucket].BucketObjects[object]; ok {
return PartMetadata{}, probe.NewError(ObjectExists{Object: object})
}
objectPart := object + "/" + "multipart" + "/" + strconv.Itoa(partID)
objmetadata, err := donut.buckets[bucket].WriteObject(objectPart, reader, size, expectedMD5Sum, metadata, signature)
if err != nil {
return PartMetadata{}, err.Trace()
}
partMetadata := PartMetadata{
PartNumber: partID,
LastModified: objmetadata.Created,
ETag: objmetadata.MD5Sum,
Size: objmetadata.Size,
}
multipartSession := bucketMeta.Buckets[bucket].Multiparts[object]
multipartSession.Parts[strconv.Itoa(partID)] = partMetadata
bucketMeta.Buckets[bucket].Multiparts[object] = multipartSession
if err := donut.setDonutBucketMetadata(bucketMeta); err != nil {
return PartMetadata{}, err.Trace()
}
return partMetadata, nil
}
// getObject - get object
func (donut API) getObject(bucket, object string) (reader io.ReadCloser, size int64, err *probe.Error) {
if bucket == "" || strings.TrimSpace(bucket) == "" {
return nil, 0, probe.NewError(InvalidArgument{})
}
if object == "" || strings.TrimSpace(object) == "" {
return nil, 0, probe.NewError(InvalidArgument{})
}
if err := donut.listDonutBuckets(); err != nil {
return nil, 0, err.Trace()
}
if _, ok := donut.buckets[bucket]; !ok {
return nil, 0, probe.NewError(BucketNotFound{Bucket: bucket})
}
return donut.buckets[bucket].ReadObject(object)
}
// getObjectMetadata - get object metadata
func (donut API) getObjectMetadata(bucket, object string) (ObjectMetadata, *probe.Error) {
if err := donut.listDonutBuckets(); err != nil {
return ObjectMetadata{}, err.Trace()
}
if _, ok := donut.buckets[bucket]; !ok {
return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
}
bucketMeta, err := donut.getDonutBucketMetadata()
if err != nil {
return ObjectMetadata{}, err.Trace()
}
if _, ok := bucketMeta.Buckets[bucket].BucketObjects[object]; !ok {
return ObjectMetadata{}, probe.NewError(ObjectNotFound{Object: object})
}
objectMetadata, err := donut.buckets[bucket].GetObjectMetadata(object)
if err != nil {
return ObjectMetadata{}, err.Trace()
}
return objectMetadata, nil
}
// newMultipartUpload - new multipart upload request
func (donut API) newMultipartUpload(bucket, object, contentType string) (string, *probe.Error) {
if err := donut.listDonutBuckets(); err != nil {
return "", err.Trace()
}
if _, ok := donut.buckets[bucket]; !ok {
return "", probe.NewError(BucketNotFound{Bucket: bucket})
}
allbuckets, err := donut.getDonutBucketMetadata()
if err != nil {
return "", err.Trace()
}
bucketMetadata := allbuckets.Buckets[bucket]
multiparts := make(map[string]MultiPartSession)
if len(bucketMetadata.Multiparts) > 0 {
multiparts = bucketMetadata.Multiparts
}
id := []byte(strconv.Itoa(rand.Int()) + bucket + object + time.Now().String())
uploadIDSum := sha512.Sum512(id)
uploadID := base64.URLEncoding.EncodeToString(uploadIDSum[:])[:47]
multipartSession := MultiPartSession{
UploadID: uploadID,
Initiated: time.Now().UTC(),
Parts: make(map[string]PartMetadata),
TotalParts: 0,
}
multiparts[object] = multipartSession
bucketMetadata.Multiparts = multiparts
allbuckets.Buckets[bucket] = bucketMetadata
if err := donut.setDonutBucketMetadata(allbuckets); err != nil {
return "", err.Trace()
}
return uploadID, nil
}
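// Illustrative sketch, not part of the original source: the upload ID minted
// above, shown in isolation. Randomness, the object coordinates, and the wall
// clock are hashed with SHA-512, then the digest is URL-safe base64 encoded
// and truncated to 47 characters, yielding an opaque token:
//
//	id := []byte(strconv.Itoa(rand.Int()) + bucket + object + time.Now().String())
//	sum := sha512.Sum512(id)
//	uploadID := base64.URLEncoding.EncodeToString(sum[:])[:47]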
// listObjectParts - list all object parts
func (donut API) listObjectParts(bucket, object string, resources ObjectResourcesMetadata) (ObjectResourcesMetadata, *probe.Error) {
if bucket == "" || strings.TrimSpace(bucket) == "" {
return ObjectResourcesMetadata{}, probe.NewError(InvalidArgument{})
}
if object == "" || strings.TrimSpace(object) == "" {
return ObjectResourcesMetadata{}, probe.NewError(InvalidArgument{})
}
if err := donut.listDonutBuckets(); err != nil {
return ObjectResourcesMetadata{}, err.Trace()
}
if _, ok := donut.buckets[bucket]; !ok {
return ObjectResourcesMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
}
allBuckets, err := donut.getDonutBucketMetadata()
if err != nil {
return ObjectResourcesMetadata{}, err.Trace()
}
bucketMetadata := allBuckets.Buckets[bucket]
if _, ok := bucketMetadata.Multiparts[object]; !ok {
return ObjectResourcesMetadata{}, probe.NewError(InvalidUploadID{UploadID: resources.UploadID})
}
if bucketMetadata.Multiparts[object].UploadID != resources.UploadID {
return ObjectResourcesMetadata{}, probe.NewError(InvalidUploadID{UploadID: resources.UploadID})
}
objectResourcesMetadata := resources
objectResourcesMetadata.Bucket = bucket
objectResourcesMetadata.Key = object
var parts []*PartMetadata
var startPartNumber int
switch {
case objectResourcesMetadata.PartNumberMarker == 0:
startPartNumber = 1
default:
startPartNumber = objectResourcesMetadata.PartNumberMarker
}
for i := startPartNumber; i <= bucketMetadata.Multiparts[object].TotalParts; i++ {
if len(parts) > objectResourcesMetadata.MaxParts {
sort.Sort(partNumber(parts))
objectResourcesMetadata.IsTruncated = true
objectResourcesMetadata.Part = parts
objectResourcesMetadata.NextPartNumberMarker = i
return objectResourcesMetadata, nil
}
part, ok := bucketMetadata.Multiparts[object].Parts[strconv.Itoa(i)]
if !ok {
return ObjectResourcesMetadata{}, probe.NewError(InvalidPart{})
}
parts = append(parts, &part)
}
sort.Sort(partNumber(parts))
objectResourcesMetadata.Part = parts
return objectResourcesMetadata, nil
}
// completeMultipartUpload - complete an incomplete multipart upload
func (donut API) completeMultipartUpload(bucket, object, uploadID string, data io.Reader, signature *signv4.Signature) (ObjectMetadata, *probe.Error) {
if bucket == "" || strings.TrimSpace(bucket) == "" {
return ObjectMetadata{}, probe.NewError(InvalidArgument{})
}
if object == "" || strings.TrimSpace(object) == "" {
return ObjectMetadata{}, probe.NewError(InvalidArgument{})
}
if err := donut.listDonutBuckets(); err != nil {
return ObjectMetadata{}, err.Trace()
}
if _, ok := donut.buckets[bucket]; !ok {
return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
}
allBuckets, err := donut.getDonutBucketMetadata()
if err != nil {
return ObjectMetadata{}, err.Trace()
}
bucketMetadata := allBuckets.Buckets[bucket]
if _, ok := bucketMetadata.Multiparts[object]; !ok {
return ObjectMetadata{}, probe.NewError(InvalidUploadID{UploadID: uploadID})
}
if bucketMetadata.Multiparts[object].UploadID != uploadID {
return ObjectMetadata{}, probe.NewError(InvalidUploadID{UploadID: uploadID})
}
var partBytes []byte
{
var err error
partBytes, err = ioutil.ReadAll(data)
if err != nil {
return ObjectMetadata{}, probe.NewError(err)
}
}
if signature != nil {
ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sha256.Sum256(partBytes)[:]))
if err != nil {
return ObjectMetadata{}, err.Trace()
}
if !ok {
return ObjectMetadata{}, probe.NewError(signv4.DoesNotMatch{})
}
}
parts := &CompleteMultipartUpload{}
if err := xml.Unmarshal(partBytes, parts); err != nil {
return ObjectMetadata{}, probe.NewError(MalformedXML{})
}
if !sort.IsSorted(completedParts(parts.Part)) {
return ObjectMetadata{}, probe.NewError(InvalidPartOrder{})
}
for _, part := range parts.Part {
if strings.Trim(part.ETag, "\"") != bucketMetadata.Multiparts[object].Parts[strconv.Itoa(part.PartNumber)].ETag {
return ObjectMetadata{}, probe.NewError(InvalidPart{})
}
}
var finalETagBytes []byte
var finalSize int64
totalParts := strconv.Itoa(bucketMetadata.Multiparts[object].TotalParts)
// concatenate part ETags in the client-supplied (already validated and
// sorted) order; ranging over the map directly would concatenate them in
// a non-deterministic order
for _, completedPart := range parts.Part {
storedPart := bucketMetadata.Multiparts[object].Parts[strconv.Itoa(completedPart.PartNumber)]
partETagBytes, err := hex.DecodeString(storedPart.ETag)
if err != nil {
return ObjectMetadata{}, probe.NewError(err)
}
finalETagBytes = append(finalETagBytes, partETagBytes...)
finalSize += storedPart.Size
}
finalETag := hex.EncodeToString(finalETagBytes)
objMetadata := ObjectMetadata{}
objMetadata.MD5Sum = finalETag + "-" + totalParts
objMetadata.Object = object
objMetadata.Bucket = bucket
objMetadata.Size = finalSize
objMetadata.Created = bucketMetadata.Multiparts[object].Parts[totalParts].LastModified
return objMetadata, nil
}
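// Illustrative sketch, not part of the original source: the final ETag built
// above hex-encodes the concatenated binary MD5s of the parts and appends
// "-<total parts>". (S3 proper additionally re-hashes the concatenation with
// MD5; this code skips that step.) For two hypothetical parts:
//
//	p1, _ := hex.DecodeString("5d41402abc4b2a76b9719d911017c592") // md5("hello")
//	p2, _ := hex.DecodeString("7d793037a0760186574b0282f2f435e7") // md5("world")
//	finalETag := hex.EncodeToString(append(p1, p2...)) + "-2"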
// listMultipartUploads - list all multipart uploads
func (donut API) listMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata) (BucketMultipartResourcesMetadata, *probe.Error) {
if err := donut.listDonutBuckets(); err != nil {
return BucketMultipartResourcesMetadata{}, err.Trace()
}
if _, ok := donut.buckets[bucket]; !ok {
return BucketMultipartResourcesMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
}
allbuckets, err := donut.getDonutBucketMetadata()
if err != nil {
return BucketMultipartResourcesMetadata{}, err.Trace()
}
bucketMetadata := allbuckets.Buckets[bucket]
var uploads []*UploadMetadata
for key, session := range bucketMetadata.Multiparts {
if strings.HasPrefix(key, resources.Prefix) {
if len(uploads) > resources.MaxUploads {
sort.Sort(byKey(uploads))
resources.Upload = uploads
resources.NextKeyMarker = key
resources.NextUploadIDMarker = session.UploadID
resources.IsTruncated = true
return resources, nil
}
// uploadIDMarker is ignored if KeyMarker is empty
switch {
case resources.KeyMarker != "" && resources.UploadIDMarker == "":
if key > resources.KeyMarker {
upload := new(UploadMetadata)
upload.Key = key
upload.UploadID = session.UploadID
upload.Initiated = session.Initiated
uploads = append(uploads, upload)
}
case resources.KeyMarker != "" && resources.UploadIDMarker != "":
if session.UploadID > resources.UploadIDMarker {
if key >= resources.KeyMarker {
upload := new(UploadMetadata)
upload.Key = key
upload.UploadID = session.UploadID
upload.Initiated = session.Initiated
uploads = append(uploads, upload)
}
}
default:
upload := new(UploadMetadata)
upload.Key = key
upload.UploadID = session.UploadID
upload.Initiated = session.Initiated
uploads = append(uploads, upload)
}
}
}
sort.Sort(byKey(uploads))
resources.Upload = uploads
return resources, nil
}
// abortMultipartUpload - abort an incomplete multipart upload
func (donut API) abortMultipartUpload(bucket, object, uploadID string) *probe.Error {
if err := donut.listDonutBuckets(); err != nil {
return err.Trace()
}
if _, ok := donut.buckets[bucket]; !ok {
return probe.NewError(BucketNotFound{Bucket: bucket})
}
allbuckets, err := donut.getDonutBucketMetadata()
if err != nil {
return err.Trace()
}
bucketMetadata := allbuckets.Buckets[bucket]
if _, ok := bucketMetadata.Multiparts[object]; !ok {
return probe.NewError(InvalidUploadID{UploadID: uploadID})
}
if bucketMetadata.Multiparts[object].UploadID != uploadID {
return probe.NewError(InvalidUploadID{UploadID: uploadID})
}
delete(bucketMetadata.Multiparts, object)
allbuckets.Buckets[bucket] = bucketMetadata
if err := donut.setDonutBucketMetadata(allbuckets); err != nil {
return err.Trace()
}
return nil
}
//// internal functions
// getBucketMetadataWriters - return one writer per disk for the bucket metadata file
func (donut API) getBucketMetadataWriters() ([]io.WriteCloser, *probe.Error) {
var writers []io.WriteCloser
for _, node := range donut.nodes {
disks, err := node.ListDisks()
if err != nil {
return nil, err.Trace()
}
writers = make([]io.WriteCloser, len(disks))
for order, disk := range disks {
bucketMetaDataWriter, err := disk.CreateFile(filepath.Join(donut.config.DonutName, bucketMetadataConfig))
if err != nil {
return nil, err.Trace()
}
writers[order] = bucketMetaDataWriter
}
}
return writers, nil
}
// getBucketMetadataReaders - readers are returned in a map keyed by disk order rather than a slice
func (donut API) getBucketMetadataReaders() (map[int]io.ReadCloser, *probe.Error) {
readers := make(map[int]io.ReadCloser)
disks := make(map[int]disk.Disk)
var err *probe.Error
for _, node := range donut.nodes {
nDisks := make(map[int]disk.Disk)
nDisks, err = node.ListDisks()
if err != nil {
return nil, err.Trace()
}
for k, v := range nDisks {
disks[k] = v
}
}
var bucketMetaDataReader io.ReadCloser
for order, disk := range disks {
bucketMetaDataReader, err = disk.Open(filepath.Join(donut.config.DonutName, bucketMetadataConfig))
if err != nil {
continue
}
readers[order] = bucketMetaDataReader
}
// fail only if no disk yielded a readable copy; err retains the last Open failure
if len(readers) == 0 && err != nil {
return nil, err.Trace()
}
return readers, nil
}
// setDonutBucketMetadata - write bucket metadata out to every disk
func (donut API) setDonutBucketMetadata(metadata *AllBuckets) *probe.Error {
writers, err := donut.getBucketMetadataWriters()
if err != nil {
return err.Trace()
}
for _, writer := range writers {
jenc := json.NewEncoder(writer)
if err := jenc.Encode(metadata); err != nil {
CleanupWritersOnError(writers)
return probe.NewError(err)
}
}
for _, writer := range writers {
writer.Close()
}
return nil
}
// getDonutBucketMetadata - read bucket metadata from the first disk holding a decodable copy
func (donut API) getDonutBucketMetadata() (*AllBuckets, *probe.Error) {
metadata := &AllBuckets{}
readers, err := donut.getBucketMetadataReaders()
if err != nil {
return nil, err.Trace()
}
for _, reader := range readers {
defer reader.Close()
}
{
var err error
for _, reader := range readers {
jenc := json.NewDecoder(reader)
if err = jenc.Decode(metadata); err == nil {
return metadata, nil
}
}
return nil, probe.NewError(err)
}
}
// makeDonutBucket - create bucket slices across all disks and persist the metadata
func (donut API) makeDonutBucket(bucketName, acl string) *probe.Error {
if err := donut.listDonutBuckets(); err != nil {
return err.Trace()
}
if _, ok := donut.buckets[bucketName]; ok {
return probe.NewError(BucketExists{Bucket: bucketName})
}
bkt, bucketMetadata, err := newBucket(bucketName, acl, donut.config.DonutName, donut.nodes)
if err != nil {
return err.Trace()
}
nodeNumber := 0
donut.buckets[bucketName] = bkt
for _, node := range donut.nodes {
disks := make(map[int]disk.Disk)
disks, err = node.ListDisks()
if err != nil {
return err.Trace()
}
for order, disk := range disks {
bucketSlice := fmt.Sprintf("%s$%d$%d", bucketName, nodeNumber, order)
err := disk.MakeDir(filepath.Join(donut.config.DonutName, bucketSlice))
if err != nil {
return err.Trace()
}
}
nodeNumber = nodeNumber + 1
}
var metadata *AllBuckets
metadata, err = donut.getDonutBucketMetadata()
if err != nil {
if os.IsNotExist(err.ToGoError()) {
metadata = new(AllBuckets)
metadata.Buckets = make(map[string]BucketMetadata)
metadata.Buckets[bucketName] = bucketMetadata
err = donut.setDonutBucketMetadata(metadata)
if err != nil {
return err.Trace()
}
return nil
}
return err.Trace()
}
metadata.Buckets[bucketName] = bucketMetadata
err = donut.setDonutBucketMetadata(metadata)
if err != nil {
return err.Trace()
}
return nil
}
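// Illustrative note, not part of the original source: every bucket is laid
// out as one slice directory per disk, named "<bucket>$<node>$<order>". For a
// hypothetical donut named "test", bucket "foo" on node 0, disk 3, the slice
// path is "test/foo$0$3"; listDonutBuckets below recovers the bucket name by
// splitting the directory name on "$".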
// listDonutBuckets - populate the in-memory bucket map from bucket slices found on disk
func (donut API) listDonutBuckets() *probe.Error {
var err *probe.Error
disks := make(map[int]disk.Disk)
for _, node := range donut.nodes {
var nDisks map[int]disk.Disk
nDisks, err = node.ListDisks()
if err != nil {
return err.Trace()
}
// merge disks from every node rather than keeping only the last node's set
for k, v := range nDisks {
disks[k] = v
}
}
var dirs []os.FileInfo
for _, disk := range disks {
dirs, err = disk.ListDir(donut.config.DonutName)
if err == nil {
break
}
}
// if all disks are missing then return error
if err != nil {
return err.Trace()
}
for _, dir := range dirs {
splitDir := strings.Split(dir.Name(), "$")
if len(splitDir) < 3 {
return probe.NewError(CorruptedBackend{Backend: dir.Name()})
}
bucketName := splitDir[0]
// we don't need this once we cache from makeDonutBucket()
bkt, _, err := newBucket(bucketName, "private", donut.config.DonutName, donut.nodes)
if err != nil {
return err.Trace()
}
donut.buckets[bucketName] = bkt
}
return nil
}

View file

@ -1,290 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package donut
import (
"bytes"
"crypto/md5"
"encoding/base64"
"encoding/hex"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"testing"
. "gopkg.in/check.v1"
)
func TestDonut(t *testing.T) { TestingT(t) }
type MyDonutSuite struct {
root string
}
var _ = Suite(&MyDonutSuite{})
// create a dummy TestNodeDiskMap
func createTestNodeDiskMap(p string) map[string][]string {
nodes := make(map[string][]string)
nodes["localhost"] = make([]string, 16)
for i := 0; i < len(nodes["localhost"]); i++ {
diskPath := filepath.Join(p, strconv.Itoa(i))
if _, err := os.Stat(diskPath); err != nil {
if os.IsNotExist(err) {
os.MkdirAll(diskPath, 0700)
}
}
nodes["localhost"][i] = diskPath
}
return nodes
}
var dd Interface
func (s *MyDonutSuite) SetUpSuite(c *C) {
root, err := ioutil.TempDir(os.TempDir(), "donut-")
c.Assert(err, IsNil)
s.root = root
conf := new(Config)
conf.Version = "0.0.1"
conf.DonutName = "test"
conf.NodeDiskMap = createTestNodeDiskMap(root)
conf.MaxSize = 100000
SetDonutConfigPath(filepath.Join(root, "donut.json"))
perr := SaveConfig(conf)
c.Assert(perr, IsNil)
dd, perr = New()
c.Assert(perr, IsNil)
// testing empty donut
buckets, perr := dd.ListBuckets()
c.Assert(perr, IsNil)
c.Assert(len(buckets), Equals, 0)
}
func (s *MyDonutSuite) TearDownSuite(c *C) {
os.RemoveAll(s.root)
}
// test make bucket without name
func (s *MyDonutSuite) TestBucketWithoutNameFails(c *C) {
// fail to create new bucket without a name
err := dd.MakeBucket("", "private", nil, nil)
c.Assert(err, Not(IsNil))
err = dd.MakeBucket(" ", "private", nil, nil)
c.Assert(err, Not(IsNil))
}
// test empty bucket
func (s *MyDonutSuite) TestEmptyBucket(c *C) {
c.Assert(dd.MakeBucket("foo1", "private", nil, nil), IsNil)
// check if bucket is empty
var resources BucketResourcesMetadata
resources.Maxkeys = 1
objectsMetadata, resources, err := dd.ListObjects("foo1", resources)
c.Assert(err, IsNil)
c.Assert(len(objectsMetadata), Equals, 0)
c.Assert(resources.CommonPrefixes, DeepEquals, []string{})
c.Assert(resources.IsTruncated, Equals, false)
}
// test bucket list
func (s *MyDonutSuite) TestMakeBucketAndList(c *C) {
// create bucket
err := dd.MakeBucket("foo2", "private", nil, nil)
c.Assert(err, IsNil)
// check bucket exists
buckets, err := dd.ListBuckets()
c.Assert(err, IsNil)
c.Assert(len(buckets), Equals, 5)
c.Assert(buckets[0].ACL, Equals, BucketACL("private"))
}
// test re-create bucket
func (s *MyDonutSuite) TestMakeBucketWithSameNameFails(c *C) {
err := dd.MakeBucket("foo3", "private", nil, nil)
c.Assert(err, IsNil)
err = dd.MakeBucket("foo3", "private", nil, nil)
c.Assert(err, Not(IsNil))
}
// test make multiple buckets
func (s *MyDonutSuite) TestCreateMultipleBucketsAndList(c *C) {
// add a second bucket
err := dd.MakeBucket("foo4", "private", nil, nil)
c.Assert(err, IsNil)
err = dd.MakeBucket("bar1", "private", nil, nil)
c.Assert(err, IsNil)
buckets, err := dd.ListBuckets()
c.Assert(err, IsNil)
c.Assert(len(buckets), Equals, 2)
c.Assert(buckets[0].Name, Equals, "bar1")
c.Assert(buckets[1].Name, Equals, "foo4")
err = dd.MakeBucket("foobar1", "private", nil, nil)
c.Assert(err, IsNil)
buckets, err = dd.ListBuckets()
c.Assert(err, IsNil)
c.Assert(len(buckets), Equals, 3)
c.Assert(buckets[2].Name, Equals, "foobar1")
}
// test object create without bucket
func (s *MyDonutSuite) TestNewObjectFailsWithoutBucket(c *C) {
_, err := dd.CreateObject("unknown", "obj", "", 0, nil, nil, nil)
c.Assert(err, Not(IsNil))
}
// test create object metadata
func (s *MyDonutSuite) TestNewObjectMetadata(c *C) {
data := "Hello World"
hasher := md5.New()
hasher.Write([]byte(data))
expectedMd5Sum := base64.StdEncoding.EncodeToString(hasher.Sum(nil))
reader := ioutil.NopCloser(bytes.NewReader([]byte(data)))
err := dd.MakeBucket("foo6", "private", nil, nil)
c.Assert(err, IsNil)
objectMetadata, err := dd.CreateObject("foo6", "obj", expectedMd5Sum, int64(len(data)), reader, map[string]string{"contentType": "application/json"}, nil)
c.Assert(err, IsNil)
c.Assert(objectMetadata.MD5Sum, Equals, hex.EncodeToString(hasher.Sum(nil)))
c.Assert(objectMetadata.Metadata["contentType"], Equals, "application/json")
}
// test create object fails without name
func (s *MyDonutSuite) TestNewObjectFailsWithEmptyName(c *C) {
_, err := dd.CreateObject("foo", "", "", 0, nil, nil, nil)
c.Assert(err, Not(IsNil))
}
// test create object
func (s *MyDonutSuite) TestNewObjectCanBeWritten(c *C) {
err := dd.MakeBucket("foo", "private", nil, nil)
c.Assert(err, IsNil)
data := "Hello World"
hasher := md5.New()
hasher.Write([]byte(data))
expectedMd5Sum := base64.StdEncoding.EncodeToString(hasher.Sum(nil))
reader := ioutil.NopCloser(bytes.NewReader([]byte(data)))
actualMetadata, err := dd.CreateObject("foo", "obj", expectedMd5Sum, int64(len(data)), reader, map[string]string{"contentType": "application/octet-stream"}, nil)
c.Assert(err, IsNil)
c.Assert(actualMetadata.MD5Sum, Equals, hex.EncodeToString(hasher.Sum(nil)))
var buffer bytes.Buffer
size, err := dd.GetObject(&buffer, "foo", "obj", 0, 0)
c.Assert(err, IsNil)
c.Assert(size, Equals, int64(len(data)))
c.Assert(buffer.Bytes(), DeepEquals, []byte(data))
actualMetadata, err = dd.GetObjectMetadata("foo", "obj")
c.Assert(err, IsNil)
c.Assert(hex.EncodeToString(hasher.Sum(nil)), Equals, actualMetadata.MD5Sum)
c.Assert(int64(len(data)), Equals, actualMetadata.Size)
}
// test list objects
func (s *MyDonutSuite) TestMultipleNewObjects(c *C) {
c.Assert(dd.MakeBucket("foo5", "private", nil, nil), IsNil)
one := ioutil.NopCloser(bytes.NewReader([]byte("one")))
_, err := dd.CreateObject("foo5", "obj1", "", int64(len("one")), one, nil, nil)
c.Assert(err, IsNil)
two := ioutil.NopCloser(bytes.NewReader([]byte("two")))
_, err = dd.CreateObject("foo5", "obj2", "", int64(len("two")), two, nil, nil)
c.Assert(err, IsNil)
var buffer1 bytes.Buffer
size, err := dd.GetObject(&buffer1, "foo5", "obj1", 0, 0)
c.Assert(err, IsNil)
c.Assert(size, Equals, int64(len([]byte("one"))))
c.Assert(buffer1.Bytes(), DeepEquals, []byte("one"))
var buffer2 bytes.Buffer
size, err = dd.GetObject(&buffer2, "foo5", "obj2", 0, 0)
c.Assert(err, IsNil)
c.Assert(size, Equals, int64(len([]byte("two"))))
c.Assert(buffer2.Bytes(), DeepEquals, []byte("two"))
/// test list of objects
// test list objects with prefix and delimiter
var resources BucketResourcesMetadata
resources.Prefix = "o"
resources.Delimiter = "1"
resources.Maxkeys = 10
objectsMetadata, resources, err := dd.ListObjects("foo5", resources)
c.Assert(err, IsNil)
c.Assert(resources.IsTruncated, Equals, false)
c.Assert(resources.CommonPrefixes[0], Equals, "obj1")
// test list objects with only delimiter
resources.Prefix = ""
resources.Delimiter = "1"
resources.Maxkeys = 10
objectsMetadata, resources, err = dd.ListObjects("foo5", resources)
c.Assert(err, IsNil)
c.Assert(objectsMetadata[0].Object, Equals, "obj2")
c.Assert(resources.IsTruncated, Equals, false)
c.Assert(resources.CommonPrefixes[0], Equals, "obj1")
// test list objects with only prefix
resources.Prefix = "o"
resources.Delimiter = ""
resources.Maxkeys = 10
objectsMetadata, resources, err = dd.ListObjects("foo5", resources)
c.Assert(err, IsNil)
c.Assert(resources.IsTruncated, Equals, false)
c.Assert(objectsMetadata[0].Object, Equals, "obj1")
c.Assert(objectsMetadata[1].Object, Equals, "obj2")
three := ioutil.NopCloser(bytes.NewReader([]byte("three")))
_, err = dd.CreateObject("foo5", "obj3", "", int64(len("three")), three, nil, nil)
c.Assert(err, IsNil)
var buffer bytes.Buffer
size, err = dd.GetObject(&buffer, "foo5", "obj3", 0, 0)
c.Assert(err, IsNil)
c.Assert(size, Equals, int64(len([]byte("three"))))
c.Assert(buffer.Bytes(), DeepEquals, []byte("three"))
// test list objects with maxkeys
resources.Prefix = "o"
resources.Delimiter = ""
resources.Maxkeys = 2
objectsMetadata, resources, err = dd.ListObjects("foo5", resources)
c.Assert(err, IsNil)
c.Assert(resources.IsTruncated, Equals, true)
c.Assert(len(objectsMetadata), Equals, 2)
}

View file

@ -1,636 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package donut
import (
"bytes"
"crypto/md5"
"encoding/base64"
"encoding/hex"
"io"
"io/ioutil"
"log"
"runtime/debug"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/minio/minio/pkg/crypto/sha256"
"github.com/minio/minio/pkg/donut/cache/data"
"github.com/minio/minio/pkg/donut/cache/metadata"
"github.com/minio/minio/pkg/probe"
"github.com/minio/minio/pkg/quick"
signv4 "github.com/minio/minio/pkg/signature"
)
// total number of buckets allowed
const (
totalBuckets = 100
)
// Config donut config
type Config struct {
Version string `json:"version"`
MaxSize uint64 `json:"max-size"`
DonutName string `json:"donut-name"`
NodeDiskMap map[string][]string `json:"node-disk-map"`
}
// API - local variables
type API struct {
config *Config
lock *sync.Mutex
objects *data.Cache
multiPartObjects map[string]*data.Cache
storedBuckets *metadata.Cache
nodes map[string]node
buckets map[string]bucket
}
// storedBucket saved bucket
type storedBucket struct {
bucketMetadata BucketMetadata
objectMetadata map[string]ObjectMetadata
partMetadata map[string]map[int]PartMetadata
multiPartSession map[string]MultiPartSession
}
// New - instantiate a new donut
func New() (Interface, *probe.Error) {
var conf *Config
var err *probe.Error
conf, err = LoadConfig()
if err != nil {
conf = &Config{
Version: "0.0.1",
MaxSize: 512000000,
NodeDiskMap: nil,
DonutName: "",
}
if err := quick.CheckData(conf); err != nil {
return nil, err.Trace()
}
}
a := API{config: conf}
a.storedBuckets = metadata.NewCache()
a.nodes = make(map[string]node)
a.buckets = make(map[string]bucket)
a.objects = data.NewCache(a.config.MaxSize)
a.multiPartObjects = make(map[string]*data.Cache)
a.objects.OnEvicted = a.evictedObject
a.lock = new(sync.Mutex)
if len(a.config.NodeDiskMap) > 0 {
for k, v := range a.config.NodeDiskMap {
if len(v) == 0 {
return nil, probe.NewError(InvalidDisksArgument{})
}
err := a.AttachNode(k, v)
if err != nil {
return nil, err.Trace()
}
}
/// Initialization, populate all buckets into memory
buckets, err := a.listBuckets()
if err != nil {
return nil, err.Trace()
}
for k, v := range buckets {
var newBucket = storedBucket{}
newBucket.bucketMetadata = v
newBucket.objectMetadata = make(map[string]ObjectMetadata)
newBucket.multiPartSession = make(map[string]MultiPartSession)
newBucket.partMetadata = make(map[string]map[int]PartMetadata)
a.storedBuckets.Set(k, newBucket)
}
a.Heal()
}
return a, nil
}
/// V2 API functions
// GetObject - GET object from cache buffer
func (donut API) GetObject(w io.Writer, bucket string, object string, start, length int64) (int64, *probe.Error) {
donut.lock.Lock()
defer donut.lock.Unlock()
if !IsValidBucket(bucket) {
return 0, probe.NewError(BucketNameInvalid{Bucket: bucket})
}
if !IsValidObjectName(object) {
return 0, probe.NewError(ObjectNameInvalid{Object: object})
}
if start < 0 {
return 0, probe.NewError(InvalidRange{
Start: start,
Length: length,
})
}
if !donut.storedBuckets.Exists(bucket) {
return 0, probe.NewError(BucketNotFound{Bucket: bucket})
}
objectKey := bucket + "/" + object
data, ok := donut.objects.Get(objectKey)
var written int64
if !ok {
if len(donut.config.NodeDiskMap) > 0 {
reader, size, err := donut.getObject(bucket, object)
if err != nil {
return 0, err.Trace()
}
if start > 0 {
if _, err := io.CopyN(ioutil.Discard, reader, start); err != nil {
return 0, probe.NewError(err)
}
}
// new proxy writer to capture data read from disk
pw := NewProxyWriter(w)
{
var err error
if length > 0 {
written, err = io.CopyN(pw, reader, length)
if err != nil {
return 0, probe.NewError(err)
}
} else {
written, err = io.CopyN(pw, reader, size)
if err != nil {
return 0, probe.NewError(err)
}
}
}
/// cache object read from disk
ok := donut.objects.Append(objectKey, pw.writtenBytes)
pw.writtenBytes = nil
go debug.FreeOSMemory()
if !ok {
return 0, probe.NewError(InternalError{})
}
return written, nil
}
return 0, probe.NewError(ObjectNotFound{Object: object})
}
var err error
if start == 0 && length == 0 {
written, err = io.CopyN(w, bytes.NewBuffer(data), int64(donut.objects.Len(objectKey)))
if err != nil {
return 0, probe.NewError(err)
}
return written, nil
}
written, err = io.CopyN(w, bytes.NewBuffer(data[start:]), length)
if err != nil {
return 0, probe.NewError(err)
}
return written, nil
}
// GetBucketMetadata - get bucket metadata, from disk if not cached
func (donut API) GetBucketMetadata(bucket string) (BucketMetadata, *probe.Error) {
donut.lock.Lock()
defer donut.lock.Unlock()
if !IsValidBucket(bucket) {
return BucketMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
}
if !donut.storedBuckets.Exists(bucket) {
if len(donut.config.NodeDiskMap) > 0 {
bucketMetadata, err := donut.getBucketMetadata(bucket)
if err != nil {
return BucketMetadata{}, err.Trace()
}
storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
storedBucket.bucketMetadata = bucketMetadata
donut.storedBuckets.Set(bucket, storedBucket)
}
return BucketMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
}
return donut.storedBuckets.Get(bucket).(storedBucket).bucketMetadata, nil
}
// SetBucketMetadata - set bucket metadata, persisting to disk when configured
func (donut API) SetBucketMetadata(bucket string, metadata map[string]string) *probe.Error {
donut.lock.Lock()
defer donut.lock.Unlock()
if !IsValidBucket(bucket) {
return probe.NewError(BucketNameInvalid{Bucket: bucket})
}
if !donut.storedBuckets.Exists(bucket) {
return probe.NewError(BucketNotFound{Bucket: bucket})
}
if len(donut.config.NodeDiskMap) > 0 {
if err := donut.setBucketMetadata(bucket, metadata); err != nil {
return err.Trace()
}
}
storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
storedBucket.bucketMetadata.ACL = BucketACL(metadata["acl"])
donut.storedBuckets.Set(bucket, storedBucket)
return nil
}
// isMD5SumEqual - returns an error if the md5sums mismatch, `nil` on success
func isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) *probe.Error {
if strings.TrimSpace(expectedMD5Sum) != "" && strings.TrimSpace(actualMD5Sum) != "" {
expectedMD5SumBytes, err := hex.DecodeString(expectedMD5Sum)
if err != nil {
return probe.NewError(err)
}
actualMD5SumBytes, err := hex.DecodeString(actualMD5Sum)
if err != nil {
return probe.NewError(err)
}
if !bytes.Equal(expectedMD5SumBytes, actualMD5SumBytes) {
return probe.NewError(BadDigest{})
}
return nil
}
return probe.NewError(InvalidArgument{})
}
// CreateObject - create an object
func (donut API) CreateObject(bucket, key, expectedMD5Sum string, size int64, data io.Reader, metadata map[string]string, signature *signv4.Signature) (ObjectMetadata, *probe.Error) {
donut.lock.Lock()
defer donut.lock.Unlock()
contentType := metadata["contentType"]
objectMetadata, err := donut.createObject(bucket, key, contentType, expectedMD5Sum, size, data, signature)
// eagerly return freed memory to the OS
debug.FreeOSMemory()
return objectMetadata, err.Trace()
}
// createObject - PUT object to cache buffer
func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *signv4.Signature) (ObjectMetadata, *probe.Error) {
if len(donut.config.NodeDiskMap) == 0 {
if size > int64(donut.config.MaxSize) {
generic := GenericObjectError{Bucket: bucket, Object: key}
return ObjectMetadata{}, probe.NewError(EntityTooLarge{
GenericObjectError: generic,
Size: strconv.FormatInt(size, 10),
MaxSize: strconv.FormatUint(donut.config.MaxSize, 10),
})
}
}
if !IsValidBucket(bucket) {
return ObjectMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
}
if !IsValidObjectName(key) {
return ObjectMetadata{}, probe.NewError(ObjectNameInvalid{Object: key})
}
if !donut.storedBuckets.Exists(bucket) {
return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
}
storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
// get object key
objectKey := bucket + "/" + key
if _, ok := storedBucket.objectMetadata[objectKey]; ok {
return ObjectMetadata{}, probe.NewError(ObjectExists{Object: key})
}
if contentType == "" {
contentType = "application/octet-stream"
}
contentType = strings.TrimSpace(contentType)
if strings.TrimSpace(expectedMD5Sum) != "" {
expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
if err != nil {
// reject the request, the supplied md5sum is not valid base64
return ObjectMetadata{}, probe.NewError(InvalidDigest{Md5: expectedMD5Sum})
}
expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
}
if len(donut.config.NodeDiskMap) > 0 {
objMetadata, err := donut.putObject(
bucket,
key,
expectedMD5Sum,
data,
size,
map[string]string{
"contentType": contentType,
"contentLength": strconv.FormatInt(size, 10),
},
signature,
)
if err != nil {
return ObjectMetadata{}, err.Trace()
}
storedBucket.objectMetadata[objectKey] = objMetadata
donut.storedBuckets.Set(bucket, storedBucket)
return objMetadata, nil
}
// calculate md5 and sha256 while streaming the data into the cache
hash := md5.New()
sha256hash := sha256.New()
var err error
var totalLength int64
for err == nil {
var length int
byteBuffer := make([]byte, 1024*1024)
length, err = data.Read(byteBuffer)
if length != 0 {
hash.Write(byteBuffer[0:length])
sha256hash.Write(byteBuffer[0:length])
ok := donut.objects.Append(objectKey, byteBuffer[0:length])
if !ok {
return ObjectMetadata{}, probe.NewError(InternalError{})
}
totalLength += int64(length)
go debug.FreeOSMemory()
}
}
if size != 0 {
if totalLength != size {
// Delete the object; it may already be partially saved, due to the nature of append()
donut.objects.Delete(objectKey)
return ObjectMetadata{}, probe.NewError(IncompleteBody{Bucket: bucket, Object: key})
}
}
if err != io.EOF {
return ObjectMetadata{}, probe.NewError(err)
}
md5SumBytes := hash.Sum(nil)
md5Sum := hex.EncodeToString(md5SumBytes)
// Verify if the written object is equal to what is expected, only if it is requested as such
if strings.TrimSpace(expectedMD5Sum) != "" {
if err := isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5Sum); err != nil {
// Delete the object; it may already be partially saved, due to the nature of append()
donut.objects.Delete(objectKey)
return ObjectMetadata{}, probe.NewError(BadDigest{})
}
}
if signature != nil {
ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sha256hash.Sum(nil)))
if err != nil {
// Delete the object; it may already be partially saved, due to the nature of append()
donut.objects.Delete(objectKey)
return ObjectMetadata{}, err.Trace()
}
if !ok {
// Delete the object; it may already be partially saved, due to the nature of append()
donut.objects.Delete(objectKey)
return ObjectMetadata{}, probe.NewError(signv4.DoesNotMatch{})
}
}
m := make(map[string]string)
m["contentType"] = contentType
newObject := ObjectMetadata{
Bucket: bucket,
Object: key,
Metadata: m,
Created: time.Now().UTC(),
MD5Sum: md5Sum,
Size: int64(totalLength),
}
storedBucket.objectMetadata[objectKey] = newObject
donut.storedBuckets.Set(bucket, storedBucket)
return newObject, nil
}
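// Illustrative sketch, not part of the original source: the manual read loop
// above can be collapsed with io.MultiWriter, feeding both digests in one
// pass; the cache appends and size accounting are omitted for brevity:
//
//	func hashOnePass(data io.Reader) (md5Hex, sha256Hex string, n int64, err error) {
//		md5hash, sha256hash := md5.New(), sha256.New()
//		n, err = io.Copy(io.MultiWriter(md5hash, sha256hash), data)
//		if err != nil {
//			return "", "", 0, err
//		}
//		return hex.EncodeToString(md5hash.Sum(nil)), hex.EncodeToString(sha256hash.Sum(nil)), n, nil
//	}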
// MakeBucket - create bucket in cache
func (donut API) MakeBucket(bucketName, acl string, location io.Reader, signature *signv4.Signature) *probe.Error {
donut.lock.Lock()
defer donut.lock.Unlock()
// no need to parse the location constraint; it is read only for signature verification
locationSum := "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
if location != nil {
locationConstraintBytes, err := ioutil.ReadAll(location)
if err != nil {
return probe.NewError(InternalError{})
}
locationSum = hex.EncodeToString(sha256.Sum256(locationConstraintBytes)[:])
}
if signature != nil {
ok, err := signature.DoesSignatureMatch(locationSum)
if err != nil {
return err.Trace()
}
if !ok {
return probe.NewError(signv4.DoesNotMatch{})
}
}
if donut.storedBuckets.Stats().Items == totalBuckets {
return probe.NewError(TooManyBuckets{Bucket: bucketName})
}
if !IsValidBucket(bucketName) {
return probe.NewError(BucketNameInvalid{Bucket: bucketName})
}
if !IsValidBucketACL(acl) {
return probe.NewError(InvalidACL{ACL: acl})
}
if donut.storedBuckets.Exists(bucketName) {
return probe.NewError(BucketExists{Bucket: bucketName})
}
if strings.TrimSpace(acl) == "" {
// default is private
acl = "private"
}
if len(donut.config.NodeDiskMap) > 0 {
if err := donut.makeBucket(bucketName, BucketACL(acl)); err != nil {
return err.Trace()
}
}
var newBucket = storedBucket{}
newBucket.objectMetadata = make(map[string]ObjectMetadata)
newBucket.multiPartSession = make(map[string]MultiPartSession)
newBucket.partMetadata = make(map[string]map[int]PartMetadata)
newBucket.bucketMetadata = BucketMetadata{}
newBucket.bucketMetadata.Name = bucketName
newBucket.bucketMetadata.Created = time.Now().UTC()
newBucket.bucketMetadata.ACL = BucketACL(acl)
donut.storedBuckets.Set(bucketName, newBucket)
return nil
}
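// Illustrative note, not part of the original source: the default locationSum
// above is the well-known SHA-256 of an empty body, which can be checked with:
//
//	hex.EncodeToString(sha256.Sum256([]byte{})[:])
//	// "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"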
// ListObjects - list objects from cache
func (donut API) ListObjects(bucket string, resources BucketResourcesMetadata) ([]ObjectMetadata, BucketResourcesMetadata, *probe.Error) {
donut.lock.Lock()
defer donut.lock.Unlock()
if !IsValidBucket(bucket) {
return nil, BucketResourcesMetadata{IsTruncated: false}, probe.NewError(BucketNameInvalid{Bucket: bucket})
}
if !IsValidPrefix(resources.Prefix) {
return nil, BucketResourcesMetadata{IsTruncated: false}, probe.NewError(ObjectNameInvalid{Object: resources.Prefix})
}
if !donut.storedBuckets.Exists(bucket) {
return nil, BucketResourcesMetadata{IsTruncated: false}, probe.NewError(BucketNotFound{Bucket: bucket})
}
var results []ObjectMetadata
var keys []string
if len(donut.config.NodeDiskMap) > 0 {
listObjects, err := donut.listObjects(
bucket,
resources.Prefix,
resources.Marker,
resources.Delimiter,
resources.Maxkeys,
)
if err != nil {
return nil, BucketResourcesMetadata{IsTruncated: false}, err.Trace()
}
resources.CommonPrefixes = listObjects.CommonPrefixes
resources.IsTruncated = listObjects.IsTruncated
for key := range listObjects.Objects {
keys = append(keys, key)
}
sort.Strings(keys)
for _, key := range keys {
results = append(results, listObjects.Objects[key])
}
if resources.IsTruncated && resources.Delimiter != "" {
resources.NextMarker = results[len(results)-1].Object
}
return results, resources, nil
}
storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
for key := range storedBucket.objectMetadata {
if strings.HasPrefix(key, bucket+"/") {
key = key[len(bucket)+1:]
if strings.HasPrefix(key, resources.Prefix) {
if key > resources.Marker {
keys = append(keys, key)
}
}
}
}
if strings.TrimSpace(resources.Prefix) != "" {
keys = TrimPrefix(keys, resources.Prefix)
}
var prefixes []string
var filteredKeys []string
filteredKeys = keys
if strings.TrimSpace(resources.Delimiter) != "" {
filteredKeys = HasNoDelimiter(keys, resources.Delimiter)
prefixes = HasDelimiter(keys, resources.Delimiter)
prefixes = SplitDelimiter(prefixes, resources.Delimiter)
prefixes = SortUnique(prefixes)
}
for _, commonPrefix := range prefixes {
resources.CommonPrefixes = append(resources.CommonPrefixes, resources.Prefix+commonPrefix)
}
filteredKeys = RemoveDuplicates(filteredKeys)
sort.Strings(filteredKeys)
for _, key := range filteredKeys {
if len(results) == resources.Maxkeys {
resources.IsTruncated = true
if resources.IsTruncated && resources.Delimiter != "" {
resources.NextMarker = results[len(results)-1].Object
}
return results, resources, nil
}
object := storedBucket.objectMetadata[bucket+"/"+resources.Prefix+key]
results = append(results, object)
}
resources.CommonPrefixes = RemoveDuplicates(resources.CommonPrefixes)
sort.Strings(resources.CommonPrefixes)
return results, resources, nil
}
// byBucketName is a type for sorting bucket metadata by bucket name
type byBucketName []BucketMetadata
func (b byBucketName) Len() int { return len(b) }
func (b byBucketName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b byBucketName) Less(i, j int) bool { return b[i].Name < b[j].Name }
// ListBuckets - List buckets from cache
func (donut API) ListBuckets() ([]BucketMetadata, *probe.Error) {
donut.lock.Lock()
defer donut.lock.Unlock()
var results []BucketMetadata
if len(donut.config.NodeDiskMap) > 0 {
buckets, err := donut.listBuckets()
if err != nil {
return nil, err.Trace()
}
for _, bucketMetadata := range buckets {
results = append(results, bucketMetadata)
}
sort.Sort(byBucketName(results))
return results, nil
}
for _, bucket := range donut.storedBuckets.GetAll() {
results = append(results, bucket.(storedBucket).bucketMetadata)
}
sort.Sort(byBucketName(results))
return results, nil
}
// GetObjectMetadata - get object metadata from cache
func (donut API) GetObjectMetadata(bucket, key string) (ObjectMetadata, *probe.Error) {
donut.lock.Lock()
defer donut.lock.Unlock()
// check if bucket exists
if !IsValidBucket(bucket) {
return ObjectMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
}
if !IsValidObjectName(key) {
return ObjectMetadata{}, probe.NewError(ObjectNameInvalid{Object: key})
}
if !donut.storedBuckets.Exists(bucket) {
return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
}
storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
objectKey := bucket + "/" + key
if objMetadata, ok := storedBucket.objectMetadata[objectKey]; ok {
return objMetadata, nil
}
if len(donut.config.NodeDiskMap) > 0 {
objMetadata, err := donut.getObjectMetadata(bucket, key)
if err != nil {
return ObjectMetadata{}, err.Trace()
}
// update
storedBucket.objectMetadata[objectKey] = objMetadata
donut.storedBuckets.Set(bucket, storedBucket)
return objMetadata, nil
}
return ObjectMetadata{}, probe.NewError(ObjectNotFound{Object: key})
}
// evictedObject - callback invoked when an item is evicted from memory
func (donut API) evictedObject(a ...interface{}) {
cacheStats := donut.objects.Stats()
log.Printf("CurrentSize: %d, CurrentItems: %d, TotalEvicted: %d",
cacheStats.Bytes, cacheStats.Items, cacheStats.Evicted)
key := a[0].(string)
// loop through all buckets
for _, bucket := range donut.storedBuckets.GetAll() {
delete(bucket.(storedBucket).objectMetadata, key)
}
debug.FreeOSMemory()
}

View file

@ -1,265 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package donut
import (
"bytes"
"crypto/md5"
"encoding/base64"
"encoding/hex"
"io/ioutil"
"os"
"path/filepath"
"testing"
. "gopkg.in/check.v1"
)
func TestCache(t *testing.T) { TestingT(t) }
type MyCacheSuite struct {
root string
}
var _ = Suite(&MyCacheSuite{})
var dc Interface
func (s *MyCacheSuite) SetUpSuite(c *C) {
root, err := ioutil.TempDir(os.TempDir(), "donut-")
c.Assert(err, IsNil)
s.root = root
SetDonutConfigPath(filepath.Join(root, "donut.json"))
dc, _ = New()
// testing empty cache
var buckets []BucketMetadata
buckets, perr := dc.ListBuckets()
c.Assert(perr, IsNil)
c.Assert(len(buckets), Equals, 0)
}
func (s *MyCacheSuite) TearDownSuite(c *C) {
os.RemoveAll(s.root)
}
// test make bucket without name
func (s *MyCacheSuite) TestBucketWithoutNameFails(c *C) {
// fail to create new bucket without a name
err := dc.MakeBucket("", "private", nil, nil)
c.Assert(err, Not(IsNil))
err = dc.MakeBucket(" ", "private", nil, nil)
c.Assert(err, Not(IsNil))
}
// test empty bucket
func (s *MyCacheSuite) TestEmptyBucket(c *C) {
c.Assert(dc.MakeBucket("foo1", "private", nil, nil), IsNil)
// check if bucket is empty
var resources BucketResourcesMetadata
resources.Maxkeys = 1
objectsMetadata, resources, err := dc.ListObjects("foo1", resources)
c.Assert(err, IsNil)
c.Assert(len(objectsMetadata), Equals, 0)
c.Assert(resources.CommonPrefixes, DeepEquals, []string{})
c.Assert(resources.IsTruncated, Equals, false)
}
// test bucket list
func (s *MyCacheSuite) TestMakeBucketAndList(c *C) {
// create bucket
err := dc.MakeBucket("foo2", "private", nil, nil)
c.Assert(err, IsNil)
// check bucket exists
buckets, err := dc.ListBuckets()
c.Assert(err, IsNil)
c.Assert(len(buckets), Equals, 5)
c.Assert(buckets[0].ACL, Equals, BucketACL("private"))
}
// test re-create bucket
func (s *MyCacheSuite) TestMakeBucketWithSameNameFails(c *C) {
err := dc.MakeBucket("foo3", "private", nil, nil)
c.Assert(err, IsNil)
err = dc.MakeBucket("foo3", "private", nil, nil)
c.Assert(err, Not(IsNil))
}
// test make multiple buckets
func (s *MyCacheSuite) TestCreateMultipleBucketsAndList(c *C) {
// add a second bucket
err := dc.MakeBucket("foo4", "private", nil, nil)
c.Assert(err, IsNil)
err = dc.MakeBucket("bar1", "private", nil, nil)
c.Assert(err, IsNil)
buckets, err := dc.ListBuckets()
c.Assert(err, IsNil)
c.Assert(len(buckets), Equals, 2)
c.Assert(buckets[0].Name, Equals, "bar1")
c.Assert(buckets[1].Name, Equals, "foo4")
err = dc.MakeBucket("foobar1", "private", nil, nil)
c.Assert(err, IsNil)
buckets, err = dc.ListBuckets()
c.Assert(err, IsNil)
c.Assert(len(buckets), Equals, 3)
c.Assert(buckets[2].Name, Equals, "foobar1")
}
// test object create without bucket
func (s *MyCacheSuite) TestNewObjectFailsWithoutBucket(c *C) {
_, err := dc.CreateObject("unknown", "obj", "", 0, nil, nil, nil)
c.Assert(err, Not(IsNil))
}
// test create object metadata
func (s *MyCacheSuite) TestNewObjectMetadata(c *C) {
data := "Hello World"
hasher := md5.New()
hasher.Write([]byte(data))
expectedMd5Sum := base64.StdEncoding.EncodeToString(hasher.Sum(nil))
reader := ioutil.NopCloser(bytes.NewReader([]byte(data)))
err := dc.MakeBucket("foo6", "private", nil, nil)
c.Assert(err, IsNil)
objectMetadata, err := dc.CreateObject("foo6", "obj", expectedMd5Sum, int64(len(data)), reader, map[string]string{"contentType": "application/json"}, nil)
c.Assert(err, IsNil)
c.Assert(objectMetadata.MD5Sum, Equals, hex.EncodeToString(hasher.Sum(nil)))
c.Assert(objectMetadata.Metadata["contentType"], Equals, "application/json")
}
// test create object fails without name
func (s *MyCacheSuite) TestNewObjectFailsWithEmptyName(c *C) {
_, err := dc.CreateObject("foo", "", "", 0, nil, nil, nil)
c.Assert(err, Not(IsNil))
}
// test create object
func (s *MyCacheSuite) TestNewObjectCanBeWritten(c *C) {
err := dc.MakeBucket("foo", "private", nil, nil)
c.Assert(err, IsNil)
data := "Hello World"
hasher := md5.New()
hasher.Write([]byte(data))
expectedMd5Sum := base64.StdEncoding.EncodeToString(hasher.Sum(nil))
reader := ioutil.NopCloser(bytes.NewReader([]byte(data)))
actualMetadata, err := dc.CreateObject("foo", "obj", expectedMd5Sum, int64(len(data)), reader, map[string]string{"contentType": "application/octet-stream"}, nil)
c.Assert(err, IsNil)
c.Assert(actualMetadata.MD5Sum, Equals, hex.EncodeToString(hasher.Sum(nil)))
var buffer bytes.Buffer
size, err := dc.GetObject(&buffer, "foo", "obj", 0, 0)
c.Assert(err, IsNil)
c.Assert(size, Equals, int64(len(data)))
c.Assert(buffer.Bytes(), DeepEquals, []byte(data))
actualMetadata, err = dc.GetObjectMetadata("foo", "obj")
c.Assert(err, IsNil)
c.Assert(hex.EncodeToString(hasher.Sum(nil)), Equals, actualMetadata.MD5Sum)
c.Assert(int64(len(data)), Equals, actualMetadata.Size)
}
// test list objects
func (s *MyCacheSuite) TestMultipleNewObjects(c *C) {
c.Assert(dc.MakeBucket("foo5", "private", nil, nil), IsNil)
one := ioutil.NopCloser(bytes.NewReader([]byte("one")))
_, err := dc.CreateObject("foo5", "obj1", "", int64(len("one")), one, nil, nil)
c.Assert(err, IsNil)
two := ioutil.NopCloser(bytes.NewReader([]byte("two")))
_, err = dc.CreateObject("foo5", "obj2", "", int64(len("two")), two, nil, nil)
c.Assert(err, IsNil)
var buffer1 bytes.Buffer
size, err := dc.GetObject(&buffer1, "foo5", "obj1", 0, 0)
c.Assert(err, IsNil)
c.Assert(size, Equals, int64(len([]byte("one"))))
c.Assert(buffer1.Bytes(), DeepEquals, []byte("one"))
var buffer2 bytes.Buffer
size, err = dc.GetObject(&buffer2, "foo5", "obj2", 0, 0)
c.Assert(err, IsNil)
c.Assert(size, Equals, int64(len([]byte("two"))))
c.Assert(buffer2.Bytes(), DeepEquals, []byte("two"))
/// test list of objects
// test list objects with prefix and delimiter
var resources BucketResourcesMetadata
resources.Prefix = "o"
resources.Delimiter = "1"
resources.Maxkeys = 10
objectsMetadata, resources, err := dc.ListObjects("foo5", resources)
c.Assert(err, IsNil)
c.Assert(resources.IsTruncated, Equals, false)
c.Assert(resources.CommonPrefixes[0], Equals, "obj1")
// test list objects with only delimiter
resources.Prefix = ""
resources.Delimiter = "1"
resources.Maxkeys = 10
objectsMetadata, resources, err = dc.ListObjects("foo5", resources)
c.Assert(err, IsNil)
c.Assert(objectsMetadata[0].Object, Equals, "obj2")
c.Assert(resources.IsTruncated, Equals, false)
c.Assert(resources.CommonPrefixes[0], Equals, "obj1")
// test list objects with only prefix
resources.Prefix = "o"
resources.Delimiter = ""
resources.Maxkeys = 10
objectsMetadata, resources, err = dc.ListObjects("foo5", resources)
c.Assert(err, IsNil)
c.Assert(resources.IsTruncated, Equals, false)
c.Assert(objectsMetadata[0].Object, Equals, "obj1")
c.Assert(objectsMetadata[1].Object, Equals, "obj2")
three := ioutil.NopCloser(bytes.NewReader([]byte("three")))
_, err = dc.CreateObject("foo5", "obj3", "", int64(len("three")), three, nil, nil)
c.Assert(err, IsNil)
var buffer bytes.Buffer
size, err = dc.GetObject(&buffer, "foo5", "obj3", 0, 0)
c.Assert(err, IsNil)
c.Assert(size, Equals, int64(len([]byte("three"))))
c.Assert(buffer.Bytes(), DeepEquals, []byte("three"))
// test list objects with maxkeys
resources.Prefix = "o"
resources.Delimiter = ""
resources.Maxkeys = 2
objectsMetadata, resources, err = dc.ListObjects("foo5", resources)
c.Assert(err, IsNil)
c.Assert(resources.IsTruncated, Equals, true)
c.Assert(len(objectsMetadata), Equals, 2)
}

View file

@ -1,71 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package donut
import (
encoding "github.com/minio/minio/pkg/erasure"
"github.com/minio/minio/pkg/probe"
)
// encoder internal struct
type encoder struct {
encoder *encoding.Erasure
k, m uint8
}
// newEncoder - instantiate a new encoder
func newEncoder(k, m uint8) (encoder, *probe.Error) {
e := encoder{}
params, err := encoding.ValidateParams(k, m)
if err != nil {
return encoder{}, probe.NewError(err)
}
e.encoder = encoding.NewErasure(params)
e.k = k
e.m = m
return e, nil
}
// TODO - think again if this is needed
// GetEncodedBlockLen - wrapper around erasure function with the same name
func (e encoder) GetEncodedBlockLen(dataLength int) (int, *probe.Error) {
if dataLength <= 0 {
return 0, probe.NewError(InvalidArgument{})
}
return encoding.GetEncodedBlockLen(dataLength, e.k), nil
}
// Encode - erasure code input bytes
func (e encoder) Encode(data []byte) ([][]byte, *probe.Error) {
if data == nil {
return nil, probe.NewError(InvalidArgument{})
}
encodedData, err := e.encoder.Encode(data)
if err != nil {
return nil, probe.NewError(err)
}
return encodedData, nil
}
// Decode - erasure decode input encoded bytes
func (e encoder) Decode(encodedData [][]byte, dataLength int) ([]byte, *probe.Error) {
decodedData, err := e.encoder.Decode(encodedData, dataLength)
if err != nil {
return nil, probe.NewError(err)
}
return decodedData, nil
}
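// Illustrative sketch, not part of the original source: an encode/decode
// round trip through this file's API with hypothetical parameters k=8 data
// and m=8 parity blocks; the erasure code is meant to tolerate losing up to
// m of the returned blocks:
//
//	func roundTrip(payload []byte) ([]byte, *probe.Error) {
//		e, err := newEncoder(8, 8)
//		if err != nil {
//			return nil, err.Trace()
//		}
//		blocks, err := e.Encode(payload)
//		if err != nil {
//			return nil, err.Trace()
//		}
//		return e.Decode(blocks, len(payload))
//	}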

View file

@ -1,69 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package donut
import (
"encoding/json"
"fmt"
"path/filepath"
"github.com/minio/minio/pkg/donut/disk"
"github.com/minio/minio/pkg/probe"
)
// healBuckets - heal bucket slices
func (donut API) healBuckets() *probe.Error {
if err := donut.listDonutBuckets(); err != nil {
return err.Trace()
}
bucketMetadata, err := donut.getDonutBucketMetadata()
if err != nil {
return err.Trace()
}
disks := make(map[int]disk.Disk)
for _, node := range donut.nodes {
nDisks, err := node.ListDisks()
if err != nil {
return err.Trace()
}
for k, v := range nDisks {
disks[k] = v
}
}
for order, disk := range disks {
if disk.IsUsable() {
disk.MakeDir(donut.config.DonutName)
bucketMetadataWriter, err := disk.CreateFile(filepath.Join(donut.config.DonutName, bucketMetadataConfig))
if err != nil {
return err.Trace()
}
defer bucketMetadataWriter.Close()
jenc := json.NewEncoder(bucketMetadataWriter)
if err := jenc.Encode(bucketMetadata); err != nil {
return probe.NewError(err)
}
for bucket := range bucketMetadata.Buckets {
bucketSlice := fmt.Sprintf("%s$0$%d", bucket, order) // TODO handle node slices
err := disk.MakeDir(filepath.Join(donut.config.DonutName, bucketSlice))
if err != nil {
return err.Trace()
}
}
}
}
return nil
}

View file

@ -1,72 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package donut
import (
"io"
"github.com/minio/minio/pkg/probe"
signv4 "github.com/minio/minio/pkg/signature"
)
// Collection of Donut specification interfaces
// Interface is a collection of cloud storage and management interface
type Interface interface {
CloudStorage
Management
}
// CloudStorage is a donut cloud storage interface
type CloudStorage interface {
// Storage service operations
GetBucketMetadata(bucket string) (BucketMetadata, *probe.Error)
SetBucketMetadata(bucket string, metadata map[string]string) *probe.Error
ListBuckets() ([]BucketMetadata, *probe.Error)
MakeBucket(bucket string, ACL string, location io.Reader, signature *signv4.Signature) *probe.Error
// Bucket operations
ListObjects(string, BucketResourcesMetadata) ([]ObjectMetadata, BucketResourcesMetadata, *probe.Error)
// Object operations
GetObject(w io.Writer, bucket, object string, start, length int64) (int64, *probe.Error)
GetObjectMetadata(bucket, object string) (ObjectMetadata, *probe.Error)
// bucket, object, expectedMD5Sum, size, reader, metadata, signature
CreateObject(string, string, string, int64, io.Reader, map[string]string, *signv4.Signature) (ObjectMetadata, *probe.Error)
Multipart
}
// Multipart API
type Multipart interface {
NewMultipartUpload(bucket, key, contentType string) (string, *probe.Error)
AbortMultipartUpload(bucket, key, uploadID string) *probe.Error
CreateObjectPart(string, string, string, int, string, string, int64, io.Reader, *signv4.Signature) (string, *probe.Error)
CompleteMultipartUpload(bucket, key, uploadID string, data io.Reader, signature *signv4.Signature) (ObjectMetadata, *probe.Error)
ListMultipartUploads(string, BucketMultipartResourcesMetadata) (BucketMultipartResourcesMetadata, *probe.Error)
ListObjectParts(string, string, ObjectResourcesMetadata) (ObjectResourcesMetadata, *probe.Error)
}
// Management is a donut management system interface
type Management interface {
Heal() *probe.Error
Rebalance() *probe.Error
Info() (map[string][]string, *probe.Error)
AttachNode(hostname string, disks []string) *probe.Error
DetachNode(hostname string) *probe.Error
}
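// Illustrative sketch, not part of the original source: a caller exercising
// the CloudStorage surface end to end with hypothetical bucket and object
// names; returned probe errors are discarded for brevity:
//
//	d, _ := New() // New returns an Interface
//	_ = d.MakeBucket("photos", "private", nil, nil)
//	payload := []byte("hello")
//	_, _ = d.CreateObject("photos", "hi.txt", "", int64(len(payload)),
//		bytes.NewReader(payload), map[string]string{"contentType": "text/plain"}, nil)
//	var buf bytes.Buffer
//	_, _ = d.GetObject(&buf, "photos", "hi.txt", 0, 0) // buf now holds "hello"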

View file

@ -1,81 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package donut
import (
"github.com/minio/minio/pkg/donut/disk"
"github.com/minio/minio/pkg/probe"
)
// Info - return info about donut configuration
func (donut API) Info() (nodeDiskMap map[string][]string, err *probe.Error) {
nodeDiskMap = make(map[string][]string)
for nodeName, n := range donut.nodes {
disks, err := n.ListDisks()
if err != nil {
return nil, err.Trace()
}
diskList := make([]string, len(disks))
for diskOrder, disk := range disks {
diskList[diskOrder] = disk.GetPath()
}
nodeDiskMap[nodeName] = diskList
}
return nodeDiskMap, nil
}
// AttachNode - attach node
func (donut API) AttachNode(hostname string, disks []string) *probe.Error {
if hostname == "" || len(disks) == 0 {
return probe.NewError(InvalidArgument{})
}
n, err := newNode(hostname)
if err != nil {
return err.Trace()
}
donut.nodes[hostname] = n
for i, d := range disks {
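// disks that fail to initialize are skipped; MakeDir and AttachDisk failures on a good disk are fatal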
newDisk, err := disk.New(d)
if err != nil {
continue
}
if err := newDisk.MakeDir(donut.config.DonutName); err != nil {
return err.Trace()
}
if err := n.AttachDisk(newDisk, i); err != nil {
return err.Trace()
}
}
return nil
}
// DetachNode - detach node
func (donut API) DetachNode(hostname string) *probe.Error {
delete(donut.nodes, hostname)
return nil
}
// Rebalance - rebalance an existing donut with new disks and nodes
func (donut API) Rebalance() *probe.Error {
return probe.NewError(APINotImplemented{API: "management.Rebalance"})
}
// Heal - heal your donuts
func (donut API) Heal() *probe.Error {
// TODO handle data heal
return donut.healBuckets()
}

View file

@ -1,513 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package donut
import (
"bytes"
"crypto/md5"
"crypto/sha512"
"encoding/base64"
"encoding/hex"
"encoding/xml"
"io"
"io/ioutil"
"math/rand"
"runtime/debug"
"sort"
"strconv"
"strings"
"time"
"github.com/minio/minio/pkg/crypto/sha256"
"github.com/minio/minio/pkg/donut/cache/data"
"github.com/minio/minio/pkg/probe"
signv4 "github.com/minio/minio/pkg/signature"
)
/// V2 API functions
// NewMultipartUpload - initiate a new multipart session
func (donut API) NewMultipartUpload(bucket, key, contentType string) (string, *probe.Error) {
donut.lock.Lock()
defer donut.lock.Unlock()
if !IsValidBucket(bucket) {
return "", probe.NewError(BucketNameInvalid{Bucket: bucket})
}
if !IsValidObjectName(key) {
return "", probe.NewError(ObjectNameInvalid{Object: key})
}
// if len(donut.config.NodeDiskMap) > 0 {
// return donut.newMultipartUpload(bucket, key, contentType)
// }
if !donut.storedBuckets.Exists(bucket) {
return "", probe.NewError(BucketNotFound{Bucket: bucket})
}
storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
objectKey := bucket + "/" + key
if _, ok := storedBucket.objectMetadata[objectKey]; ok {
return "", probe.NewError(ObjectExists{Object: key})
}
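// derive an opaque upload ID: SHA-512 over a random number, bucket, key and the current time, base64url-encoded and truncated to 47 characters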
id := []byte(strconv.Itoa(rand.Int()) + bucket + key + time.Now().UTC().String())
uploadIDSum := sha512.Sum512(id)
uploadID := base64.URLEncoding.EncodeToString(uploadIDSum[:])[:47]
storedBucket.multiPartSession[key] = MultiPartSession{
UploadID: uploadID,
Initiated: time.Now().UTC(),
TotalParts: 0,
}
storedBucket.partMetadata[key] = make(map[int]PartMetadata)
multiPartCache := data.NewCache(0)
multiPartCache.OnEvicted = donut.evictedPart
donut.multiPartObjects[uploadID] = multiPartCache
donut.storedBuckets.Set(bucket, storedBucket)
return uploadID, nil
}
// AbortMultipartUpload - abort an incomplete multipart session
func (donut API) AbortMultipartUpload(bucket, key, uploadID string) *probe.Error {
donut.lock.Lock()
defer donut.lock.Unlock()
if !IsValidBucket(bucket) {
return probe.NewError(BucketNameInvalid{Bucket: bucket})
}
if !IsValidObjectName(key) {
return probe.NewError(ObjectNameInvalid{Object: key})
}
// TODO: multipart support for donut is broken, since we haven't finalized the format in which
// it can be stored, disabling this for now until we get the underlying layout stable.
//
// if len(donut.config.NodeDiskMap) > 0 {
// return donut.abortMultipartUpload(bucket, key, uploadID)
// }
if !donut.storedBuckets.Exists(bucket) {
return probe.NewError(BucketNotFound{Bucket: bucket})
}
storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
if storedBucket.multiPartSession[key].UploadID != uploadID {
return probe.NewError(InvalidUploadID{UploadID: uploadID})
}
donut.cleanupMultipartSession(bucket, key, uploadID)
return nil
}
// CreateObjectPart - create a part in a multipart session
func (donut API) CreateObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *signv4.Signature) (string, *probe.Error) {
donut.lock.Lock()
etag, err := donut.createObjectPart(bucket, key, uploadID, partID, contentType, expectedMD5Sum, size, data, signature)
donut.lock.Unlock()
// proactively release memory back to the OS after buffering a part
debug.FreeOSMemory()
if err != nil {
return "", err.Trace()
}
return etag, nil
}
// createObjectPart - internal function called by CreateObjectPart
func (donut API) createObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *signv4.Signature) (string, *probe.Error) {
if !IsValidBucket(bucket) {
return "", probe.NewError(BucketNameInvalid{Bucket: bucket})
}
if !IsValidObjectName(key) {
return "", probe.NewError(ObjectNameInvalid{Object: key})
}
// TODO: multipart support for donut is broken, since we haven't finalized the format in which
// it can be stored, disabling this for now until we get the underlying layout stable.
//
/*
if len(donut.config.NodeDiskMap) > 0 {
metadata := make(map[string]string)
if contentType == "" {
contentType = "application/octet-stream"
}
contentType = strings.TrimSpace(contentType)
metadata["contentType"] = contentType
if strings.TrimSpace(expectedMD5Sum) != "" {
expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
if err != nil {
// pro-actively close the connection
return "", probe.NewError(InvalidDigest{Md5: expectedMD5Sum})
}
expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
}
partMetadata, err := donut.putObjectPart(bucket, key, expectedMD5Sum, uploadID, partID, data, size, metadata, signature)
if err != nil {
return "", err.Trace()
}
return partMetadata.ETag, nil
}
*/
if !donut.storedBuckets.Exists(bucket) {
return "", probe.NewError(BucketNotFound{Bucket: bucket})
}
strBucket := donut.storedBuckets.Get(bucket).(storedBucket)
// Verify upload id
if strBucket.multiPartSession[key].UploadID != uploadID {
return "", probe.NewError(InvalidUploadID{UploadID: uploadID})
}
// get object key
parts := strBucket.partMetadata[key]
if _, ok := parts[partID]; ok {
return parts[partID].ETag, nil
}
if contentType == "" {
contentType = "application/octet-stream"
}
contentType = strings.TrimSpace(contentType)
if strings.TrimSpace(expectedMD5Sum) != "" {
expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
if err != nil {
// pro-actively close the connection
return "", probe.NewError(InvalidDigest{Md5: expectedMD5Sum})
}
expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
}
// calculate md5
hash := md5.New()
sha256hash := sha256.New()
var totalLength int64
var err error
for err == nil {
var length int
byteBuffer := make([]byte, 1024*1024)
length, err = data.Read(byteBuffer) // do not return the read error here; it is handled after the loop
if length != 0 {
hash.Write(byteBuffer[0:length])
sha256hash.Write(byteBuffer[0:length])
ok := donut.multiPartObjects[uploadID].Append(partID, byteBuffer[0:length])
if !ok {
return "", probe.NewError(InternalError{})
}
totalLength += int64(length)
go debug.FreeOSMemory()
}
}
if totalLength != size {
donut.multiPartObjects[uploadID].Delete(partID)
return "", probe.NewError(IncompleteBody{Bucket: bucket, Object: key})
}
if err != io.EOF {
return "", probe.NewError(err)
}
md5SumBytes := hash.Sum(nil)
md5Sum := hex.EncodeToString(md5SumBytes)
// Verify if the written object is equal to what is expected, only if it is requested as such
if strings.TrimSpace(expectedMD5Sum) != "" {
if err := isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5Sum); err != nil {
return "", err.Trace()
}
}
if signature != nil {
{
ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sha256hash.Sum(nil)))
if err != nil {
return "", err.Trace()
}
if !ok {
return "", probe.NewError(signv4.DoesNotMatch{})
}
}
}
newPart := PartMetadata{
PartNumber: partID,
LastModified: time.Now().UTC(),
ETag: md5Sum,
Size: totalLength,
}
parts[partID] = newPart
strBucket.partMetadata[key] = parts
multiPartSession := strBucket.multiPartSession[key]
multiPartSession.TotalParts++
strBucket.multiPartSession[key] = multiPartSession
donut.storedBuckets.Set(bucket, strBucket)
return md5Sum, nil
}
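As an aside, the dual-digest read loop above is the long-hand form of a common standard-library pattern. A self-contained sketch with `io.MultiWriter` (using stock `crypto/sha256` rather than the package's own); not a drop-in replacement, since the loop above also appends each chunk into the part cache:

```go
package main

import (
	"crypto/md5"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

func main() {
	payload := strings.NewReader("part payload")
	md5hash, sha256hash := md5.New(), sha256.New()
	// Every byte read from payload is written to both hashes in a single pass.
	if _, err := io.Copy(io.MultiWriter(md5hash, sha256hash), payload); err != nil {
		panic(err)
	}
	fmt.Println("md5:", hex.EncodeToString(md5hash.Sum(nil)))
	fmt.Println("sha256:", hex.EncodeToString(sha256hash.Sum(nil)))
}
```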
// cleanupMultipartSession - invoked on abort or complete of a multipart session to clean up the session from memory
func (donut API) cleanupMultipartSession(bucket, key, uploadID string) {
storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
for i := 1; i <= storedBucket.multiPartSession[key].TotalParts; i++ {
donut.multiPartObjects[uploadID].Delete(i)
}
delete(storedBucket.multiPartSession, key)
delete(storedBucket.partMetadata, key)
donut.storedBuckets.Set(bucket, storedBucket)
}
func (donut API) mergeMultipart(parts *CompleteMultipartUpload, uploadID string, fullObjectWriter *io.PipeWriter) {
for _, part := range parts.Part {
recvMD5 := part.ETag
object, ok := donut.multiPartObjects[uploadID].Get(part.PartNumber)
if !ok {
fullObjectWriter.CloseWithError(probe.WrapError(probe.NewError(InvalidPart{})))
return
}
calcMD5Bytes := md5.Sum(object)
// complete multi part request header md5sum per part is hex encoded
recvMD5Bytes, err := hex.DecodeString(strings.Trim(recvMD5, "\""))
if err != nil {
fullObjectWriter.CloseWithError(probe.WrapError(probe.NewError(InvalidDigest{Md5: recvMD5})))
return
}
if !bytes.Equal(recvMD5Bytes, calcMD5Bytes[:]) {
fullObjectWriter.CloseWithError(probe.WrapError(probe.NewError(BadDigest{})))
return
}
if _, err := io.Copy(fullObjectWriter, bytes.NewReader(object)); err != nil {
fullObjectWriter.CloseWithError(probe.WrapError(probe.NewError(err)))
return
}
object = nil
}
fullObjectWriter.Close()
}
// CompleteMultipartUpload - complete a multipart upload and persist the data
func (donut API) CompleteMultipartUpload(bucket, key, uploadID string, data io.Reader, signature *signv4.Signature) (ObjectMetadata, *probe.Error) {
donut.lock.Lock()
defer donut.lock.Unlock()
size := int64(donut.multiPartObjects[uploadID].Stats().Bytes)
fullObjectReader, err := donut.completeMultipartUploadV2(bucket, key, uploadID, data, signature)
if err != nil {
return ObjectMetadata{}, err.Trace()
}
objectMetadata, err := donut.createObject(bucket, key, "", "", size, fullObjectReader, nil)
if err != nil {
// No need to call internal cleanup functions here, caller should call AbortMultipartUpload()
// which would in-turn cleanup properly in accordance with S3 Spec
return ObjectMetadata{}, err.Trace()
}
donut.cleanupMultipartSession(bucket, key, uploadID)
return objectMetadata, nil
}
func (donut API) completeMultipartUploadV2(bucket, key, uploadID string, data io.Reader, signature *signv4.Signature) (io.Reader, *probe.Error) {
if !IsValidBucket(bucket) {
return nil, probe.NewError(BucketNameInvalid{Bucket: bucket})
}
if !IsValidObjectName(key) {
return nil, probe.NewError(ObjectNameInvalid{Object: key})
}
// TODO: multipart support for donut is broken, since we haven't finalized the format in which
// it can be stored, disabling this for now until we get the underlying layout stable.
//
// if len(donut.config.NodeDiskMap) > 0 {
// donut.lock.Unlock()
// return donut.completeMultipartUpload(bucket, key, uploadID, data, signature)
// }
if !donut.storedBuckets.Exists(bucket) {
return nil, probe.NewError(BucketNotFound{Bucket: bucket})
}
storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
// Verify upload id
if storedBucket.multiPartSession[key].UploadID != uploadID {
return nil, probe.NewError(InvalidUploadID{UploadID: uploadID})
}
partBytes, err := ioutil.ReadAll(data)
if err != nil {
return nil, probe.NewError(err)
}
if signature != nil {
sha256Sum := sha256.Sum256(partBytes)
ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sha256Sum[:]))
if err != nil {
return nil, err.Trace()
}
if !ok {
return nil, probe.NewError(signv4.DoesNotMatch{})
}
}
parts := &CompleteMultipartUpload{}
if err := xml.Unmarshal(partBytes, parts); err != nil {
return nil, probe.NewError(MalformedXML{})
}
if !sort.IsSorted(completedParts(parts.Part)) {
return nil, probe.NewError(InvalidPartOrder{})
}
fullObjectReader, fullObjectWriter := io.Pipe()
go donut.mergeMultipart(parts, uploadID, fullObjectWriter)
return fullObjectReader, nil
}
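`completeMultipartUploadV2` and `mergeMultipart` form a classic `io.Pipe` producer/consumer pair: the merge goroutine writes into the pipe and reports failure via `CloseWithError`, which the reading side (here, `createObject`) observes as an error from `Read`. A self-contained sketch of that pattern, independent of donut:

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
)

func main() {
	reader, writer := io.Pipe()
	// Producer: stream pieces into the pipe from a goroutine, much as
	// mergeMultipart does; on failure, CloseWithError(err) surfaces err
	// to the consumer's Read instead of io.EOF.
	go func() {
		for _, piece := range []string{"hello, ", "world"} {
			if _, err := io.WriteString(writer, piece); err != nil {
				writer.CloseWithError(err)
				return
			}
		}
		writer.Close() // success: the consumer sees io.EOF
	}()
	// Consumer: reads until EOF or the producer's error.
	data, err := ioutil.ReadAll(reader)
	fmt.Printf("read %q, err: %v\n", data, err)
}
```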
// byKey is a sortable interface for UploadMetadata slice
type byKey []*UploadMetadata
func (a byKey) Len() int { return len(a) }
func (a byKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byKey) Less(i, j int) bool { return a[i].Key < a[j].Key }
// ListMultipartUploads - list incomplete multipart sessions for a given bucket
func (donut API) ListMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata) (BucketMultipartResourcesMetadata, *probe.Error) {
// TODO handle delimiter, low priority
donut.lock.Lock()
defer donut.lock.Unlock()
if !IsValidBucket(bucket) {
return BucketMultipartResourcesMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
}
// TODO: multipart support for donut is broken, since we haven't finalized the format in which
// it can be stored, disabling this for now until we get the underlying layout stable.
//
// if len(donut.config.NodeDiskMap) > 0 {
// return donut.listMultipartUploads(bucket, resources)
// }
if !donut.storedBuckets.Exists(bucket) {
return BucketMultipartResourcesMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
}
storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
var uploads []*UploadMetadata
for key, session := range storedBucket.multiPartSession {
if strings.HasPrefix(key, resources.Prefix) {
if len(uploads) > resources.MaxUploads {
sort.Sort(byKey(uploads))
resources.Upload = uploads
resources.NextKeyMarker = key
resources.NextUploadIDMarker = session.UploadID
resources.IsTruncated = true
return resources, nil
}
// uploadIDMarker is ignored if KeyMarker is empty
switch {
case resources.KeyMarker != "" && resources.UploadIDMarker == "":
if key > resources.KeyMarker {
upload := new(UploadMetadata)
upload.Key = key
upload.UploadID = session.UploadID
upload.Initiated = session.Initiated
uploads = append(uploads, upload)
}
case resources.KeyMarker != "" && resources.UploadIDMarker != "":
if session.UploadID > resources.UploadIDMarker {
if key >= resources.KeyMarker {
upload := new(UploadMetadata)
upload.Key = key
upload.UploadID = session.UploadID
upload.Initiated = session.Initiated
uploads = append(uploads, upload)
}
}
default:
upload := new(UploadMetadata)
upload.Key = key
upload.UploadID = session.UploadID
upload.Initiated = session.Initiated
uploads = append(uploads, upload)
}
}
}
sort.Sort(byKey(uploads))
resources.Upload = uploads
return resources, nil
}
// partNumber is a sortable interface for Part slice
type partNumber []*PartMetadata
func (a partNumber) Len() int { return len(a) }
func (a partNumber) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a partNumber) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }
// ListObjectParts - list parts from incomplete multipart session for a given object
func (donut API) ListObjectParts(bucket, key string, resources ObjectResourcesMetadata) (ObjectResourcesMetadata, *probe.Error) {
// Verify upload id
donut.lock.Lock()
defer donut.lock.Unlock()
if !IsValidBucket(bucket) {
return ObjectResourcesMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
}
if !IsValidObjectName(key) {
return ObjectResourcesMetadata{}, probe.NewError(ObjectNameInvalid{Object: key})
}
// TODO: multipart support for donut is broken, since we haven't finalized the format in which
// it can be stored, disabling this for now until we get the underlying layout stable.
//
// if len(donut.config.NodeDiskMap) > 0 {
// return donut.listObjectParts(bucket, key, resources)
// }
if !donut.storedBuckets.Exists(bucket) {
return ObjectResourcesMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
}
storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
if _, ok := storedBucket.multiPartSession[key]; !ok {
return ObjectResourcesMetadata{}, probe.NewError(ObjectNotFound{Object: key})
}
if storedBucket.multiPartSession[key].UploadID != resources.UploadID {
return ObjectResourcesMetadata{}, probe.NewError(InvalidUploadID{UploadID: resources.UploadID})
}
storedParts := storedBucket.partMetadata[key]
objectResourcesMetadata := resources
objectResourcesMetadata.Bucket = bucket
objectResourcesMetadata.Key = key
var parts []*PartMetadata
var startPartNumber int
switch {
case objectResourcesMetadata.PartNumberMarker == 0:
startPartNumber = 1
default:
startPartNumber = objectResourcesMetadata.PartNumberMarker
}
for i := startPartNumber; i <= storedBucket.multiPartSession[key].TotalParts; i++ {
if len(parts) > objectResourcesMetadata.MaxParts {
sort.Sort(partNumber(parts))
objectResourcesMetadata.IsTruncated = true
objectResourcesMetadata.Part = parts
objectResourcesMetadata.NextPartNumberMarker = i
return objectResourcesMetadata, nil
}
part, ok := storedParts[i]
if !ok {
return ObjectResourcesMetadata{}, probe.NewError(InvalidPart{})
}
parts = append(parts, &part)
}
sort.Sort(partNumber(parts))
objectResourcesMetadata.Part = parts
return objectResourcesMetadata, nil
}
// evictedPart - callback invoked by the caching module during individual cache evictions
func (donut API) evictedPart(a ...interface{}) {
// loop through all buckets
buckets := donut.storedBuckets.GetAll()
for bucketName, bucket := range buckets {
b := bucket.(storedBucket)
donut.storedBuckets.Set(bucketName, b)
}
debug.FreeOSMemory()
}

View file

@ -1,76 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package donut
import (
"github.com/minio/minio/pkg/donut/disk"
"github.com/minio/minio/pkg/probe"
)
// node struct internal
type node struct {
hostname string
disks map[int]disk.Disk
}
// newNode - instantiates a new node
func newNode(hostname string) (node, *probe.Error) {
if hostname == "" {
return node{}, probe.NewError(InvalidArgument{})
}
disks := make(map[int]disk.Disk)
n := node{
hostname: hostname,
disks: disks,
}
return n, nil
}
// GetHostname - return hostname
func (n node) GetHostname() string {
return n.hostname
}
// ListDisks - return the map of attached disks
func (n node) ListDisks() (map[int]disk.Disk, *probe.Error) {
return n.disks, nil
}
// AttachDisk - attach a disk
func (n node) AttachDisk(disk disk.Disk, diskOrder int) *probe.Error {
if diskOrder < 0 {
return probe.NewError(InvalidArgument{})
}
n.disks[diskOrder] = disk
return nil
}
// DetachDisk - detach a disk
func (n node) DetachDisk(diskOrder int) *probe.Error {
delete(n.disks, diskOrder)
return nil
}
// SaveConfig - save node configuration
func (n node) SaveConfig() *probe.Error {
return probe.NewError(NotImplemented{Function: "SaveConfig"})
}
// LoadConfig - load node configuration from saved configs
func (n node) LoadConfig() *probe.Error {
return probe.NewError(NotImplemented{Function: "LoadConfig"})
}

View file

@ -1 +0,0 @@
*.syso

View file

@ -1,64 +0,0 @@
## Ubuntu (Kylin) 14.04
### Build Dependencies
This installation document assumes Ubuntu 14.04+ on x86-64 platform.
##### Install Git, GCC, yasm
```sh
$ sudo apt-get install git build-essential yasm
```
##### Install Go 1.5+
Download Go 1.5+ from [https://golang.org/dl/](https://golang.org/dl/).
```sh
$ wget https://storage.googleapis.com/golang/go1.5.linux-amd64.tar.gz
$ mkdir -p ${HOME}/bin/
$ mkdir -p ${HOME}/go/
$ tar -C ${HOME}/bin/ -xzf go1.5.linux-amd64.tar.gz
```
##### Setup GOROOT and GOPATH
Add the following exports to your ``~/.bashrc``. Environment variable GOROOT specifies the location of your golang binaries
and GOPATH specifies the location of your project workspace.
```sh
$ export GOROOT=${HOME}/bin/go
$ export GOPATH=${HOME}/go
$ export PATH=$PATH:${HOME}/bin/go/bin:${GOPATH}/bin
```
## OS X (Yosemite) 10.10
### Build Dependencies
This installation document assumes OS X Yosemite 10.10+ on x86-64 platform.
##### Install brew
```sh
$ ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
```
##### Install Git, Python, yasm
```sh
$ brew install git python yasm
```
##### Install Go 1.5+
Install golang binaries using `brew`
```sh
$ brew install go
$ mkdir -p $HOME/go
```
##### Setup GOROOT and GOPATH
Add the following exports to your ``~/.bashrc``. Environment variable GOROOT specifies the location of your golang binaries
and GOPATH specifies the location of your project workspace.
```sh
$ export GOPATH=${HOME}/go
$ export GOVERSION=$(brew list go | head -n 1 | cut -d '/' -f 6)
$ export GOROOT=$(brew --prefix)/Cellar/go/${GOVERSION}/libexec
$ export PATH=$PATH:${GOPATH}/bin
```
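Once the exports are sourced, a quick sanity check that the toolchain and workspace resolve as expected:
```sh
$ go version
$ go env GOROOT GOPATH
```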

View file

@ -1,26 +0,0 @@
Copyright(c) 2011-2014 Intel Corporation All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@ -1,25 +0,0 @@
## Introduction
Erasure is an open source Golang library written on top of ISA-L (Intel Intelligent Storage Acceleration Library), released under [Apache license v2](./LICENSE)
### Developers
* [Get Source](./CONTRIBUTING.md)
* [Build Dependencies](./BUILDDEPS.md)
* [Development Workflow](./CONTRIBUTING.md#developer-guidelines)
* [Developer discussions and bugs](https://github.com/minio/minio/issues)
### Supported platforms
| Name | Supported |
| ------------- | ------------- |
| Linux | Yes |
| Windows | Not yet |
| Mac OSX | Yes |
### Supported architectures
| Arch | Supported |
| ------------- | ------------- |
| x86-64 | Yes |
| arm64 | Not yet |
| i386 | Never |

View file

@ -1,49 +0,0 @@
================================================================================
v2.10 Intel Intelligent Storage Acceleration Library Release Notes
Open Source Version
================================================================================
================================================================================
RELEASE NOTE CONTENTS
================================================================================
1. KNOWN ISSUES
2. FIXED ISSUES
3. CHANGE LOG & FEATURES ADDED
================================================================================
1. KNOWN ISSUES
================================================================================
* Only erasure code unit included in open source version at this time.
* Perf tests do not run in Windows environment.
* Leaving <unit>/bin directories from builds in unit directories will cause the
top-level make build to fail. Build only in top-level or ensure unit
directories are clean of objects and /bin.
* 32-bit lib is not supported in Windows.
================================================================================
2. FIXED ISSUES
================================================================================
v2.10
* Fix for windows register save overlap in gf_{3-6}vect_dot_prod_sse.asm. Only
affects windows versions of erasure code. GP register saves/restore were
pushed to same stack area as XMM.
================================================================================
3. CHANGE LOG & FEATURES ADDED
================================================================================
v2.10
* Erasure code updates
- New AVX and AVX2 support functions.
- Changes min len requirement on gf_vect_dot_prod() to 32 from 16.
- Tests include both source and parity recovery with ec_encode_data().
- New encoding examples with Vandermonde or Cauchy matrix.
v2.8
* First open release of erasure code unit that is part of ISA-L.

View file

@ -1,3 +0,0 @@
v1.0 - Erasure Golang Package
============================
- First release, supports only amd64 or x86-64 architecture

View file

@ -1,62 +0,0 @@
/*
* Minio Cloud Storage, (C) 2014 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package erasure
// #include <stdint.h>
import "C"
import (
"fmt"
"unsafe"
)
// intSlice2CIntArray converts Go int slice to C int array
func intSlice2CIntArray(srcErrList []int) *C.int32_t {
if len(srcErrList) == 0 {
return (*C.int32_t)(unsafe.Pointer(nil))
}
var sizeErrInt = int(unsafe.Sizeof(srcErrList[0]))
switch sizeInt {
case sizeErrInt:
return (*C.int32_t)(unsafe.Pointer(&srcErrList[0]))
case sizeInt8:
int8Array := make([]int8, len(srcErrList))
for i, v := range srcErrList {
int8Array[i] = int8(v)
}
return (*C.int32_t)(unsafe.Pointer(&int8Array[0]))
case sizeInt16:
int16Array := make([]int16, len(srcErrList))
for i, v := range srcErrList {
int16Array[i] = int16(v)
}
return (*C.int32_t)(unsafe.Pointer(&int16Array[0]))
case sizeInt32:
int32Array := make([]int32, len(srcErrList))
for i, v := range srcErrList {
int32Array[i] = int32(v)
}
return (*C.int32_t)(unsafe.Pointer(&int32Array[0]))
case sizeInt64:
int64Array := make([]int64, len(srcErrList))
for i, v := range srcErrList {
int64Array[i] = int64(v)
}
return (*C.int32_t)(unsafe.Pointer(&int64Array[0]))
default:
panic(fmt.Sprintf("Unsupported: %d", sizeInt))
}
}

View file

@ -1,66 +0,0 @@
// Package erasure is a Go wrapper for the Intel Intelligent Storage
// Acceleration Library (Intel ISA-L). Intel ISA-L is a CPU optimized
// implementation of erasure coding algorithms.
//
// For more information on Intel ISA-L, please visit:
// https://01.org/intel%C2%AE-storage-acceleration-library-open-source-version
//
// Usage:
//
// Encode encodes a block of data. The input is the original data. The output
// is a 2 tuple containing (k + m) chunks of erasure encoded data and the
// length of the original object.
//
// Decode decodes the 2 tuple of (k + m) chunks back into its original form.
// Additionally the original block length must be provided as input.
//
// Decoded data is identical in length and content to the original data.
//
// Encoding data may be performed in 3 steps.
//
// 1. Create and validate a set of encoder parameters
// 2. Create a new encoder
// 3. Encode data
//
// Decoding data is also performed in 3 steps.
//
// 1. Create and validate a set of encoder parameters
// 2. Create a new encoder
// 3. Decode data
//
// Erasure parameters contain three configurable elements:
// ValidateParams(k, m, technique int) (ErasureParams, error)
// k - Number of rows in matrix
// m - Number of colums in matrix
// technique - Matrix type, can be either Cauchy (recommended) or Vandermonde
// constraints: k + m < Galois Field (2^8)
//
// Choosing right parity and matrix technique is left for application to decide.
//
// But here are the few points to keep in mind
//
// Matrix Type:
// - Vandermonde is the most commonly used method for choosing coefficients in erasure
// encoding, but it does not guarantee an invertible matrix for every sub-matrix.
// - Cauchy is our recommended method for choosing coefficients in erasure coding,
// since any sub-matrix of a Cauchy matrix is invertible.
//
// Total blocks:
// - The total of data and parity blocks (k + m) must be less than the Galois Field size (2^8)
//
// Example
//
// Creating and using an encoder
// var bytes []byte
// params, err := erasure.ValidateParams(10, 5, erasure.Cauchy)
// encoder := erasure.NewErasure(params)
// encodedData, length := encoder.Encode(bytes)
//
// Creating and using a decoder
// var encodedData [][]byte
// var length int
// params, err := erasure.ValidateParams(10, 5, erasure.Cauchy)
// encoder := erasure.NewErasure(params)
// originalData, err := encoder.Decode(encodedData, length)
//
package erasure
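Stitching the fragments above into one program gives the following round-trip sketch. The call shapes (`ValidateParams`, `NewErasure`, `Encode`, `Decode`) and the `Cauchy` technique constant are taken from this doc comment; the exact signatures are assumptions, since the comment does not pin them down:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/minio/minio/pkg/erasure"
)

func main() {
	original := []byte("a block of data worth protecting")

	// k=10 data blocks, m=5 parity blocks, Cauchy coefficients (recommended above).
	params, err := erasure.ValidateParams(10, 5, erasure.Cauchy)
	if err != nil {
		panic(err)
	}
	encoder := erasure.NewErasure(params)

	// Encode yields k+m chunks plus the original length, both needed to decode.
	encodedData, length := encoder.Encode(original)

	// Up to m chunks could be dropped here and recovery would still succeed.
	decoded, err := encoder.Decode(encodedData, length)
	if err != nil {
		panic(err)
	}
	fmt.Println("round trip intact:", bytes.Equal(original, decoded))
}
```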

File diff suppressed because it is too large

View file

@ -1,41 +0,0 @@
/**********************************************************************
Copyright(c) 2011-2015 Intel Corporation All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**********************************************************************/
#ifndef _ISAL_H_
#define _ISAL_H_
#define ISAL_MAJOR_VERSION 2
#define ISAL_MINOR_VERSION 13
#define ISAL_PATCH_VERSION 0
#define ISAL_MAKE_VERSION(maj, min, patch) ((maj) * 0x10000 + (min) * 0x100 + (patch))
#define ISAL_VERSION ISAL_MAKE_VERSION(ISAL_MAJOR_VERSION, ISAL_MINOR_VERSION, ISAL_PATCH_VERSION)
#include "ec_code.h"
#include "gf_vect_mul.h"
#endif //_ISAL_H_

View file

@ -1,348 +0,0 @@
/**********************************************************************
Copyright(c) 2011-2015 Intel Corporation All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**********************************************************************/
#include <limits.h>
#include <string.h> // for memset
#include "ec_code.h"
#include "ec_base.h" // for GF tables
#include "ec_types.h"
unsigned char gf_mul(unsigned char a, unsigned char b)
{
#ifndef GF_LARGE_TABLES
int i;
if ((a == 0) || (b == 0))
return 0;
return gff_base[(i = gflog_base[a] + gflog_base[b]) > 254 ? i - 255 : i];
#else
return gf_mul_table_base[b * 256 + a];
#endif
}
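The two lookup tables implement the standard log/antilog identity for the multiplicative group of GF(2^8), which has order 255; `gflog_base` holds discrete logs and `gff_base` holds powers of the generator $g$. A sketch of the identity the branch above encodes:

$$a \otimes b = g^{(\log_g a + \log_g b) \bmod 255}, \qquad a, b \neq 0.$$

Each log is at most 254, so the sum is at most 508 and a single conditional subtraction of 255 (the `i > 254 ? i - 255 : i` above) suffices for the reduction.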
unsigned char gf_inv(unsigned char a)
{
#ifndef GF_LARGE_TABLES
if (a == 0)
return 0;
return gff_base[255 - gflog_base[a]];
#else
return gf_inv_table_base[a];
#endif
}
void gf_gen_rs_matrix(unsigned char *a, int m, int k)
{
int i, j;
unsigned char p, gen = 1;
memset(a, 0, k * m);
for (i = 0; i < k; i++)
a[k * i + i] = 1;
for (i = k; i < m; i++) {
p = 1;
for (j = 0; j < k; j++) {
a[k * i + j] = p;
p = gf_mul(p, gen);
}
gen = gf_mul(gen, 2);
}
}
void gf_gen_cauchy1_matrix(unsigned char *a, int m, int k)
{
int i, j;
unsigned char *p;
// Identity matrix in high position
memset(a, 0, k * m);
for (i = 0; i < k; i++)
a[k * i + i] = 1;
// For the rest choose 1/(i + j) | i != j
p = &a[k * k];
for (i = k; i < m; i++)
for (j = 0; j < k; j++)
*p++ = gf_inv(i ^ j);
}
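In symbols, the generated matrix stacks a $k \times k$ identity on top of rows of inverses (a sketch, with rows indexed from 0):

$$a_{i,j} = \begin{cases} \delta_{i,j} & 0 \le i < k \\ (i \oplus j)^{-1} & k \le i < m \end{cases} \qquad 0 \le j < k.$$

Since $i \ge k > j$ on the parity rows, $i \oplus j$ is never zero and every inverse exists; this is the Cauchy construction whose sub-matrix invertibility the package documentation relies on.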
int gf_invert_matrix(unsigned char *in_mat, unsigned char *out_mat, const int n)
{
int i, j, k;
unsigned char temp;
// Set out_mat[] to the identity matrix
for (i = 0; i < n * n; i++) // memset(out_mat, 0, n*n)
out_mat[i] = 0;
for (i = 0; i < n; i++)
out_mat[i * n + i] = 1;
// Inverse
for (i = 0; i < n; i++) {
// Check for 0 in pivot element
if (in_mat[i * n + i] == 0) {
// Find a row with non-zero in current column and swap
for (j = i + 1; j < n; j++)
if (in_mat[j * n + i])
break;
if (j == n) // Couldn't find means it's singular
return -1;
for (k = 0; k < n; k++) { // Swap rows i,j
temp = in_mat[i * n + k];
in_mat[i * n + k] = in_mat[j * n + k];
in_mat[j * n + k] = temp;
temp = out_mat[i * n + k];
out_mat[i * n + k] = out_mat[j * n + k];
out_mat[j * n + k] = temp;
}
}
temp = gf_inv(in_mat[i * n + i]); // 1/pivot
for (j = 0; j < n; j++) { // Scale row i by 1/pivot
in_mat[i * n + j] = gf_mul(in_mat[i * n + j], temp);
out_mat[i * n + j] = gf_mul(out_mat[i * n + j], temp);
}
for (j = 0; j < n; j++) {
if (j == i)
continue;
temp = in_mat[j * n + i];
for (k = 0; k < n; k++) {
out_mat[j * n + k] ^= gf_mul(temp, out_mat[i * n + k]);
in_mat[j * n + k] ^= gf_mul(temp, in_mat[i * n + k]);
}
}
}
return 0;
}
// Calculates const table gftbl in GF(2^8) from single input A
// gftbl(A) = {A{00}, A{01}, A{02}, ... , A{0f} }, {A{00}, A{10}, A{20}, ... , A{f0} }
void gf_vect_mul_init(unsigned char c, unsigned char *tbl)
{
unsigned char c2 = (c << 1) ^ ((c & 0x80) ? 0x1d : 0); //Mult by GF{2}
unsigned char c4 = (c2 << 1) ^ ((c2 & 0x80) ? 0x1d : 0); //Mult by GF{2}
unsigned char c8 = (c4 << 1) ^ ((c4 & 0x80) ? 0x1d : 0); //Mult by GF{2}
#if __WORDSIZE == 64 || _WIN64 || __x86_64__
unsigned long long v1, v2, v4, v8, *t;
unsigned long long v10, v20, v40, v80;
unsigned char c17, c18, c20, c24;
t = (unsigned long long *)tbl;
v1 = c * 0x0100010001000100ull;
v2 = c2 * 0x0101000001010000ull;
v4 = c4 * 0x0101010100000000ull;
v8 = c8 * 0x0101010101010101ull;
v4 = v1 ^ v2 ^ v4;
t[0] = v4;
t[1] = v8 ^ v4;
c17 = (c8 << 1) ^ ((c8 & 0x80) ? 0x1d : 0); //Mult by GF{2}
c18 = (c17 << 1) ^ ((c17 & 0x80) ? 0x1d : 0); //Mult by GF{2}
c20 = (c18 << 1) ^ ((c18 & 0x80) ? 0x1d : 0); //Mult by GF{2}
c24 = (c20 << 1) ^ ((c20 & 0x80) ? 0x1d : 0); //Mult by GF{2}
v10 = c17 * 0x0100010001000100ull;
v20 = c18 * 0x0101000001010000ull;
v40 = c20 * 0x0101010100000000ull;
v80 = c24 * 0x0101010101010101ull;
v40 = v10 ^ v20 ^ v40;
t[2] = v40;
t[3] = v80 ^ v40;
#else // 32-bit or other
unsigned char c3, c5, c6, c7, c9, c10, c11, c12, c13, c14, c15;
unsigned char c17, c18, c19, c20, c21, c22, c23, c24, c25, c26, c27, c28, c29, c30,
c31;
c3 = c2 ^ c;
c5 = c4 ^ c;
c6 = c4 ^ c2;
c7 = c4 ^ c3;
c9 = c8 ^ c;
c10 = c8 ^ c2;
c11 = c8 ^ c3;
c12 = c8 ^ c4;
c13 = c8 ^ c5;
c14 = c8 ^ c6;
c15 = c8 ^ c7;
tbl[0] = 0;
tbl[1] = c;
tbl[2] = c2;
tbl[3] = c3;
tbl[4] = c4;
tbl[5] = c5;
tbl[6] = c6;
tbl[7] = c7;
tbl[8] = c8;
tbl[9] = c9;
tbl[10] = c10;
tbl[11] = c11;
tbl[12] = c12;
tbl[13] = c13;
tbl[14] = c14;
tbl[15] = c15;
c17 = (c8 << 1) ^ ((c8 & 0x80) ? 0x1d : 0); //Mult by GF{2}
c18 = (c17 << 1) ^ ((c17 & 0x80) ? 0x1d : 0); //Mult by GF{2}
c19 = c18 ^ c17;
c20 = (c18 << 1) ^ ((c18 & 0x80) ? 0x1d : 0); //Mult by GF{2}
c21 = c20 ^ c17;
c22 = c20 ^ c18;
c23 = c20 ^ c19;
c24 = (c20 << 1) ^ ((c20 & 0x80) ? 0x1d : 0); //Mult by GF{2}
c25 = c24 ^ c17;
c26 = c24 ^ c18;
c27 = c24 ^ c19;
c28 = c24 ^ c20;
c29 = c24 ^ c21;
c30 = c24 ^ c22;
c31 = c24 ^ c23;
tbl[16] = 0;
tbl[17] = c17;
tbl[18] = c18;
tbl[19] = c19;
tbl[20] = c20;
tbl[21] = c21;
tbl[22] = c22;
tbl[23] = c23;
tbl[24] = c24;
tbl[25] = c25;
tbl[26] = c26;
tbl[27] = c27;
tbl[28] = c28;
tbl[29] = c29;
tbl[30] = c30;
tbl[31] = c31;
#endif //__WORDSIZE == 64 || _WIN64 || __x86_64__
}
void gf_vect_dot_prod_base(int len, int vlen, unsigned char *v,
unsigned char **src, unsigned char *dest)
{
int i, j;
unsigned char s;
for (i = 0; i < len; i++) {
s = 0;
for (j = 0; j < vlen; j++)
s ^= gf_mul(src[j][i], v[j * 32 + 1]);
dest[i] = s;
}
}
void gf_vect_mad_base(int len, int vec, int vec_i,
unsigned char *v, unsigned char *src, unsigned char *dest)
{
int i;
unsigned char s;
for (i = 0; i < len; i++) {
s = dest[i];
s ^= gf_mul(src[i], v[vec_i * 32 + 1]);
dest[i] = s;
}
}
void ec_encode_data_base(int len, int srcs, int dests, unsigned char *v,
unsigned char **src, unsigned char **dest)
{
int i, j, l;
unsigned char s;
for (l = 0; l < dests; l++) {
for (i = 0; i < len; i++) {
s = 0;
for (j = 0; j < srcs; j++)
s ^= gf_mul(src[j][i], v[j * 32 + l * srcs * 32 + 1]);
dest[l][i] = s;
}
}
}
void ec_encode_data_update_base(int len, int k, int rows, int vec_i, unsigned char *v,
unsigned char *data, unsigned char **dest)
{
int i, l;
unsigned char s;
for (l = 0; l < rows; l++) {
for (i = 0; i < len; i++) {
s = dest[l][i];
s ^= gf_mul(data[i], v[vec_i * 32 + l * k * 32 + 1]);
dest[l][i] = s;
}
}
}
void gf_vect_mul_base(int len, unsigned char *a, unsigned char *src, unsigned char *dest)
{
//2nd element of table array is ref value used to fill it in
unsigned char c = a[1];
while (len-- > 0)
*dest++ = gf_mul(c, *src++);
}
struct slver {
UINT16 snum;
UINT8 ver;
UINT8 core;
};
// Version info
struct slver gf_vect_mul_init_slver_00020035;
struct slver gf_vect_mul_init_slver = { 0x0035, 0x02, 0x00 };
struct slver ec_encode_data_base_slver_00010135;
struct slver ec_encode_data_base_slver = { 0x0135, 0x01, 0x00 };
struct slver gf_vect_mul_base_slver_00010136;
struct slver gf_vect_mul_base_slver = { 0x0136, 0x01, 0x00 };
struct slver gf_vect_dot_prod_base_slver_00010137;
struct slver gf_vect_dot_prod_base_slver = { 0x0137, 0x01, 0x00 };

File diff suppressed because it is too large

View file

@ -1,933 +0,0 @@
/**********************************************************************
Copyright(c) 2011-2015 Intel Corporation All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**********************************************************************/
#ifndef _ERASURE_CODE_H_
#define _ERASURE_CODE_H_
/**
* @file erasure_code.h
* @brief Interface to functions supporting erasure code encode and decode.
*
* This file defines the interface to optimized functions used in erasure
* codes. Encode and decode of erasures in GF(2^8) are made by calculating the
* dot product of the symbols (bytes in GF(2^8)) across a set of buffers and a
* set of coefficients. Values for the coefficients are determined by the type
* of erasure code. Using a general dot product means that any sequence of
* coefficients may be used including erasure codes based on random
* coefficients.
* Multiple versions of dot product are supplied to calculate 1-6 output
* vectors in one pass.
* Base GF multiply and divide functions can be sped up by defining
* GF_LARGE_TABLES at the expense of memory size.
*
*/
#include "gf_vect_mul.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Initialize tables for fast Erasure Code encode and decode.
*
* Generates the expanded tables needed for fast encode or decode for erasure
* codes on blocks of data. 32bytes is generated for each input coefficient.
*
* @param k The number of vector sources or rows in the generator matrix
* for coding.
* @param rows The number of output vectors to concurrently encode/decode.
* @param a Pointer to sets of arrays of input coefficients used to encode
* or decode data.
* @param gftbls Pointer to start of space for concatenated output tables
* generated from input coefficients. Must be of size 32*k*rows.
* @returns none
*/
void ec_init_tables(int k, int rows, unsigned char* a, unsigned char* gftbls);
/**
* @brief Generate or decode erasure codes on blocks of data, runs appropriate version.
*
* Given a list of source data blocks, generate one or multiple blocks of
* encoded data as specified by a matrix of GF(2^8) coefficients. When given a
* suitable set of coefficients, this function will perform the fast generation
* or decoding of Reed-Solomon type erasure codes.
*
* This function determines what instruction sets are enabled and
* selects the appropriate version at runtime.
*
* @param len Length of each block of data (vector) of source or dest data.
* @param k The number of vector sources or rows in the generator matrix
* for coding.
* @param rows The number of output vectors to concurrently encode/decode.
* @param gftbls Pointer to array of input tables generated from coding
* coefficients in ec_init_tables(). Must be of size 32*k*rows
* @param data Array of pointers to source input buffers.
* @param coding Array of pointers to coded output buffers.
* @returns none
*/
void ec_encode_data(int len, int k, int rows, unsigned char *gftbls, unsigned char **data,
unsigned char **coding);
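In matrix terms, for each output row $l$ and byte offset $i$, `ec_encode_data()` computes a GF(2^8) dot product (a sketch of the operation described above, with $\oplus$ as XOR and $\otimes$ as field multiplication):

$$\mathrm{coding}[l][i] = \bigoplus_{j=0}^{k-1} g_{l,j} \otimes \mathrm{data}[j][i], \qquad 0 \le l < \mathrm{rows},$$

where the coefficients $g_{l,j}$ are the ones expanded into `gftbls` by `ec_init_tables()`.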
/**
* @brief Generate or decode erasure codes on blocks of data.
*
* Arch specific version of ec_encode_data() with same parameters.
* @requires SSE4.1
*/
void ec_encode_data_sse(int len, int k, int rows, unsigned char *gftbls, unsigned char **data,
unsigned char **coding);
/**
* @brief Generate or decode erasure codes on blocks of data.
*
* Arch specific version of ec_encode_data() with same parameters.
* @requires AVX
*/
void ec_encode_data_avx(int len, int k, int rows, unsigned char *gftbls, unsigned char **data,
unsigned char **coding);
/**
* @brief Generate or decode erasure codes on blocks of data.
*
* Arch specific version of ec_encode_data() with same parameters.
* @requires AVX2
*/
void ec_encode_data_avx2(int len, int k, int rows, unsigned char *gftbls, unsigned char **data,
unsigned char **coding);
/**
* @brief Generate or decode erasure codes on blocks of data, runs baseline version.
*
* Baseline version of ec_encode_data() with same parameters.
*/
void ec_encode_data_base(int len, int srcs, int dests, unsigned char *v, unsigned char **src,
unsigned char **dest);
/**
* @brief Generate update for encode or decode of erasure codes from single source, runs appropriate version.
*
* Given one source data block, update one or multiple blocks of encoded data as
* specified by a matrix of GF(2^8) coefficients. When given a suitable set of
* coefficients, this function will perform the fast generation or decoding of
* Reed-Solomon type erasure codes from one input source at a time.
*
* This function determines what instruction sets are enabled and selects the
* appropriate version at runtime.
*
* @param len Length of each block of data (vector) of source or dest data.
* @param k The number of vector sources or rows in the generator matrix
* for coding.
* @param rows The number of output vectors to concurrently encode/decode.
* @param vec_i The vector index corresponding to the single input source.
* @param g_tbls Pointer to array of input tables generated from coding
* coefficients in ec_init_tables(). Must be of size 32*k*rows
* @param data Pointer to single input source used to update output parity.
* @param coding Array of pointers to coded output buffers.
* @returns none
*/
void ec_encode_data_update(int len, int k, int rows, int vec_i, unsigned char *g_tbls,
unsigned char *data, unsigned char **coding);
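/**
 * Example (minimal sketch, an assumed usage rather than documented behavior):
 * the update form reproduces ec_encode_data() when the parity buffers start
 * zeroed and each of the K sources is applied once.
 *
 * @code
 * // gftbls prepared with ec_init_tables() as in the encode example;
 * // coding[0..ROWS-1] zeroed beforehand, since updates accumulate (XOR).
 * for (i = 0; i < K; i++)
 *     ec_encode_data_update(LEN, K, ROWS, i, gftbls, data[i], coding);
 * @endcode
 */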
/**
* @brief Generate update for encode or decode of erasure codes from single source.
*
* Arch specific version of ec_encode_data_update() with same parameters.
* @requires SSE4.1
*/
void ec_encode_data_update_sse(int len, int k, int rows, int vec_i, unsigned char *g_tbls,
unsigned char *data, unsigned char **coding);
/**
* @brief Generate update for encode or decode of erasure codes from single source.
*
* Arch specific version of ec_encode_data_update() with same parameters.
* @requires AVX
*/
void ec_encode_data_update_avx(int len, int k, int rows, int vec_i, unsigned char *g_tbls,
unsigned char *data, unsigned char **coding);
/**
* @brief Generate update for encode or decode of erasure codes from single source.
*
* Arch specific version of ec_encode_data_update() with same parameters.
* @requires AVX2
*/
void ec_encode_data_update_avx2(int len, int k, int rows, int vec_i, unsigned char *g_tbls,
unsigned char *data, unsigned char **coding);
/**
* @brief Generate update for encode or decode of erasure codes from single source.
*
* Baseline version of ec_encode_data_update().
*/
void ec_encode_data_update_base(int len, int k, int rows, int vec_i, unsigned char *v,
unsigned char *data, unsigned char **dest);
/**
* @brief GF(2^8) vector dot product.
*
* Does a GF(2^8) dot product across each byte of the input array and a constant
* set of coefficients to produce each byte of the output. Can be used for
* erasure coding encode and decode. Function requires pre-calculation of a
* 32*vlen byte constant array based on the input coefficients.
* @requires SSE4.1
*
* @param len Length of each vector in bytes. Must be >= 16.
* @param vlen Number of vector sources.
* @param gftbls Pointer to 32*vlen byte array of pre-calculated constants based
* on the array of input coefficients.
* @param src Array of pointers to source inputs.
* @param dest Pointer to destination data array.
* @returns none
*/
void gf_vect_dot_prod_sse(int len, int vlen, unsigned char *gftbls,
unsigned char **src, unsigned char *dest);
/**
* @brief GF(2^8) vector dot product.
*
* Does a GF(2^8) dot product across each byte of the input array and a constant
* set of coefficients to produce each byte of the output. Can be used for
* erasure coding encode and decode. Function requires pre-calculation of a
* 32*vlen byte constant array based on the input coefficients.
* @requires AVX
*
* @param len Length of each vector in bytes. Must be >= 16.
* @param vlen Number of vector sources.
* @param gftbls Pointer to 32*vlen byte array of pre-calculated constants based
* on the array of input coefficients.
* @param src Array of pointers to source inputs.
* @param dest Pointer to destination data array.
* @returns none
*/
void gf_vect_dot_prod_avx(int len, int vlen, unsigned char *gftbls,
unsigned char **src, unsigned char *dest);
/**
* @brief GF(2^8) vector dot product.
*
* Does a GF(2^8) dot product across each byte of the input array and a constant
* set of coefficients to produce each byte of the output. Can be used for
* erasure coding encode and decode. Function requires pre-calculation of a
* 32*vlen byte constant array based on the input coefficients.
* @requires AVX2
*
* @param len Length of each vector in bytes. Must be >= 32.
* @param vlen Number of vector sources.
* @param gftbls Pointer to 32*vlen byte array of pre-calculated constants based
* on the array of input coefficients.
* @param src Array of pointers to source inputs.
* @param dest Pointer to destination data array.
* @returns none
*/
void gf_vect_dot_prod_avx2(int len, int vlen, unsigned char *gftbls,
unsigned char **src, unsigned char *dest);
/**
* @brief GF(2^8) vector dot product with two outputs.
*
* Vector dot product optimized to calculate two outputs at a time. Does two
* GF(2^8) dot products across each byte of the input array and two constant
* sets of coefficients to produce each byte of the outputs. Can be used for
* erasure coding encode and decode. Function requires pre-calculation of a
* 2*32*vlen byte constant array based on the two sets of input coefficients.
* @requires SSE4.1
*
* @param len Length of each vector in bytes. Must be >= 16.
* @param vlen Number of vector sources.
* @param gftbls Pointer to 2*32*vlen byte array of pre-calculated constants
* based on the array of input coefficients.
* @param src Array of pointers to source inputs.
* @param dest Array of pointers to destination data buffers.
* @returns none
*/
void gf_2vect_dot_prod_sse(int len, int vlen, unsigned char *gftbls,
unsigned char **src, unsigned char **dest);
/**
* @brief GF(2^8) vector dot product with two outputs.
*
* Vector dot product optimized to calculate two outputs at a time. Does two
* GF(2^8) dot products across each byte of the input array and two constant
* sets of coefficients to produce each byte of the outputs. Can be used for
* erasure coding encode and decode. Function requires pre-calculation of a
* 2*32*vlen byte constant array based on the two sets of input coefficients.
* @requires AVX
*
* @param len Length of each vector in bytes. Must be >= 16.
* @param vlen Number of vector sources.
* @param gftbls Pointer to 2*32*vlen byte array of pre-calculated constants
* based on the array of input coefficients.
* @param src Array of pointers to source inputs.
* @param dest Array of pointers to destination data buffers.
* @returns none
*/
void gf_2vect_dot_prod_avx(int len, int vlen, unsigned char *gftbls,
unsigned char **src, unsigned char **dest);
/**
* @brief GF(2^8) vector dot product with two outputs.
*
* Vector dot product optimized to calculate two outputs at a time. Does two
* GF(2^8) dot products across each byte of the input array and two constant
* sets of coefficients to produce each byte of the outputs. Can be used for
* erasure coding encode and decode. Function requires pre-calculation of a
* 2*32*vlen byte constant array based on the two sets of input coefficients.
* @requires AVX2
*
* @param len Length of each vector in bytes. Must be >= 32.
* @param vlen Number of vector sources.
* @param gftbls Pointer to 2*32*vlen byte array of pre-calculated constants
* based on the array of input coefficients.
* @param src Array of pointers to source inputs.
* @param dest Array of pointers to destination data buffers.
* @returns none
*/
void gf_2vect_dot_prod_avx2(int len, int vlen, unsigned char *gftbls,
unsigned char **src, unsigned char **dest);
/**
* @brief GF(2^8) vector dot product with three outputs.
*
* Vector dot product optimized to calculate three outputs at a time. Does three
* GF(2^8) dot products across each byte of the input array and three constant
* sets of coefficients to produce each byte of the outputs. Can be used for
* erasure coding encode and decode. Function requires pre-calculation of a
* 3*32*vlen byte constant array based on the three sets of input coefficients.
* @requires SSE4.1
*
* @param len Length of each vector in bytes. Must be >= 16.
* @param vlen Number of vector sources.
* @param gftbls Pointer to 3*32*vlen byte array of pre-calculated constants
* based on the array of input coefficients.
* @param src Array of pointers to source inputs.
* @param dest Array of pointers to destination data buffers.
* @returns none
*/
void gf_3vect_dot_prod_sse(int len, int vlen, unsigned char *gftbls,
unsigned char **src, unsigned char **dest);
/**
* @brief GF(2^8) vector dot product with three outputs.
*
* Vector dot product optimized to calculate three outputs at a time. Does three
* GF(2^8) dot products across each byte of the input array and three constant
* sets of coefficients to produce each byte of the outputs. Can be used for
* erasure coding encode and decode. Function requires pre-calculation of a
* 3*32*vlen byte constant array based on the three sets of input coefficients.
* @requires AVX
*
* @param len Length of each vector in bytes. Must be >= 16.
* @param vlen Number of vector sources.
* @param gftbls Pointer to 3*32*vlen byte array of pre-calculated constants
* based on the array of input coefficients.
* @param src Array of pointers to source inputs.
* @param dest Array of pointers to destination data buffers.
* @returns none
*/
void gf_3vect_dot_prod_avx(int len, int vlen, unsigned char *gftbls,
unsigned char **src, unsigned char **dest);
/**
* @brief GF(2^8) vector dot product with three outputs.
*
* Vector dot product optimized to calculate three outputs at a time. Does three
* GF(2^8) dot products across each byte of the input array and three constant
* sets of coefficients to produce each byte of the outputs. Can be used for
* erasure coding encode and decode. Function requires pre-calculation of a
* 3*32*vlen byte constant array based on the three sets of input coefficients.
* @requires AVX2
*
* @param len Length of each vector in bytes. Must be >= 32.
* @param vlen Number of vector sources.
* @param gftbls Pointer to 3*32*vlen byte array of pre-calculated constants
* based on the array of input coefficients.
* @param src Array of pointers to source inputs.
* @param dest Array of pointers to destination data buffers.
* @returns none
*/
void gf_3vect_dot_prod_avx2(int len, int vlen, unsigned char *gftbls,
unsigned char **src, unsigned char **dest);
/**
* @brief GF(2^8) vector dot product with four outputs.
*
* Vector dot product optimized to calculate four outputs at a time. Does four
* GF(2^8) dot products across each byte of the input array and four constant
* sets of coefficients to produce each byte of the outputs. Can be used for
* erasure coding encode and decode. Function requires pre-calculation of a
* 4*32*vlen byte constant array based on the four sets of input coefficients.
* @requires SSE4.1
*
* @param len Length of each vector in bytes. Must be >= 16.
* @param vlen Number of vector sources.
* @param gftbls Pointer to 4*32*vlen byte array of pre-calculated constants
* based on the array of input coefficients.
* @param src Array of pointers to source inputs.
* @param dest Array of pointers to destination data buffers.
* @returns none
*/
void gf_4vect_dot_prod_sse(int len, int vlen, unsigned char *gftbls,
unsigned char **src, unsigned char **dest);
/**
* @brief GF(2^8) vector dot product with four outputs.
*
* Vector dot product optimized to calculate four outputs at a time. Does four
* GF(2^8) dot products across each byte of the input array and four constant
* sets of coefficients to produce each byte of the outputs. Can be used for
* erasure coding encode and decode. Function requires pre-calculation of a
* 4*32*vlen byte constant array based on the four sets of input coefficients.
* @requires AVX
*
* @param len Length of each vector in bytes. Must be >= 16.
* @param vlen Number of vector sources.
* @param gftbls Pointer to 4*32*vlen byte array of pre-calculated constants
* based on the array of input coefficients.
* @param src Array of pointers to source inputs.
* @param dest Array of pointers to destination data buffers.
* @returns none
*/
void gf_4vect_dot_prod_avx(int len, int vlen, unsigned char *gftbls,
unsigned char **src, unsigned char **dest);
/**
* @brief GF(2^8) vector dot product with four outputs.
*
* Vector dot product optimized to calculate four outputs at a time. Does four
* GF(2^8) dot products across each byte of the input array and four constant
* sets of coefficients to produce each byte of the outputs. Can be used for
* erasure coding encode and decode. Function requires pre-calculation of a
* 4*32*vlen byte constant array based on the four sets of input coefficients.
* @requires AVX2
*
* @param len Length of each vector in bytes. Must be >= 32.
* @param vlen Number of vector sources.
* @param gftbls Pointer to 4*32*vlen byte array of pre-calculated constants
* based on the array of input coefficients.
* @param src Array of pointers to source inputs.
* @param dest Array of pointers to destination data buffers.
* @returns none
*/
void gf_4vect_dot_prod_avx2(int len, int vlen, unsigned char *gftbls,
unsigned char **src, unsigned char **dest);
/**
* @brief GF(2^8) vector dot product with five outputs.
*
* Vector dot product optimized to calculate five outputs at a time. Does five
* GF(2^8) dot products across each byte of the input array and five constant
* sets of coefficients to produce each byte of the outputs. Can be used for
* erasure coding encode and decode. Function requires pre-calculation of a
* 5*32*vlen byte constant array based on the five sets of input coefficients.
* @requires SSE4.1
*
* @param len Length of each vector in bytes. Must be >= 16.
* @param vlen Number of vector sources.
* @param gftbls Pointer to 5*32*vlen byte array of pre-calculated constants
* based on the array of input coefficients.
* @param src Array of pointers to source inputs.
* @param dest Array of pointers to destination data buffers.
* @returns none
*/
void gf_5vect_dot_prod_sse(int len, int vlen, unsigned char *gftbls,
unsigned char **src, unsigned char **dest);
/**
* @brief GF(2^8) vector dot product with five outputs.
*
* Vector dot product optimized to calculate five outputs at a time. Does five
* GF(2^8) dot products across each byte of the input array and five constant
* sets of coefficients to produce each byte of the outputs. Can be used for
* erasure coding encode and decode. Function requires pre-calculation of a
* 5*32*vlen byte constant array based on the five sets of input coefficients.
* @requires AVX
*
* @param len Length of each vector in bytes. Must be >= 16.
* @param vlen Number of vector sources.
* @param gftbls Pointer to 5*32*vlen byte array of pre-calculated constants
* based on the array of input coefficients.
* @param src Array of pointers to source inputs.
* @param dest Array of pointers to destination data buffers.
* @returns none
*/
void gf_5vect_dot_prod_avx(int len, int vlen, unsigned char *gftbls,
unsigned char **src, unsigned char **dest);
/**
* @brief GF(2^8) vector dot product with five outputs.
*
* Vector dot product optimized to calculate five outputs at a time. Does five
* GF(2^8) dot products across each byte of the input array and five constant
* sets of coefficients to produce each byte of the outputs. Can be used for
* erasure coding encode and decode. Function requires pre-calculation of a
* 5*32*vlen byte constant array based on the five sets of input coefficients.
* @requires AVX2
*
* @param len Length of each vector in bytes. Must be >= 32.
* @param vlen Number of vector sources.
* @param gftbls Pointer to 5*32*vlen byte array of pre-calculated constants
* based on the array of input coefficients.
* @param src Array of pointers to source inputs.
* @param dest Array of pointers to destination data buffers.
* @returns none
*/
void gf_5vect_dot_prod_avx2(int len, int vlen, unsigned char *gftbls,
unsigned char **src, unsigned char **dest);
/**
* @brief GF(2^8) vector dot product with six outputs.
*
* Vector dot product optimized to calculate six outputs at a time. Does six
* GF(2^8) dot products across each byte of the input array and six constant
* sets of coefficients to produce each byte of the outputs. Can be used for
* erasure coding encode and decode. Function requires pre-calculation of a
* 6*32*vlen byte constant array based on the six sets of input coefficients.
* @requires SSE4.1
*
* @param len Length of each vector in bytes. Must be >= 16.
* @param vlen Number of vector sources.
* @param gftbls Pointer to 6*32*vlen byte array of pre-calculated constants
* based on the array of input coefficients.
* @param src Array of pointers to source inputs.
* @param dest Array of pointers to destination data buffers.
* @returns none
*/
void gf_6vect_dot_prod_sse(int len, int vlen, unsigned char *gftbls,
unsigned char **src, unsigned char **dest);
/**
* @brief GF(2^8) vector dot product with six outputs.
*
* Vector dot product optimized to calculate six outputs at a time. Does six
* GF(2^8) dot products across each byte of the input array and six constant
* sets of coefficients to produce each byte of the outputs. Can be used for
* erasure coding encode and decode. Function requires pre-calculation of a
* 6*32*vlen byte constant array based on the six sets of input coefficients.
* @requires AVX
*
* @param len Length of each vector in bytes. Must be >= 16.
* @param vlen Number of vector sources.
* @param gftbls Pointer to 6*32*vlen byte array of pre-calculated constants
* based on the array of input coefficients.
* @param src Array of pointers to source inputs.
* @param dest Array of pointers to destination data buffers.
* @returns none
*/
void gf_6vect_dot_prod_avx(int len, int vlen, unsigned char *gftbls,
unsigned char **src, unsigned char **dest);
/**
* @brief GF(2^8) vector dot product with six outputs.
*
* Vector dot product optimized to calculate six outputs at a time. Does six
* GF(2^8) dot products across each byte of the input array and six constant
* sets of coefficients to produce each byte of the outputs. Can be used for
* erasure coding encode and decode. Function requires pre-calculation of a
* 6*32*vlen byte constant array based on the six sets of input coefficients.
* @requires AVX2
*
* @param len Length of each vector in bytes. Must be >= 32.
* @param vlen Number of vector sources.
* @param gftbls Pointer to 6*32*vlen byte array of pre-calculated constants
* based on the array of input coefficients.
* @param src Array of pointers to source inputs.
* @param dest Array of pointers to destination data buffers.
* @returns none
*/
void gf_6vect_dot_prod_avx2(int len, int vlen, unsigned char *gftbls,
unsigned char **src, unsigned char **dest);
/**
* @brief GF(2^8) vector dot product, runs baseline version.
*
* Does a GF(2^8) dot product across each byte of the input array and a constant
* set of coefficients to produce each byte of the output. Can be used for
* erasure coding encode and decode. Function requires pre-calculation of a
* 32*vlen byte constant array based on the input coefficients.
*
* @param len Length of each vector in bytes. Must be >= 16.
* @param vlen Number of vector sources.
* @param gftbls Pointer to 32*vlen byte array of pre-calculated constants based
* on the array of input coefficients. Only elements 32*CONST*j + 1
* of this array are used, where j = (0, 1, 2...) and CONST is the
* number of elements in the array of input coefficients. The
* elements used correspond to the original input coefficients.
* @param src Array of pointers to source inputs.
* @param dest Pointer to destination data array.
* @returns none
*/
void gf_vect_dot_prod_base(int len, int vlen, unsigned char *gftbls,
unsigned char **src, unsigned char *dest);
/**
* @brief GF(2^8) vector dot product, runs appropriate version.
*
* Does a GF(2^8) dot product across each byte of the input array and a constant
* set of coefficients to produce each byte of the output. Can be used for
* erasure coding encode and decode. Function requires pre-calculation of a
* 32*vlen byte constant array based on the input coefficients.
*
* This function determines what instruction sets are enabled and
* selects the appropriate version at runtime.
*
* @param len Length of each vector in bytes. Must be >= 32.
* @param vlen Number of vector sources.
* @param gftbls Pointer to 32*vlen byte array of pre-calculated constants based
* on the array of input coefficients.
* @param src Array of pointers to source inputs.
* @param dest Pointer to destination data array.
* @returns none
*/
void gf_vect_dot_prod(int len, int vlen, unsigned char *gftbls,
unsigned char **src, unsigned char *dest);
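/**
 * Example (illustrative sketch): computing a single output vector directly
 * from one row of K coefficients. `row`, `src` and `dest` are assumptions
 * made for this sketch.
 *
 * @code
 * unsigned char gftbls[32 * K];
 * ec_init_tables(K, 1, row, gftbls);   // expand one coefficient row
 * // dest[i] = XOR over j of gf_mul(row[j], src[j][i]), for each byte i
 * gf_vect_dot_prod(LEN, K, gftbls, src, dest);
 * @endcode
 */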
/**
* @brief GF(2^8) vector multiply accumulate, runs appropriate version.
*
* Does a GF(2^8) multiply across each byte of input source with expanded
* constant and add to destination array. Can be used for erasure coding encode
* and decode update when only one source is available at a time. Function
* requires pre-calculation of a 32*vec byte constant array based on the input
* coefficients.
*
* This function determines what instruction sets are enabled and selects the
* appropriate version at runtime.
*
* @param len Length of each vector in bytes. Must be >= 32.
* @param vec The number of vector sources or rows in the generator matrix
* for coding.
* @param vec_i The vector index corresponding to the single input source.
* @param gftbls Pointer to array of input tables generated from coding
* coefficients in ec_init_tables(). Must be of size 32*vec.
* @param src Array of pointers to source inputs.
* @param dest Pointer to destination data array.
* @returns none
*/
void gf_vect_mad(int len, int vec, int vec_i, unsigned char *gftbls, unsigned char *src,
unsigned char *dest);
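/**
 * Example (minimal sketch): with dest zeroed first, applying gf_vect_mad()
 * once per source gives the same result as gf_vect_dot_prod() over the same
 * coefficient row. Buffer names are assumptions.
 *
 * @code
 * unsigned char gftbls[32 * K];
 * ec_init_tables(K, 1, row, gftbls);
 * memset(dest, 0, LEN);
 * for (j = 0; j < K; j++)
 *     gf_vect_mad(LEN, K, j, gftbls, src[j], dest); // dest ^= row[j] * src[j]
 * @endcode
 */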
/**
* @brief GF(2^8) vector multiply accumulate, arch specific version.
*
* Arch specific version of gf_vect_mad() with same parameters.
* @requires SSE4.1
*/
void gf_vect_mad_sse(int len, int vec, int vec_i, unsigned char *gftbls, unsigned char *src,
unsigned char *dest);
/**
* @brief GF(2^8) vector multiply accumulate, arch specific version.
*
* Arch specific version of gf_vect_mad() with same parameters.
* @requires AVX
*/
void gf_vect_mad_avx(int len, int vec, int vec_i, unsigned char *gftbls, unsigned char *src,
unsigned char *dest);
/**
* @brief GF(2^8) vector multiply accumulate, arch specific version.
*
* Arch specific version of gf_vect_mad() with same parameters.
* @requires AVX2
*/
void gf_vect_mad_avx2(int len, int vec, int vec_i, unsigned char *gftbls, unsigned char *src,
unsigned char *dest);
/**
* @brief GF(2^8) vector multiply accumulate, baseline version.
*
* Baseline version of gf_vect_mad() with same parameters.
*/
void gf_vect_mad_base(int len, int vec, int vec_i, unsigned char *v, unsigned char *src,
unsigned char *dest);
/**
* @brief GF(2^8) vector multiply with 2 accumulate. SSE version.
*
* Does a GF(2^8) multiply across each byte of input source with expanded
* constants and add to destination arrays. Can be used for erasure coding
* encode and decode update when only one source is available at a
* time. Function requires pre-calculation of a 32*vec byte constant array based
* on the input coefficients.
* @requires SSE4.1
*
* @param len Length of each vector in bytes. Must be >= 32.
* @param vec The number of vector sources or rows in the generator matrix
* for coding.
* @param vec_i The vector index corresponding to the single input source.
* @param gftbls Pointer to array of input tables generated from coding
* coefficients in ec_init_tables(). Must be of size 32*vec.
* @param src Pointer to source input array.
* @param dest Array of pointers to destination input/outputs.
* @returns none
*/
void gf_2vect_mad_sse(int len, int vec, int vec_i, unsigned char *gftbls, unsigned char *src,
unsigned char **dest);
/**
* @brief GF(2^8) vector multiply with 2 accumulate. AVX version of gf_2vect_mad_sse().
* @requires AVX
*/
void gf_2vect_mad_avx(int len, int vec, int vec_i, unsigned char *gftbls, unsigned char *src,
unsigned char **dest);
/**
* @brief GF(2^8) vector multiply with 2 accumulate. AVX2 version of gf_2vect_mad_sse().
* @requires AVX2
*/
void gf_2vect_mad_avx2(int len, int vec, int vec_i, unsigned char *gftbls, unsigned char *src,
unsigned char **dest);
/**
* @brief GF(2^8) vector multiply with 3 accumulate. SSE version.
*
* Does a GF(2^8) multiply across each byte of input source with expanded
* constants and add to destination arrays. Can be used for erasure coding
* encode and decode update when only one source is available at a
* time. Function requires pre-calculation of a 32*vec byte constant array based
* on the input coefficients.
* @requires SSE4.1
*
* @param len Length of each vector in bytes. Must be >= 32.
* @param vec The number of vector sources or rows in the generator matrix
* for coding.
* @param vec_i The vector index corresponding to the single input source.
* @param gftbls Pointer to array of input tables generated from coding
* coefficients in ec_init_tables(). Must be of size 32*vec.
* @param src Pointer to source input array.
* @param dest Array of pointers to destination input/outputs.
* @returns none
*/
void gf_3vect_mad_sse(int len, int vec, int vec_i, unsigned char *gftbls, unsigned char *src,
unsigned char **dest);
/**
* @brief GF(2^8) vector multiply with 3 accumulate. AVX version of gf_3vect_mad_sse().
* @requires AVX
*/
void gf_3vect_mad_avx(int len, int vec, int vec_i, unsigned char *gftbls, unsigned char *src,
unsigned char **dest);
/**
* @brief GF(2^8) vector multiply with 3 accumulate. AVX2 version of gf_3vect_mad_sse().
* @requires AVX2
*/
void gf_3vect_mad_avx2(int len, int vec, int vec_i, unsigned char *gftbls, unsigned char *src,
unsigned char **dest);
/**
* @brief GF(2^8) vector multiply with 4 accumulate. SSE version.
*
* Does a GF(2^8) multiply across each byte of input source with expanded
* constants and add to destination arrays. Can be used for erasure coding
* encode and decode update when only one source is available at a
* time. Function requires pre-calculation of a 32*vec byte constant array based
* on the input coefficients.
* @requires SSE4.1
*
* @param len Length of each vector in bytes. Must be >= 32.
* @param vec The number of vector sources or rows in the generator matrix
* for coding.
* @param vec_i The vector index corresponding to the single input source.
* @param gftbls Pointer to array of input tables generated from coding
* coefficients in ec_init_tables(). Must be of size 32*vec.
* @param src Pointer to source input array.
* @param dest Array of pointers to destination input/outputs.
* @returns none
*/
void gf_4vect_mad_sse(int len, int vec, int vec_i, unsigned char *gftbls, unsigned char *src,
unsigned char **dest);
/**
* @brief GF(2^8) vector multiply with 4 accumulate. AVX version of gf_4vect_mad_sse().
* @requires AVX
*/
void gf_4vect_mad_avx(int len, int vec, int vec_i, unsigned char *gftbls, unsigned char *src,
unsigned char **dest);
/**
* @brief GF(2^8) vector multiply with 4 accumulate. AVX2 version of gf_4vect_mad_sse().
* @requires AVX2
*/
void gf_4vect_mad_avx2(int len, int vec, int vec_i, unsigned char *gftbls, unsigned char *src,
unsigned char **dest);
/**
* @brief GF(2^8) vector multiply with 5 accumulate. SSE version.
* @requires SSE4.1
*/
void gf_5vect_mad_sse(int len, int vec, int vec_i, unsigned char *gftbls, unsigned char *src,
unsigned char **dest);
/**
* @brief GF(2^8) vector multiply with 5 accumulate. AVX version.
* @requires AVX
*/
void gf_5vect_mad_avx(int len, int vec, int vec_i, unsigned char *gftbls, unsigned char *src,
unsigned char **dest);
/**
* @brief GF(2^8) vector multiply with 5 accumulate. AVX2 version.
* @requires AVX2
*/
void gf_5vect_mad_avx2(int len, int vec, int vec_i, unsigned char *gftbls, unsigned char *src,
unsigned char **dest);
/**
* @brief GF(2^8) vector multiply with 6 accumulate. SSE version.
* @requires SSE4.1
*/
void gf_6vect_mad_sse(int len, int vec, int vec_i, unsigned char *gftbls, unsigned char *src,
unsigned char **dest);
/**
* @brief GF(2^8) vector multiply with 6 accumulate. AVX version.
* @requires AVX
*/
void gf_6vect_mad_avx(int len, int vec, int vec_i, unsigned char *gftbls, unsigned char *src,
unsigned char **dest);
/**
* @brief GF(2^8) vector multiply with 6 accumulate. AVX2 version.
* @requires AVX2
*/
void gf_6vect_mad_avx2(int len, int vec, int vec_i, unsigned char *gftbls, unsigned char *src,
unsigned char **dest);
/**********************************************************************
* The remaining are lib support functions used in GF(2^8) operations.
*/
/**
* @brief Single element GF(2^8) multiply.
*
* @param a Multiplicand a
* @param b Multiplicand b
* @returns Product of a and b in GF(2^8)
*/
unsigned char gf_mul(unsigned char a, unsigned char b);
/**
* @brief Single element GF(2^8) inverse.
*
* @param a Input element
* @returns Field element b such that a x b = {1}
*/
unsigned char gf_inv(unsigned char a);
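/**
 * Example (sketch): gf_mul() and gf_inv() are consistent, so multiplying any
 * non-zero element by its inverse yields the field identity.
 *
 * @code
 * unsigned char a = 0x1d;                    // arbitrary non-zero element
 * unsigned char one = gf_mul(a, gf_inv(a));  // == 1 in GF(2^8)
 * @endcode
 */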
/**
* @brief Generate a matrix of coefficients to be used for encoding.
*
* Vandermonde matrix example of encoding coefficients where high portion of
* matrix is identity matrix I and lower portion is constructed as 2^{i*(j-k+1)}
* i:{0,k-1} j:{k,m-1}. Commonly used method for choosing coefficients in
* erasure encoding but does not guarantee invertibility for every sub-matrix. For
* large k it is possible to find cases where the decode matrix chosen from
* sources and parity not in erasure are not invertible. Users may want to
* adjust for k > 5.
*
* @param a [mxk] array to hold coefficients
* @param m number of rows in matrix corresponding to srcs + parity.
* @param k number of columns in matrix corresponding to srcs.
* @returns none
*/
void gf_gen_rs_matrix(unsigned char *a, int m, int k);
/**
* @brief Generate a Cauchy matrix of coefficients to be used for encoding.
*
* Cauchy matrix example of encoding coefficients where high portion of matrix
* is identity matrix I and lower portion is constructed as 1/(i + j) | i != j,
* i:{0,k-1} j:{k,m-1}. Any sub-matrix of a Cauchy matrix should be invertible.
*
* @param a [mxk] array to hold coefficients
* @param m number of rows in matrix corresponding to srcs + parity.
* @param k number of columns in matrix corresponding to srcs.
* @returns none
*/
void gf_gen_cauchy1_matrix(unsigned char *a, int m, int k);
/**
* @brief Invert a matrix in GF(2^8)
*
* @param in input matrix
* @param out output matrix such that [in] x [out] = [I] - identity matrix
* @param n size of matrix [nxn]
* @returns 0 on success, non-zero on failure (singular input matrix)
*/
int gf_invert_matrix(unsigned char *in, unsigned char *out, const int n);
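/**
 * Example (illustrative sketch): the decode path builds a K x K sub-matrix
 * from surviving rows of the generator matrix and inverts it. Row selection
 * is elided; `sub` holding K surviving rows is an assumption.
 *
 * @code
 * unsigned char e[(K + ROWS) * K], sub[K * K], inv[K * K];
 * gf_gen_cauchy1_matrix(e, K + ROWS, K);
 * // ... copy K surviving rows of e into sub ...
 * if (gf_invert_matrix(sub, inv, K) != 0)
 *     return -1;  // singular: this row selection cannot decode
 * // rows of inv feed ec_init_tables() to build the decode tables
 * @endcode
 */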
/*************************************************************/
#ifdef __cplusplus
}
#endif
#endif //_ERASURE_CODE_H_

View file

@ -1,267 +0,0 @@
/**********************************************************************
Copyright(c) 2011-2015 Intel Corporation All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**********************************************************************/
#include <limits.h>
#include "ec_code.h"
#include "ec_types.h"
void ec_init_tables(int k, int rows, unsigned char *a, unsigned char *g_tbls)
{
int i, j;
for (i = 0; i < rows; i++) {
for (j = 0; j < k; j++) {
gf_vect_mul_init(*a++, g_tbls);
g_tbls += 32;
}
}
}
void ec_encode_data_sse(int len, int k, int rows, unsigned char *g_tbls, unsigned char **data,
unsigned char **coding)
{
if (len < 16) {
ec_encode_data_base(len, k, rows, g_tbls, data, coding);
return;
}
while (rows >= 4) {
gf_4vect_dot_prod_sse(len, k, g_tbls, data, coding);
g_tbls += 4 * k * 32;
coding += 4;
rows -= 4;
}
switch (rows) {
case 3:
gf_3vect_dot_prod_sse(len, k, g_tbls, data, coding);
break;
case 2:
gf_2vect_dot_prod_sse(len, k, g_tbls, data, coding);
break;
case 1:
gf_vect_dot_prod_sse(len, k, g_tbls, data, *coding);
break;
case 0:
break;
}
}
void ec_encode_data_avx(int len, int k, int rows, unsigned char *g_tbls, unsigned char **data,
unsigned char **coding)
{
if (len < 16) {
ec_encode_data_base(len, k, rows, g_tbls, data, coding);
return;
}
while (rows >= 4) {
gf_4vect_dot_prod_avx(len, k, g_tbls, data, coding);
g_tbls += 4 * k * 32;
coding += 4;
rows -= 4;
}
switch (rows) {
case 3:
gf_3vect_dot_prod_avx(len, k, g_tbls, data, coding);
break;
case 2:
gf_2vect_dot_prod_avx(len, k, g_tbls, data, coding);
break;
case 1:
gf_vect_dot_prod_avx(len, k, g_tbls, data, *coding);
break;
case 0:
break;
}
}
void ec_encode_data_avx2(int len, int k, int rows, unsigned char *g_tbls, unsigned char **data,
unsigned char **coding)
{
if (len < 32) {
ec_encode_data_base(len, k, rows, g_tbls, data, coding);
return;
}
while (rows >= 4) {
gf_4vect_dot_prod_avx2(len, k, g_tbls, data, coding);
g_tbls += 4 * k * 32;
coding += 4;
rows -= 4;
}
switch (rows) {
case 3:
gf_3vect_dot_prod_avx2(len, k, g_tbls, data, coding);
break;
case 2:
gf_2vect_dot_prod_avx2(len, k, g_tbls, data, coding);
break;
case 1:
gf_vect_dot_prod_avx2(len, k, g_tbls, data, *coding);
break;
case 0:
break;
}
}
#if __WORDSIZE == 64 || _WIN64 || __x86_64__
void ec_encode_data_update_sse(int len, int k, int rows, int vec_i, unsigned char *g_tbls,
unsigned char *data, unsigned char **coding)
{
if (len < 16) {
ec_encode_data_update_base(len, k, rows, vec_i, g_tbls, data, coding);
return;
}
while (rows > 6) {
gf_6vect_mad_sse(len, k, vec_i, g_tbls, data, coding);
g_tbls += 6 * k * 32;
coding += 6;
rows -= 6;
}
switch (rows) {
case 6:
gf_6vect_mad_sse(len, k, vec_i, g_tbls, data, coding);
break;
case 5:
gf_5vect_mad_sse(len, k, vec_i, g_tbls, data, coding);
break;
case 4:
gf_4vect_mad_sse(len, k, vec_i, g_tbls, data, coding);
break;
case 3:
gf_3vect_mad_sse(len, k, vec_i, g_tbls, data, coding);
break;
case 2:
gf_2vect_mad_sse(len, k, vec_i, g_tbls, data, coding);
break;
case 1:
gf_vect_mad_sse(len, k, vec_i, g_tbls, data, *coding);
break;
case 0:
break;
}
}
void ec_encode_data_update_avx(int len, int k, int rows, int vec_i, unsigned char *g_tbls,
unsigned char *data, unsigned char **coding)
{
if (len < 16) {
ec_encode_data_update_base(len, k, rows, vec_i, g_tbls, data, coding);
return;
}
while (rows > 6) {
gf_6vect_mad_avx(len, k, vec_i, g_tbls, data, coding);
g_tbls += 6 * k * 32;
coding += 6;
rows -= 6;
}
switch (rows) {
case 6:
gf_6vect_mad_avx(len, k, vec_i, g_tbls, data, coding);
break;
case 5:
gf_5vect_mad_avx(len, k, vec_i, g_tbls, data, coding);
break;
case 4:
gf_4vect_mad_avx(len, k, vec_i, g_tbls, data, coding);
break;
case 3:
gf_3vect_mad_avx(len, k, vec_i, g_tbls, data, coding);
break;
case 2:
gf_2vect_mad_avx(len, k, vec_i, g_tbls, data, coding);
break;
case 1:
gf_vect_mad_avx(len, k, vec_i, g_tbls, data, *coding);
break;
case 0:
break;
}
}
void ec_encode_data_update_avx2(int len, int k, int rows, int vec_i, unsigned char *g_tbls,
unsigned char *data, unsigned char **coding)
{
if (len < 32) {
ec_encode_data_update_base(len, k, rows, vec_i, g_tbls, data, coding);
return;
}
while (rows > 6) {
gf_6vect_mad_avx2(len, k, vec_i, g_tbls, data, coding);
g_tbls += 6 * k * 32;
coding += 6;
rows -= 6;
}
switch (rows) {
case 6:
gf_6vect_mad_avx2(len, k, vec_i, g_tbls, data, coding);
break;
case 5:
gf_5vect_mad_avx2(len, k, vec_i, g_tbls, data, coding);
break;
case 4:
gf_4vect_mad_avx2(len, k, vec_i, g_tbls, data, coding);
break;
case 3:
gf_3vect_mad_avx2(len, k, vec_i, g_tbls, data, coding);
break;
case 2:
gf_2vect_mad_avx2(len, k, vec_i, g_tbls, data, coding);
break;
case 1:
gf_vect_mad_avx2(len, k, vec_i, g_tbls, data, *coding);
break;
case 0:
break;
}
}
#endif //__WORDSIZE == 64 || _WIN64 || __x86_64__
struct slver {
UINT16 snum;
UINT8 ver;
UINT8 core;
};
// Version info
struct slver ec_init_tables_slver_00010068;
struct slver ec_init_tables_slver = { 0x0068, 0x01, 0x00 };
struct slver ec_encode_data_sse_slver_00020069;
struct slver ec_encode_data_sse_slver = { 0x0069, 0x02, 0x00 };

View file

@ -1,39 +0,0 @@
/*
* Minio Cloud Storage, (C) 2014 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __COMMON_H__
#define __COMMON_H__
#include <stdint.h>
/* Allocate and generate the encode matrix plus expanded coefficient
 * tables for k data and m parity blocks. */
int32_t minio_init_encoder (int k, int m,
unsigned char **encode_matrix,
unsigned char **encode_tbls);
/* Build the decode matrix, decode tables and surviving-source index
 * for the erasure pattern described by error_index. */
int32_t minio_init_decoder (int32_t *error_index,
int k, int n, int errs,
unsigned char *encoding_matrix,
unsigned char **decode_matrix,
unsigned char **decode_tbls,
uint32_t **decode_index);
/* Split buffs into surviving source buffers and the target buffers
 * to be regenerated. */
int32_t minio_get_source_target (int errs, int k, int m,
int32_t *error_index,
uint32_t *decode_index,
unsigned char **buffs,
unsigned char ***source,
unsigned char ***target);
#endif /* __COMMON_H__ */

View file

@ -1,142 +0,0 @@
/*
* Minio Cloud Storage, (C) 2014 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "ec.h"
#include "ec_minio_common.h"
static
int32_t _minio_src_index_in_error (int r, int32_t *error_index, int errs)
{
int i;
for (i = 0; i < errs; i++) {
if (error_index[i] == r) {
// true
return 1;
}
}
// false
return 0;
}
// Separate out source data and target buffers
int32_t minio_get_source_target (int errs, int k, int m,
int32_t *error_index,
uint32_t *decode_index,
unsigned char **buffs,
unsigned char ***source,
unsigned char ***target)
{
int i;
unsigned char **tmp_source;
unsigned char **tmp_target;
if (k < 0 || m < 0) {
return -1;
}
// Heap allocation: these arrays are handed back to the caller through
// *source and *target, so automatic (stack) storage would dangle after
// return. calloc also zeroes the full pointer arrays.
tmp_source = (unsigned char **) calloc (k, sizeof(unsigned char *));
tmp_target = (unsigned char **) calloc (m, sizeof(unsigned char *));
if (tmp_source == NULL || tmp_target == NULL) {
free(tmp_source);
free(tmp_target);
return -1;
}
for (i = 0; i < k; i++) {
tmp_source[i] = (unsigned char *) buffs[decode_index[i]];
}
for (i = 0; i < m; i++) {
if (i < errs)
tmp_target[i] = (unsigned char *) buffs[error_index[i]];
}
*source = tmp_source;
*target = tmp_target;
return 0;
}
/*
Generate decode matrix during the decoding phase
*/
int32_t minio_init_decoder (int32_t *error_index,
int k, int n, int errs,
unsigned char *encode_matrix,
unsigned char **decode_matrix,
unsigned char **decode_tbls,
uint32_t **decode_index)
{
int i, j, r, l;
uint32_t *tmp_decode_index = (uint32_t *) malloc(sizeof(uint32_t) * k);
unsigned char *input_matrix;
unsigned char *inverse_matrix;
unsigned char *tmp_decode_matrix;
unsigned char *tmp_decode_tbls;
input_matrix = (unsigned char *) malloc(sizeof(unsigned char) * k * n);
inverse_matrix = (unsigned char *) malloc(sizeof(unsigned char) * k * n);
tmp_decode_matrix = (unsigned char *) malloc(sizeof(unsigned char) * k * n);
tmp_decode_tbls = (unsigned char *) malloc(sizeof(unsigned char) * k * n * 32);
for (i = 0, r = 0; i < k; i++, r++) {
while (_minio_src_index_in_error(r, error_index, errs))
r++;
for (j = 0; j < k; j++) {
input_matrix[k * i + j] = encode_matrix[k * r + j];
}
tmp_decode_index[i] = r;
}
// Not every Vandermonde sub-matrix is invertible
if (gf_invert_matrix(input_matrix, inverse_matrix, k) < 0) {
free(input_matrix);
free(inverse_matrix);
free(tmp_decode_matrix);
free(tmp_decode_tbls);
free(tmp_decode_index);
return -1;
}
for (l = 0; l < errs; l++) {
if (error_index[l] < k) {
// decoding matrix elements for data chunks
for (j = 0; j < k; j++) {
tmp_decode_matrix[k * l + j] =
inverse_matrix[k *
error_index[l] + j];
}
} else {
// decoding matrix element for coding chunks
for (i = 0; i < k; i++) {
unsigned char s = 0;
for (j = 0; j < k; j++) {
s ^= gf_mul(inverse_matrix[j * k + i],
encode_matrix[k *
error_index[l] + j]);
}
tmp_decode_matrix[k * l + i] = s;
}
}
}
ec_init_tables (k, errs, tmp_decode_matrix, tmp_decode_tbls);
// The scratch matrices are no longer needed once the tables are built.
free(input_matrix);
free(inverse_matrix);
*decode_matrix = tmp_decode_matrix;
*decode_tbls = tmp_decode_tbls;
*decode_index = tmp_decode_index;
return 0;
}
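/*
 * Illustrative sketch (not part of the original file): a typical recovery
 * flow built from the helpers above. k, m, errs, error_index, encode_matrix,
 * buffs and blocksize are assumptions for the example.
 *
 *   unsigned char *decode_matrix, *decode_tbls;
 *   uint32_t *decode_index;
 *   unsigned char **source, **target;
 *
 *   if (minio_init_decoder (error_index, k, k + m, errs, encode_matrix,
 *                           &decode_matrix, &decode_tbls, &decode_index) < 0)
 *           return -1;  // surviving rows formed a singular matrix
 *   if (minio_get_source_target (errs, k, m, error_index, decode_index,
 *                                buffs, &source, &target) < 0)
 *           return -1;
 *   // regenerate the errs missing blocks into the target buffers
 *   ec_encode_data (blocksize, k, errs, decode_tbls, source, target);
 */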

View file

@ -1,55 +0,0 @@
/*
* Minio Cloud Storage, (C) 2014 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdlib.h>
#include <stdio.h>
#include "ec.h"
#include "ec_minio_common.h"
/*
Generate encode matrix during the encoding phase
*/
int32_t minio_init_encoder (int k, int m, unsigned char **encode_matrix, unsigned char **encode_tbls)
{
unsigned char *tmp_matrix;
unsigned char *tmp_tbls;
tmp_matrix = (unsigned char *) malloc (k * (k + m));
tmp_tbls = (unsigned char *) malloc (k * (k + m) * 32);
if (tmp_matrix == NULL || tmp_tbls == NULL) {
free (tmp_matrix);
free (tmp_tbls);
return -1;
}
if (k < 5) {
/*
Commonly used method for choosing coefficients in erasure
encoding but does not guarantee invertible for every sub
matrix. For large k it is possible to find cases where the
decode matrix chosen from sources and parity not in erasure
are not invertible. Users may want to adjust for k > 5.
-- Intel
*/
gf_gen_rs_matrix (tmp_matrix, k + m, k);
} else {
gf_gen_cauchy1_matrix (tmp_matrix, k + m, k);
}
ec_init_tables(k, m, &tmp_matrix[k * k], tmp_tbls);
*encode_matrix = tmp_matrix;
*encode_tbls = tmp_tbls;
return 0;
}
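/*
 * Illustrative sketch (not part of the original file): pairing the generated
 * matrix and tables with ec_encode_data() to produce m parity blocks from
 * k data blocks. blocksize, data and coding are assumptions for the example.
 *
 *   unsigned char *encode_matrix, *encode_tbls;
 *
 *   if (minio_init_encoder (k, m, &encode_matrix, &encode_tbls) == 0)
 *           ec_encode_data (blocksize, k, m, encode_tbls, data, coding);
 */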

Some files were not shown because too many files have changed in this diff.