From 76d56c6ff2da346d70d847c20d2c8511ebb3fd7e Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Mon, 15 Aug 2016 02:44:48 -0700 Subject: [PATCH] typo: Fix typos across the codebase. (#2442) --- Makefile | 4 ++-- bucket-policy-handlers_test.go | 4 ++-- bucket-policy-parser_test.go | 6 +++--- dist/benchmark/benchcmp.sh | 2 +- docs/FreeBSD.md | 2 +- docs/erasure/README.md | 2 +- ...run-multiple-minio-server-instances-on-single-machine.md | 4 ++-- leak-detect_test.go | 2 +- object-api-listobjects_test.go | 2 +- object-api-multipart_test.go | 2 +- object-api-putobject_test.go | 2 +- pkg/quick/errorutil.go | 2 +- signature-v4-utils.go | 2 +- xl-v1-multipart.go | 2 +- 14 files changed, 19 insertions(+), 19 deletions(-) diff --git a/Makefile b/Makefile index eb3b7d1bc..d49fb21d1 100644 --- a/Makefile +++ b/Makefile @@ -101,8 +101,8 @@ deadcode: @GO15VENDOREXPERIMENT=1 ${GOPATH}/bin/deadcode spelling: - @GO15VENDOREXPERIMENT=1 ${GOPATH}/bin/misspell *.go - @GO15VENDOREXPERIMENT=1 ${GOPATH}/bin/misspell pkg/**/* + @GO15VENDOREXPERIMENT=1 ${GOPATH}/bin/misspell -error * + @GO15VENDOREXPERIMENT=1 ${GOPATH}/bin/misspell -error pkg/**/* test: build @echo "Running all minio testing:" diff --git a/bucket-policy-handlers_test.go b/bucket-policy-handlers_test.go index 156afc151..24ebe5ccf 100644 --- a/bucket-policy-handlers_test.go +++ b/bucket-policy-handlers_test.go @@ -118,7 +118,7 @@ func TestBucketPolicyActionMatch(t *testing.T) { {"s3:ListBucketMultipartUploads", getWriteOnlyBucketStatement(bucketName, objectPrefix), true}, // read-only bucket policy is expected to not allow ListBucketMultipartUploads operation on an anonymous request (Test case 9). // the allowed actions in read-only bucket statement are "s3:GetBucketLocation","s3:ListBucket", - // this shouldnot allow for ListBucketMultipartUploads operations. + // this should not allow for ListBucketMultipartUploads operations. 
{"s3:ListBucketMultipartUploads", getReadOnlyBucketStatement(bucketName, objectPrefix), false}, // Any of the object level policy will not allow for s3:ListBucketMultipartUploads (Test cases 10-12). @@ -136,7 +136,7 @@ func TestBucketPolicyActionMatch(t *testing.T) { {"s3:ListBucket", getReadWriteBucketStatement(bucketName, objectPrefix), true}, // write-only bucket policy is expected to not allow ListBucket operation on an anonymous request (Test case 15). // the allowed actions in write-only bucket statement are "s3:GetBucketLocation", "s3:ListBucketMultipartUploads", - // this shouldnot allow for ListBucket operations. + // this should not allow for ListBucket operations. {"s3:ListBucket", getWriteOnlyBucketStatement(bucketName, objectPrefix), false}, // Cases for testing ListBucket access for different Object level access permissions (Test cases 16-18). diff --git a/bucket-policy-parser_test.go b/bucket-policy-parser_test.go index 350416027..2c5670791 100644 --- a/bucket-policy-parser_test.go +++ b/bucket-policy-parser_test.go @@ -477,7 +477,7 @@ func TestCheckbucketPolicyResources(t *testing.T) { statements[0].Actions = []string{"s3:DeleteObject", "s3:PutObject"} return statements } - // contructing policy statement with recursive resources. + // constructing policy statement with recursive resources. // should result in ErrMalformedPolicy setRecurseResource := func(statements []policyStatement) []policyStatement { statements[0].Resources = []string{"arn:aws:s3:::minio-bucket/Asia/*", "arn:aws:s3:::minio-bucket/Asia/India/*"} @@ -512,7 +512,7 @@ func TestCheckbucketPolicyResources(t *testing.T) { // this results in return of ErrMalformedPolicy. {Version: "1.0", Statements: setValidPrefixActions(getWriteOnlyStatement("minio-bucket-fail", "Asia/India/"))}, // bucketPolicy - 6. - // contructing policy statement with recursive resources. + // constructing policy statement with recursive resources. 
// should result in ErrMalformedPolicy {Version: "1.0", Statements: setRecurseResource(setValidPrefixActions(getWriteOnlyStatement("minio-bucket", "")))}, // BucketPolciy - 7. @@ -544,7 +544,7 @@ func TestCheckbucketPolicyResources(t *testing.T) { // Resource prefix bucket part is not equal to the bucket name in this case. {bucketAccessPolicies[4], ErrMalformedPolicy, false}, // Test case - 6. - // contructing policy statement with recursive resources. + // constructing policy statement with recursive resources. // should result in ErrPolicyNesting. {bucketAccessPolicies[5], ErrPolicyNesting, false}, // Test case - 7. diff --git a/dist/benchmark/benchcmp.sh b/dist/benchmark/benchcmp.sh index d0e688c1f..7199dde88 100755 --- a/dist/benchmark/benchcmp.sh +++ b/dist/benchmark/benchcmp.sh @@ -50,7 +50,7 @@ if [ ! $# -eq 2 ] then # exit if commit SHA's are not provided. echo $# - echo "Need Commit SHA's of 2 snapshots to be supplied to run benchmark comparision." + echo "Need Commit SHA's of 2 snapshots to be supplied to run benchmark comparison." exit 1 fi diff --git a/docs/FreeBSD.md b/docs/FreeBSD.md index b9e70d92d..79aec5d87 100644 --- a/docs/FreeBSD.md +++ b/docs/FreeBSD.md @@ -62,7 +62,7 @@ Verify if it is writable ``` -Now you have successfully created a ZFS pool for futher reading please refer to [ZFS Quickstart Guide](https://www.freebsd.org/doc/handbook/zfs-quickstart.html) +Now you have successfully created a ZFS pool for further reading please refer to [ZFS Quickstart Guide](https://www.freebsd.org/doc/handbook/zfs-quickstart.html) However, this pool is not taking advantage of any ZFS features, so let's create a ZFS filesytem on this pool with compression enabled. ZFS supports many compression algorithms: lzjb, gzip, zle, lz4. LZ4 is often the most performant algorithm in terms of compression of data versus system overhead. 
diff --git a/docs/erasure/README.md b/docs/erasure/README.md index ffda3ad47..ad7f54808 100644 --- a/docs/erasure/README.md +++ b/docs/erasure/README.md @@ -12,7 +12,7 @@ Erasure code is a mathematical algorithm to reconstruct missing or corrupted dat ## Why is Erasure Code useful? -Erasure code protects data from multiple drives failure unlike RAID or replication. For eg RAID6 can protect against 2 drive failure whereas in Minio erasure code you can lose as many as half number of drives and still the data reamins safe. Further Minio's erasure code is at object level and can heal one object at a time. For RAID, healing can only be performed at volume level which translates into huge down time. As Minio encodes each object individually with a high parity count. Storage servers once deployed should not require drive replacement or healing for the lifetime of the server. Minio's erasure coded backend is designed for operational efficiency and takes full advantage of hardware acceleration whenever available. +Erasure code protects data from multiple drives failure unlike RAID or replication. For eg RAID6 can protect against 2 drive failure whereas in Minio erasure code you can lose as many as half number of drives and still the data remains safe. Further Minio's erasure code is at object level and can heal one object at a time. For RAID, healing can only be performed at volume level which translates into huge down time. As Minio encodes each object individually with a high parity count. Storage servers once deployed should not require drive replacement or healing for the lifetime of the server. Minio's erasure coded backend is designed for operational efficiency and takes full advantage of hardware acceleration whenever available. 
![Erasure](https://raw.githubusercontent.com/minio/minio/master/docs/screenshots/erasure-code.jpg?raw=true) diff --git a/docs/how-to-run-multiple-minio-server-instances-on-single-machine.md b/docs/how-to-run-multiple-minio-server-instances-on-single-machine.md index e89e9710b..5cd9ed92e 100644 --- a/docs/how-to-run-multiple-minio-server-instances-on-single-machine.md +++ b/docs/how-to-run-multiple-minio-server-instances-on-single-machine.md @@ -1,9 +1,9 @@ # How to run multiple Minio server instances on single machine. [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -![minio_MULTISERVER](https://github.com/minio/minio/blob/master/docs/screenshots/multiport.jpeg?raw=true) +![minio_MULTISERVER](https://github.com/minio/minio/blob/master/docs/screenshots/multiport.jpeg?raw=true) -In this document we will illustrate how to set up multiple Minio server instances on single machine. These Minio servers are running on thier own port, data directory & configuration directory. +In this document we will illustrate how to set up multiple Minio server instances on single machine. These Minio servers are running on their own port, data directory & configuration directory. ## 1. Prerequisites diff --git a/leak-detect_test.go b/leak-detect_test.go index 617f19b0d..838d63ff1 100644 --- a/leak-detect_test.go +++ b/leak-detect_test.go @@ -37,7 +37,7 @@ func NewLeakDetect() LeakDetect { return snapshot } -// CompareCurrentSnapshot - Comapres the initial relevant stack trace with the current one (during the time of invocation). +// CompareCurrentSnapshot - Compares the initial relevant stack trace with the current one (during the time of invocation). 
func (initialSnapShot LeakDetect) CompareCurrentSnapshot() []string { var stackDiff []string for _, g := range pickRelevantGoroutines() { diff --git a/object-api-listobjects_test.go b/object-api-listobjects_test.go index f177a0812..13e14ec46 100644 --- a/object-api-listobjects_test.go +++ b/object-api-listobjects_test.go @@ -503,7 +503,7 @@ func testListObjects(obj ObjectLayer, instanceType string, t TestErrHandler) { {"test-bucket-list-object", "Asia/India/", "", "", 10, resultCases[23], nil, true}, {"test-bucket-list-object", "Asia", "", "", 10, resultCases[24], nil, true}, // Tests with prefix and delimiter (55-57). - // With delimeter the code shouldnot recurse into the sub-directories of prefix Dir. + // With delimiter the code should not recurse into the sub-directories of prefix Dir. {"test-bucket-list-object", "Asia", "", "/", 10, resultCases[25], nil, true}, {"test-bucket-list-object", "new", "", "/", 10, resultCases[26], nil, true}, {"test-bucket-list-object", "Asia/India/", "", "/", 10, resultCases[27], nil, true}, diff --git a/object-api-multipart_test.go b/object-api-multipart_test.go index 828d0efa8..699225815 100644 --- a/object-api-multipart_test.go +++ b/object-api-multipart_test.go @@ -1876,7 +1876,7 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T } // Benchmarks for ObjectLayer.PutObjectPart(). -// The intent is to benchamrk PutObjectPart for various sizes ranging from few bytes to 100MB. +// The intent is to benchmark PutObjectPart for various sizes ranging from few bytes to 100MB. // Also each of these Benchmarks are run both XL and FS backends. // BenchmarkPutObjectPart5MbFS - Benchmark FS.PutObjectPart() for object size of 5MB. 
diff --git a/object-api-putobject_test.go b/object-api-putobject_test.go index 694338063..0bb3bf12a 100644 --- a/object-api-putobject_test.go +++ b/object-api-putobject_test.go @@ -395,7 +395,7 @@ func testObjectAPIMultipartPutObjectStaleFiles(obj ObjectLayer, instanceType str } // Benchmarks for ObjectLayer.PutObject(). -// The intent is to benchamrk PutObject for various sizes ranging from few bytes to 100MB. +// The intent is to benchmark PutObject for various sizes ranging from few bytes to 100MB. // Also each of these Benchmarks are run both XL and FS backends. // BenchmarkPutObjectVerySmallFS - Benchmark FS.PutObject() for object size of 10 bytes. diff --git a/pkg/quick/errorutil.go b/pkg/quick/errorutil.go index ad2ac70ca..b6bb92edf 100644 --- a/pkg/quick/errorutil.go +++ b/pkg/quick/errorutil.go @@ -49,7 +49,7 @@ func FormatJSONSyntaxError(data io.Reader, sErr *json.SyntaxError) error { termWidth := 25 // errorShift is the length of the minimum needed place for - // error msg accessoires, like <--, etc.. We calculate it + // error msg accessories, like <--, etc.. We calculate it // dynamically to avoid an eventual bug after modifying errorFmt errorShift := len(fmt.Sprintf(errorFmt, 1, "")) diff --git a/signature-v4-utils.go b/signature-v4-utils.go index 901774c13..d12034a51 100644 --- a/signature-v4-utils.go +++ b/signature-v4-utils.go @@ -143,7 +143,7 @@ func extractSignedHeaders(signedHeaders []string, reqHeaders http.Header) (http. continue } // the "host" field will not be found in the header map, it can be found in req.Host. - // but its necessary to make sure that the "host" field exists in the list of signed paramaters, + // but its necessary to make sure that the "host" field exists in the list of signed parameters, // the check is done above. 
if header == "host" { continue diff --git a/xl-v1-multipart.go b/xl-v1-multipart.go index 089e1aec0..ec066dbc4 100644 --- a/xl-v1-multipart.go +++ b/xl-v1-multipart.go @@ -722,7 +722,7 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload // This lock also protects the cache namespace. nsMutex.Unlock(bucket, object) - // Prefetch the object from disk by triggerring a fake GetObject call + // Prefetch the object from disk by triggering a fake GetObject call // Unlike a regular single PutObject, multipart PutObject is comes in // stages and it is harder to cache. go xl.GetObject(bucket, object, 0, objectSize, ioutil.Discard)