fix: remove all unused code (#12360)

Harshavardhana 2021-05-24 09:28:19 -07:00 committed by GitHub
parent 41e9c6572f
commit ebf75ef10d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
29 changed files with 29 additions and 497 deletions

View File

@@ -19,6 +19,9 @@ linters:
- structcheck
- gomodguard
- gofmt
- unused
- structcheck
- unconvert
issues:
exclude-use-default: false
@@ -26,11 +29,5 @@ issues:
- should have a package comment
- error strings should not be capitalized or end with punctuation or a newline
run:
skip-dirs:
- pkg/rpc
- pkg/argon2
- pkg/s3select/internal
service:
golangci-lint-version: 1.20.0 # use the fixed version to not introduce new linters unexpectedly
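
For context, the linters enabled above drive most of the changes in this commit: `unused` flags declarations that are never referenced, `structcheck` flags struct fields that are never used, and `unconvert` flags conversions of a value to the type it already has. A minimal sketch of the kind of code they report (hypothetical example, not taken from this repository):

package main

import "fmt"

// unusedHelper is never referenced, so the `unused` linter reports it
// as dead code (the fix is to delete it, as the hunks below do).
func unusedHelper() string {
	return "dead code"
}

func main() {
	data := []byte("hello")
	// data is already a []byte, so []byte(data) is a no-op conversion that
	// `unconvert` reports; the fix is to pass data directly.
	fmt.Println([]byte(data))
}

The same pattern shows up throughout the hunks below, where wrappers such as []byte(textPartData), rune(char) and float64(stat.CPUTime()) are dropped and unreferenced functions are deleted outright.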

View File

@@ -486,22 +486,6 @@ func newHealSequence(ctx context.Context, bucket, objPrefix, clientAddr string,
}
}
// resetHealStatusCounters - reset the healSequence status counters between
// each monthly background heal scanning activity.
// This is used only in the background healing scenario, where
// we use a single long-running healSequence which reactively heals
// objects passed to the SourceCh.
func (h *healSequence) resetHealStatusCounters() {
h.mutex.Lock()
defer h.mutex.Unlock()
h.currentStatus.Items = []madmin.HealResultItem{}
h.lastSentResultIndex = 0
h.scannedItemsMap = make(map[madmin.HealItemType]int64)
h.healedItemsMap = make(map[madmin.HealItemType]int64)
h.healFailedItemsMap = make(map[string]int64)
}
// getScannedItemsCount - returns a count of all scanned items
func (h *healSequence) getScannedItemsCount() int64 {
var count int64

View File

@@ -31,7 +31,7 @@ func TestNewRequestID(t *testing.T) {
var e rune
for _, char := range id {
e = rune(char)
e = char
// Ensure that it is alphanumeric, in this case, between 0-9 and A-Z.
if !(('0' <= e && e <= '9') || ('A' <= e && e <= 'Z')) {

View File

@@ -111,7 +111,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
} else {
textPartData = textData[j*partSize:]
}
md5hex := getMD5Hash([]byte(textPartData))
md5hex := getMD5Hash(textPartData)
var partInfo PartInfo
partInfo, err = obj.PutObjectPart(context.Background(), bucket, object, uploadID, j,
mustGetPutObjReader(b, bytes.NewReader(textPartData), int64(len(textPartData)), md5hex, sha256hex), ObjectOptions{})
@@ -206,7 +206,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
// generate md5sum for the generated data.
// md5sum of the data to be written is required as input for PutObject.
md5hex := getMD5Hash([]byte(textData))
md5hex := getMD5Hash(textData)
sha256hex := ""
// benchmark utility which helps obtain number of allocations and bytes allocated per ops.

View File

@@ -31,14 +31,6 @@ import (
"github.com/minio/minio/pkg/ioutil"
)
type errHashMismatch struct {
message string
}
func (err *errHashMismatch) Error() string {
return err.message
}
// Calculates bitrot in chunks and writes the hash into the stream.
type streamingBitrotWriter struct {
iow io.WriteCloser

View File

@@ -352,7 +352,7 @@ func (sys *BucketTargetSys) getRemoteARN(bucket string, target *madmin.BucketTar
return tgt.Arn
}
}
if !madmin.ServiceType(target.Type).IsValid() {
if !target.Type.IsValid() {
return ""
}
return generateARN(target)

View File

@@ -137,7 +137,7 @@ func (c *CoreDNS) list(key string, domain bool) ([]SrvRecord, error) {
var srvRecords []SrvRecord
for _, n := range r.Kvs {
var srvRecord SrvRecord
if err = json.Unmarshal([]byte(n.Value), &srvRecord); err != nil {
if err = json.Unmarshal(n.Value, &srvRecord); err != nil {
return nil, err
}
srvRecord.Key = strings.TrimPrefix(string(n.Key), key)

View File

@@ -218,12 +218,6 @@ func (h dataUsageHash) mod(cycle uint32, cycles uint32) bool {
return uint32(xxhash.Sum64String(string(h)))%cycles == cycle%cycles
}
// addChildString will add a child based on its name.
// If it already exists it will not be added again.
func (e *dataUsageEntry) addChildString(name string) {
e.addChild(hashPath(name))
}
// addChild will add a child based on its hash.
// If it already exists it will not be added again.
func (e *dataUsageEntry) addChild(hash dataUsageHash) {
@@ -291,17 +285,6 @@ func (d *dataUsageCache) searchParent(h dataUsageHash) *dataUsageHash {
return nil
}
// Returns nil if not found.
func (d *dataUsageCache) subCache(path string) dataUsageCache {
dst := dataUsageCache{Info: dataUsageCacheInfo{
Name: path,
LastUpdate: d.Info.LastUpdate,
BloomFilter: d.Info.BloomFilter,
}}
dst.copyWithChildren(d, dataUsageHash(hashPath(path).Key()), nil)
return dst
}
// deleteRecursive will delete an entry recursively, but not change its parent.
func (d *dataUsageCache) deleteRecursive(h dataUsageHash) {
if existing, ok := d.Cache[h.String()]; ok {
@@ -313,37 +296,6 @@ func (d *dataUsageCache) deleteRecursive(h dataUsageHash) {
}
}
// deleteChildren will delete any children, but not the entry itself.
func (d *dataUsageCache) deleteChildren(h dataUsageHash) {
if existing, ok := d.Cache[h.String()]; ok {
for child := range existing.Children {
d.deleteRecursive(dataUsageHash(child))
}
}
}
// replaceRootChild will replace the child of root in d with the root of 'other'.
func (d *dataUsageCache) replaceRootChild(other dataUsageCache) {
otherRoot := other.root()
if otherRoot == nil {
logger.LogIf(GlobalContext, errors.New("replaceRootChild: Source has no root"))
return
}
thisRoot := d.root()
if thisRoot == nil {
logger.LogIf(GlobalContext, errors.New("replaceRootChild: Root of current not found"))
return
}
thisRootHash := d.rootHash()
otherRootHash := other.rootHash()
if thisRootHash == otherRootHash {
logger.LogIf(GlobalContext, errors.New("replaceRootChild: Root of child matches root of destination"))
return
}
d.deleteRecursive(other.rootHash())
d.copyWithChildren(&other, other.rootHash(), &thisRootHash)
}
// keepBuckets will keep only the buckets specified and delete all others.
func (d *dataUsageCache) keepBuckets(b []BucketInfo) {
lu := make(map[dataUsageHash]struct{})
@@ -415,16 +367,6 @@ func (d *dataUsageCache) replace(path, parent string, e dataUsageEntry) {
}
}
// listCache will return all cache paths.
func (d *dataUsageCache) listCache() []string {
dst := make([]string, 0, len(d.Cache))
for k := range d.Cache {
dst = append(dst, k)
}
sort.Strings(dst)
return dst
}
// replaceHashed adds or replaces an entry in the cache based on its hash.
// If a parent is specified it will be added to that if not already there.
// If the parent does not exist, it will be added.

View File

@@ -80,24 +80,6 @@ func (er erasureObjects) getOnlineDisks() (newDisks []StorageAPI) {
return newDisks
}
// getLoadBalancedNDisks - fetches load balanced (sufficiently randomized) disk slice
// with N disks online. If ndisks is zero or negative, then it will return all disks,
// same if ndisks is greater than the number of all disks.
func (er erasureObjects) getLoadBalancedNDisks(ndisks int) (newDisks []StorageAPI) {
disks := er.getLoadBalancedDisks(ndisks != -1)
for _, disk := range disks {
if disk == nil {
continue
}
newDisks = append(newDisks, disk)
ndisks--
if ndisks == 0 {
break
}
}
return
}
// getLoadBalancedDisks - fetches load balanced (sufficiently randomized) disk slice.
// ensures disks that are offline or still healing are skipped.
func (er erasureObjects) getLoadBalancedDisks(optimized bool) []StorageAPI {

View File

@@ -801,82 +801,6 @@ func (fs *FSObjects) GetObjectNInfo(ctx context.Context, bucket, object string,
return objReaderFn(reader, h, opts.CheckPrecondFn, closeFn, rwPoolUnlocker, nsUnlocker)
}
// getObject - wrapper for GetObject
func (fs *FSObjects) getObject(ctx context.Context, bucket, object string, offset int64, length int64, writer io.Writer, etag string, lock bool) (err error) {
if _, err = fs.statBucketDir(ctx, bucket); err != nil {
return toObjectErr(err, bucket)
}
// Offset cannot be negative.
if offset < 0 {
logger.LogIf(ctx, errUnexpected, logger.Application)
return toObjectErr(errUnexpected, bucket, object)
}
// Writer cannot be nil.
if writer == nil {
logger.LogIf(ctx, errUnexpected, logger.Application)
return toObjectErr(errUnexpected, bucket, object)
}
// If it's a directory request, we return an empty body.
if HasSuffix(object, SlashSeparator) {
_, err = writer.Write([]byte(""))
logger.LogIf(ctx, err)
return toObjectErr(err, bucket, object)
}
if bucket != minioMetaBucket {
fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fs.metaJSONFile)
if lock {
_, err = fs.rwPool.Open(fsMetaPath)
if err != nil && err != errFileNotFound {
logger.LogIf(ctx, err)
return toObjectErr(err, bucket, object)
}
defer fs.rwPool.Close(fsMetaPath)
}
}
if etag != "" && etag != defaultEtag {
objEtag, perr := fs.getObjectETag(ctx, bucket, object, lock)
if perr != nil {
return toObjectErr(perr, bucket, object)
}
if objEtag != etag {
logger.LogIf(ctx, InvalidETag{}, logger.Application)
return toObjectErr(InvalidETag{}, bucket, object)
}
}
// Read the object, doesn't exist returns an s3 compatible error.
fsObjPath := pathJoin(fs.fsPath, bucket, object)
reader, size, err := fsOpenFile(ctx, fsObjPath, offset)
if err != nil {
return toObjectErr(err, bucket, object)
}
defer reader.Close()
// For negative length we read everything.
if length < 0 {
length = size - offset
}
// Reply back invalid range if the input offset and length fall out of range.
if offset > size || offset+length > size {
err = InvalidRange{offset, length, size}
logger.LogIf(ctx, err, logger.Application)
return err
}
_, err = io.Copy(writer, io.LimitReader(reader, length))
// The writer will be closed in case of range queries, which will emit ErrClosedPipe.
if err == io.ErrClosedPipe {
err = nil
}
return toObjectErr(err, bucket, object)
}
// Create a new fs.json file, if the existing one is corrupt. Should happen very rarely.
func (fs *FSObjects) createFsJSON(object, fsMetaPath string) error {
fsMeta := newFSMetaV1()
@@ -1377,77 +1301,6 @@ func (fs *FSObjects) isObjectDir(bucket, prefix string) bool {
return len(entries) == 0
}
// getObjectETag is a helper function, which returns only the md5sum
// of the file on the disk.
func (fs *FSObjects) getObjectETag(ctx context.Context, bucket, entry string, lock bool) (string, error) {
fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, entry, fs.metaJSONFile)
var reader io.Reader
var fi os.FileInfo
var size int64
if lock {
// Read `fs.json` to perhaps contend with
// parallel Put() operations.
rlk, err := fs.rwPool.Open(fsMetaPath)
// Ignore if `fs.json` is not available, this is true for pre-existing data.
if err != nil && err != errFileNotFound {
logger.LogIf(ctx, err)
return "", toObjectErr(err, bucket, entry)
}
// If file is not found, we don't need to proceed forward.
if err == errFileNotFound {
return "", nil
}
// Read from fs metadata only if it exists.
defer fs.rwPool.Close(fsMetaPath)
// Fetch the size of the underlying file.
fi, err = rlk.LockedFile.Stat()
if err != nil {
logger.LogIf(ctx, err)
return "", toObjectErr(err, bucket, entry)
}
size = fi.Size()
reader = io.NewSectionReader(rlk.LockedFile, 0, fi.Size())
} else {
var err error
reader, size, err = fsOpenFile(ctx, fsMetaPath, 0)
if err != nil {
return "", toObjectErr(err, bucket, entry)
}
}
// `fs.json` can be empty due to a previously failed
// PutObject() transaction; if we arrive at such
// a situation we just ignore and continue.
if size == 0 {
return "", nil
}
fsMetaBuf, err := ioutil.ReadAll(reader)
if err != nil {
logger.LogIf(ctx, err)
return "", toObjectErr(err, bucket, entry)
}
var fsMeta fsMetaV1
var json = jsoniter.ConfigCompatibleWithStandardLibrary
if err = json.Unmarshal(fsMetaBuf, &fsMeta); err != nil {
return "", err
}
// Check if FS metadata is valid, if not return error.
if !isFSMetaValid(fsMeta.Version) {
logger.LogIf(ctx, errCorruptedFormat)
return "", toObjectErr(errCorruptedFormat, bucket, entry)
}
return extractETag(fsMeta.Meta), nil
}
// ListObjectVersions not implemented for FS mode.
func (fs *FSObjects) ListObjectVersions(ctx context.Context, bucket, prefix, marker, versionMarker, delimiter string, maxKeys int) (loi ListObjectVersionsInfo, e error) {
return loi, NotImplemented{}

View File

@@ -215,9 +215,6 @@ var (
// Hold the old server credentials passed by the environment
globalOldCred auth.Credentials
// Indicates if config is to be encrypted
globalConfigEncrypted bool
globalPublicCerts []*x509.Certificate
globalDomainNames []string // Root domains for virtual host style requests

View File

@@ -218,7 +218,7 @@ func Trace(f http.HandlerFunc, logBody bool, w http.ResponseWriter, r *http.Requ
Time: now,
Proto: r.Proto,
Method: r.Method,
Path: r.URL.Path,
Path: r.URL.RawPath,
RawQuery: redactLDAPPwd(r.URL.RawQuery),
Client: handlers.GetSourceIP(r),
Headers: reqHeaders,

View File

@@ -57,7 +57,7 @@ func etcdKvsToSet(prefix string, kvs []*mvccpb.KeyValue) set.StringSet {
// suffix := "config.json"
// result is foo
func extractPathPrefixAndSuffix(s string, prefix string, suffix string) string {
return pathClean(strings.TrimSuffix(strings.TrimPrefix(string(s), prefix), suffix))
return pathClean(strings.TrimSuffix(strings.TrimPrefix(s, prefix), suffix))
}
// IAMEtcdStore implements IAMStorageAPI
@@ -331,7 +331,7 @@ func (ies *IAMEtcdStore) addUser(ctx context.Context, user string, userType IAMU
return []byte(globalOldCred.SecretKey), nil
}
if _, err := jwtgo.ParseWithClaims(u.Credentials.SessionToken, m, stsTokenCallback); err == nil {
jwt := jwtgo.NewWithClaims(jwtgo.SigningMethodHS512, jwtgo.MapClaims(m))
jwt := jwtgo.NewWithClaims(jwtgo.SigningMethodHS512, m)
if token, err := jwt.SignedString([]byte(globalActiveCred.SecretKey)); err == nil {
u.Credentials.SessionToken = token
err := ies.saveIAMConfig(ctx, &u, getUserIdentityPath(user, userType))

View File

@@ -386,23 +386,6 @@ func interestingCaches(root string, cachesRoot map[string][]string) []string {
return interesting
}
// updateCache will update a cache by id.
// If the cache cannot be found, nil is returned.
// The bucket cache will be locked until the returned done function is called.
func (b *bucketMetacache) updateCache(id string) (cache *metacache, done func()) {
b.mu.Lock()
c, ok := b.caches[id]
if !ok {
b.mu.Unlock()
return nil, func() {}
}
return &c, func() {
c.lastUpdate = UTCNow()
b.caches[id] = c
b.mu.Unlock()
}
}
// updateCacheEntry will update a cache.
// Returns the updated status.
func (b *bucketMetacache) updateCacheEntry(update metacache) (metacache, error) {
@@ -437,18 +420,6 @@ func (b *bucketMetacache) cloneCaches() (map[string]metacache, map[string][]stri
return dst, dst2
}
// getCache will return a clone of a specific metacache.
// Will return nil if the cache doesn't exist.
func (b *bucketMetacache) getCache(id string) *metacache {
b.mu.RLock()
c, ok := b.caches[id]
b.mu.RUnlock()
if !ok {
return nil
}
return &c
}
// deleteAll will delete all on disk data for ALL caches.
// Deletes are performed concurrently.
func (b *bucketMetacache) deleteAll() {

View File

@@ -19,7 +19,6 @@ package cmd
import (
"bytes"
"io"
"os"
"sort"
"strings"
@@ -275,16 +274,6 @@ type metaCacheEntriesSorted struct {
listID string
}
// writeTo will write all objects to the provided output.
func (m metaCacheEntriesSorted) writeTo(writer io.Writer) error {
w := newMetacacheWriter(writer, 1<<20)
if err := w.write(m.o...); err != nil {
w.Close()
return err
}
return w.Close()
}
// shallowClone will create a shallow clone of the array objects,
// but object metadata will not be cloned.
func (m metaCacheEntriesSorted) shallowClone() metaCacheEntriesSorted {
@@ -293,19 +282,6 @@ func (m metaCacheEntriesSorted) shallowClone() metaCacheEntriesSorted {
return m
}
// iterate the entries in order.
// If the iterator function returns false, iterating stops.
func (m *metaCacheEntriesSorted) iterate(fn func(entry metaCacheEntry) (cont bool)) {
if m == nil {
return
}
for _, o := range m.o {
if !fn(o) {
return
}
}
}
// fileInfoVersions converts the metadata to FileInfoVersions where possible.
// Metadata that cannot be decoded is skipped.
func (m *metaCacheEntriesSorted) fileInfoVersions(bucket, prefix, delimiter, afterV string) (versions []ObjectInfo) {
@@ -488,17 +464,6 @@ func (m *metaCacheEntriesSorted) merge(other metaCacheEntriesSorted, limit int)
m.o = merged
}
// filter allows selective filtering with the provided function.
func (m *metaCacheEntriesSorted) filter(fn func(entry *metaCacheEntry) bool) {
dst := m.o[:0]
for _, o := range m.o {
if fn(&o) {
dst = append(dst, o)
}
}
m.o = dst
}
// filterPrefix will filter m to only contain entries with the specified prefix.
func (m *metaCacheEntriesSorted) filterPrefix(s string) {
if s == "" {

View File

@@ -952,7 +952,7 @@ func getMinioProcMetrics() MetricsGroup {
metrics = append(metrics,
Metric{
Description: getMinIOProcessCPUTime(),
Value: float64(stat.CPUTime()),
Value: stat.CPUTime(),
})
return
},

View File

@@ -181,7 +181,7 @@ func healingMetricsPrometheus(ch chan<- prometheus.Metric) {
"Objects for which healing failed in current self healing run",
[]string{"mount_path", "volume_status"}, nil),
prometheus.GaugeValue,
float64(v), string(s[0]), string(s[1]),
float64(v), s[0], s[1],
)
}
}

View File

@@ -498,75 +498,6 @@ func (sys *NotificationSys) updateBloomFilter(ctx context.Context, current uint6
return bf, nil
}
// collectBloomFilter will collect bloom filters from all servers, starting from the specified cycle.
func (sys *NotificationSys) collectBloomFilter(ctx context.Context, from uint64) (*bloomFilter, error) {
var req = bloomFilterRequest{
Current: 0,
Oldest: from,
}
// Load initial state from local...
var bf *bloomFilter
bfr, err := intDataUpdateTracker.cycleFilter(ctx, req)
logger.LogIf(ctx, err)
if err == nil && bfr.Complete {
nbf := intDataUpdateTracker.newBloomFilter()
bf = &nbf
_, err = bf.ReadFrom(bytes.NewReader(bfr.Filter))
logger.LogIf(ctx, err)
}
if !bfr.Complete {
// If local isn't complete just return early
return nil, nil
}
var mu sync.Mutex
g := errgroup.WithNErrs(len(sys.peerClients))
for idx, client := range sys.peerClients {
if client == nil {
continue
}
client := client
g.Go(func() error {
serverBF, err := client.cycleServerBloomFilter(ctx, req)
if false && intDataUpdateTracker.debug {
b, _ := json.MarshalIndent(serverBF, "", " ")
logger.Info("Disk %v, Bloom filter: %v", client.host.Name, string(b))
}
// Keep lock while checking result.
mu.Lock()
defer mu.Unlock()
if err != nil || !serverBF.Complete || bf == nil {
logger.LogIf(ctx, err)
bf = nil
return nil
}
var tmp bloom.BloomFilter
_, err = tmp.ReadFrom(bytes.NewReader(serverBF.Filter))
if err != nil {
logger.LogIf(ctx, err)
bf = nil
return nil
}
if bf.BloomFilter == nil {
bf.BloomFilter = &tmp
} else {
err = bf.Merge(&tmp)
if err != nil {
logger.LogIf(ctx, err)
bf = nil
return nil
}
}
return nil
}, idx)
}
g.Wait()
return bf, nil
}
// findEarliestCleanBloomFilter will find the earliest bloom filter across the cluster
// where the directory is clean.
// Due to how objects are stored this can include object names.

View File

@@ -33,7 +33,7 @@ import (
)
func md5Header(data []byte) map[string]string {
return map[string]string{"etag": getMD5Hash([]byte(data))}
return map[string]string{"etag": getMD5Hash(data)}
}
// Wrapper for calling PutObject tests for both Erasure multiple disks and single node setup.

View File

@@ -119,12 +119,12 @@ func checkAssumeRoleAuth(ctx context.Context, r *http.Request) (user auth.Creden
return user, true, ErrSTSAccessDenied
case authTypeSigned:
s3Err := isReqAuthenticated(ctx, r, globalServerRegion, serviceSTS)
if APIErrorCode(s3Err) != ErrNone {
if s3Err != ErrNone {
return user, false, STSErrorCode(s3Err)
}
user, _, s3Err = getReqAccessKeyV4(r, globalServerRegion, serviceSTS)
if APIErrorCode(s3Err) != ErrNone {
if s3Err != ErrNone {
return user, false, STSErrorCode(s3Err)
}

View File

@@ -78,8 +78,6 @@ func TestMain(m *testing.M) {
SecretKey: auth.DefaultSecretKey,
}
globalConfigEncrypted = true
// disable ENVs which interfere with tests.
for _, env := range []string{
crypto.EnvKMSAutoEncryption,
@@ -1278,35 +1276,6 @@ func getRandomBucketName() string {
}
// NewEOFWriter returns a Writer that writes to w,
// but returns EOF error after writing n bytes.
func NewEOFWriter(w io.Writer, n int64) io.Writer {
return &EOFWriter{w, n}
}
type EOFWriter struct {
w io.Writer
n int64
}
// io.Writer implementation designed to error out with io.EOF after writing `n` bytes.
func (t *EOFWriter) Write(p []byte) (n int, err error) {
if t.n <= 0 {
return -1, io.EOF
}
// real write
n = len(p)
if int64(n) > t.n {
n = int(t.n)
}
n, err = t.w.Write(p[0:n])
t.n -= int64(n)
if err == nil {
n = len(p)
}
return
}
// construct URL for http requests for bucket operations.
func makeTestTargetURL(endPoint, bucketName, objectName string, queryValues url.Values) string {
urlStr := endPoint + SlashSeparator

View File

@@ -211,6 +211,9 @@ type xlMetaV2Version struct {
// Valid xl meta xlMetaV2Version is valid
func (j xlMetaV2Version) Valid() bool {
if !j.Type.valid() {
return false
}
switch j.Type {
case LegacyType:
return j.ObjectV1 != nil &&

View File

@@ -758,25 +758,6 @@ func (s *xlStorage) DeleteVol(ctx context.Context, volume string, forceDelete bo
return nil
}
func (s *xlStorage) isLeaf(volume string, leafPath string) bool {
volumeDir, err := s.getVolDir(volume)
if err != nil {
return false
}
if err = Access(pathJoin(volumeDir, leafPath, xlStorageFormatFile)); err == nil {
return true
}
if osIsNotExist(err) {
// We need fallback code for directories that might contain a
// legacy `xl.json`; in such a situation we just rename
// and proceed. If the rename is successful we know that it
// is the leaf, since `xl.json` was present.
return s.renameLegacyMetadata(volumeDir, leafPath) == nil
}
return false
}
// ListDir - return all the entries at the given directory path.
// If an entry is a directory it will be returned with a trailing SlashSeparator.
func (s *xlStorage) ListDir(ctx context.Context, volume, dirPath string, count int) (entries []string, err error) {

View File

@@ -135,12 +135,12 @@ func TestMarshalLifecycleConfig(t *testing.T) {
{
Status: "Enabled",
Filter: Filter{Prefix: Prefix{string: "prefix-1", set: true}},
Expiration: Expiration{Date: ExpirationDate(midnightTS)},
Expiration: Expiration{Date: midnightTS},
},
{
Status: "Enabled",
Filter: Filter{Prefix: Prefix{string: "prefix-1", set: true}},
Expiration: Expiration{Date: ExpirationDate(midnightTS)},
Expiration: Expiration{Date: midnightTS},
NoncurrentVersionTransition: NoncurrentVersionTransition{NoncurrentDays: 2, StorageClass: "TEST"},
},
},

View File

@@ -65,7 +65,7 @@ func getNewUUID() (string, error) {
// validateID - checks if ID is valid or not.
func (r Rule) validateID() error {
IDLen := len(string(r.ID))
IDLen := len(r.ID)
// generate new ID when not provided
// cannot be longer than 255 characters
if IDLen == 0 {

View File

@@ -33,10 +33,11 @@ func GetInfo(path string) (info Info, err error) {
}
reservedBlocks := s.Bfree - s.Bavail
info = Info{
Total: uint64(s.Frsize) * (s.Blocks - reservedBlocks),
Free: uint64(s.Frsize) * s.Bavail,
Files: s.Files,
Ffree: s.Ffree,
Total: uint64(s.Frsize) * (s.Blocks - reservedBlocks),
Free: uint64(s.Frsize) * s.Bavail,
Files: s.Files,
Ffree: s.Ffree,
//nolint:unconvert
FSType: getFSType(int64(s.Type)),
}
// Check for overflows.

View File

@@ -65,7 +65,7 @@ func (e *FuncExpr) getFunctionName() FuncName {
case e.SFunc != nil:
return FuncName(strings.ToUpper(e.SFunc.FunctionName))
case e.Count != nil:
return FuncName(aggFnCount)
return aggFnCount
case e.Cast != nil:
return sqlFnCast
case e.Substring != nil:

View File

@@ -113,43 +113,6 @@ type nvmeIdentController struct {
Vs [1024]byte // Vendor Specific
} // 4096 bytes
type nvmeLBAF struct {
Ms uint16
Ds uint8
Rp uint8
}
//nolint:deadcode
type nvmeIdentNamespace struct {
Nsze uint64
Ncap uint64
Nuse uint64
Nsfeat uint8
Nlbaf uint8
Flbas uint8
Mc uint8
Dpc uint8
Dps uint8
Nmic uint8
Rescap uint8
Fpi uint8
Rsvd33 uint8
Nawun uint16
Nawupf uint16
Nacwu uint16
Nabsn uint16
Nabo uint16
Nabspf uint16
Rsvd46 [2]byte
Nvmcap [16]byte
Rsvd64 [40]byte
Nguid [16]byte
EUI64 [8]byte
Lbaf [16]nvmeLBAF
Rsvd192 [192]byte
Vs [3712]byte
} // 4096 bytes
//nolint:deadcode
type nvmeSMARTLog struct {
CritWarning uint8

View File

@@ -70,6 +70,7 @@ func getSysinfoMemoryLimit() (limit uint64, err error) {
// Total RAM is always the product of
// unit size and total RAM.
//nolint:unconvert
return uint64(unit) * uint64(totalRAM), nil
}