Remove stale entry spurious logging (#7663)

The problem in the current code was that we were
removing an entry from the local lock map without
considering that a different entry for the same
resource may exist, since locks can be acquired in
parallel before we decide whether a lock is stale.

The sequence of events is as follows (a simplified sketch follows the list):

 - Lock("resource")
 - lockMaintenance() finds a long-lived lock on this "resource"
 - The owner node rebooted, so it now returns Expired() as true
   for this "resource"
 - Unlock("resource") succeeds in quorum
 - By this time the application has retried and acquired a new
   Lock() on the same "resource"
 - With Expired() returning true from the earlier call, we
   proceed to purge the entry from the local lockMap, but the
   local lockMap reports a different entry for the expired UID,
   which results in a spurious log entry.
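
A minimal, runnable Go sketch of that interleaving (simplified stand-in types; localLocker, lockRequesterInfo, the UIDs and this removeEntry helper are illustrative, not the actual minio implementation):

package main

import "fmt"

// Simplified stand-ins for the real minio types.
type lockRequesterInfo struct {
	UID    string
	Writer bool
}

type localLocker struct {
	lockMap map[string][]lockRequesterInfo
}

// removeEntry deletes the entry only when the UID matches; a mismatch is
// not an error, it simply means the expired lock is already gone.
func (l *localLocker) removeEntry(name, uid string) bool {
	lris, ok := l.lockMap[name]
	if !ok {
		return false
	}
	for i, lri := range lris {
		if lri.UID == uid {
			if len(lris) == 1 {
				delete(l.lockMap, name)
			} else {
				l.lockMap[name] = append(lris[:i], lris[i+1:]...)
			}
			return true
		}
	}
	return false
}

func main() {
	l := &localLocker{lockMap: make(map[string][]lockRequesterInfo)}

	// Lock("resource") acquired with UID "uid-1".
	l.lockMap["resource"] = []lockRequesterInfo{{UID: "uid-1", Writer: true}}

	// lockMaintenance records "uid-1" as a stale-lock candidate,
	// and Expired() later reports it as true.
	staleUID := "uid-1"

	// Unlock("resource") succeeds in quorum; the application retries
	// and acquires a new lock with UID "uid-2" on the same resource.
	delete(l.lockMap, "resource")
	l.lockMap["resource"] = []lockRequesterInfo{{UID: "uid-2", Writer: true}}

	// Maintenance now purges the "stale" entry: the UID no longer matches,
	// so the removal is a no-op rather than an error worth logging.
	fmt.Println("removed:", l.removeEntry("resource", staleUID)) // removed: false
	fmt.Println("held by:", l.lockMap["resource"][0].UID)        // held by: uid-2
}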

This PR removes that log message, since this
situation is an expected scenario.
Harshavardhana 2019-05-22 12:21:36 -07:00 committed by kannappanr
parent 59e847aebe
commit 59e1d94770
2 changed files with 15 additions and 24 deletions


@@ -17,12 +17,8 @@
package cmd
import (
"context"
"errors"
"path"
"time"
"github.com/minio/minio/cmd/logger"
)
const lockRESTVersion = "v1"
@@ -53,28 +49,22 @@ type lockResponse struct {
func (l *localLocker) removeEntryIfExists(nlrip nameLockRequesterInfoPair) {
// Check if entry is still in map (could have been removed altogether by 'concurrent' (R)Unlock of last entry)
if lri, ok := l.lockMap[nlrip.name]; ok {
if !l.removeEntry(nlrip.name, nlrip.lri.UID, &lri) {
// Remove failed, in case it is a:
if nlrip.lri.Writer {
// Writer: this should never happen as the whole (mapped) entry should have been deleted
reqInfo := (&logger.ReqInfo{}).AppendTags("name", nlrip.name)
reqInfo.AppendTags("uid", nlrip.lri.UID)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, errors.New("Lock maintenance failed to remove entry for write lock (should never happen)"))
} // Reader: this can happen if multiple read locks were active and
// the one we are looking for has been released concurrently (so it is fine).
} // Removal went okay, all is fine.
// Even if the entry exists, it may not be the same entry that was
// considered expired, so we simply attempt to remove it; if removal
// is not possible, there is nothing we need to do.
l.removeEntry(nlrip.name, nlrip.lri.UID, &lri)
}
}
// removeEntry either, based on the uid of the lock message, removes a single entry from the
// lockRequesterInfo array or the whole array from the map (in case of a write lock or last read lock)
// removeEntry based on the uid of the lock message, removes a single entry from the
// lockRequesterInfo array or the whole array from the map (in case of a write lock
// or last read lock)
func (l *localLocker) removeEntry(name, uid string, lri *[]lockRequesterInfo) bool {
// Find correct entry to remove based on uid.
for index, entry := range *lri {
if entry.UID == uid {
if len(*lri) == 1 {
// Remove the (last) lock.
// Remove the write lock.
delete(l.lockMap, name)
} else {
// Remove the appropriate read lock.
@@ -84,6 +74,7 @@ func (l *localLocker) removeEntry(name, uid string, lri *[]lockRequesterInfo) bo
return true
}
}
// None found, return false; perhaps the entry was removed in a previous run.
return false
}


@@ -286,17 +286,17 @@ func (l *lockRESTServer) lockMaintenance(interval time.Duration) {
Resource: nlrip.name,
})
// Close the connection regardless of the call response.
c.Close()
// For successful response, verify if lock is indeed active or stale.
// For successful response, verify if lock was indeed active or stale.
if expired {
// The lock is no longer active at server that originated the lock
// So remove the lock from the map.
// The lock is no longer active at server that originated
// the lock, attempt to remove the lock.
l.ll.mutex.Lock()
l.ll.removeEntryIfExists(nlrip) // Purge the stale entry if it exists.
l.ll.mutex.Unlock()
}
// Close the connection regardless of the call response.
c.Close()
}
}