Commit 6367b49

Author: Al Viro
retain_dentry(): introduce a trimmed-down lockless variant
fast_dput() contains a small piece of code, preceded by scary comments about 5 times longer than it. What is actually done there is a trimmed-down subset of retain_dentry() - in some situations we can tell that retain_dentry() would have returned true without ever needing ->d_lock, and that's what that code checks. If these checks come out true, fast_dput() can declare that we are done without bothering with ->d_lock; otherwise it has to take the lock and do the full variant of the retain_dentry() checks.

The trimmed-down variant of the checks is hard to follow, and it's asking for trouble - if we ever decide to change the rules in retain_dentry(), we'll have to remember to update that code. It turns out that an equivalent variant of these checks, more obviously parallel to retain_dentry(), is not just possible but easy to unify with retain_dentry() itself, by passing it a new boolean argument ('locked') to distinguish between the full semantics and the trimmed-down one.

Note that in the lockless case true is returned only when the locked variant would have returned true without ever needing the lock; false means "punt to the locking path and recheck there".

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
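The shape of the change is a common pattern: a single predicate that can be called either locklessly or under the lock, where a lockless "false" only means "cannot tell cheaply, fall back to the locked path". Below is a minimal standalone sketch of that pattern; the names (cache_entry, worth_retaining and its fields) are invented for illustration and are not the dcache code, which appears in the diff further down.

#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for a cached object; the fields are illustration
 * only and do not correspond to struct dentry.
 */
struct cache_entry {
	bool hashed;      /* still reachable by lookup */
	bool on_lru;      /* already on the LRU list   */
	bool referenced;  /* LRU "recently used" mark  */
};

/*
 * One predicate, two modes.  With locked == false, true means "retaining
 * is definitely the right answer and needs no further work"; false means
 * "take the lock and run the same function again with locked == true".
 */
static bool worth_retaining(struct cache_entry *e, bool locked)
{
	if (!e->hashed)		/* unreachable - never retain */
		return false;

	if (!e->on_lru || !e->referenced) {
		/*
		 * Retaining would require modifying the object (LRU
		 * insertion or marking it referenced), which needs the
		 * lock - so the lockless caller must punt.
		 */
		if (!locked)
			return false;
		if (!e->on_lru)
			e->on_lru = true;
		else
			e->referenced = true;
	}
	return true;
}

int main(void)
{
	struct cache_entry e = { .hashed = true, .on_lru = true,
				 .referenced = false };

	/* Lockless attempt first; fall back to the "locked" path on false. */
	if (worth_retaining(&e, false))
		printf("retained without the lock\n");
	else if (worth_retaining(&e, true))	/* pretend we took the lock */
		printf("retained after taking the lock\n");
	else
		printf("not retained\n");
	return 0;
}

In the commit itself the same split shows up as retain_dentry(dentry, false) on the lockless path in fast_dput() and retain_dentry(dentry, true) on the locked paths.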
Parent: 1c18edd

1 file changed, 47 additions and 48 deletions

fs/dcache.c

@@ -665,30 +665,57 @@ static bool lock_for_kill(struct dentry *dentry)
 	return false;
 }
 
-static inline bool retain_dentry(struct dentry *dentry)
+/*
+ * Decide if dentry is worth retaining. Usually this is called with dentry
+ * locked; if not locked, we are more limited and might not be able to tell
+ * without a lock. False in this case means "punt to locked path and recheck".
+ *
+ * In case we aren't locked, these predicates are not "stable". However, it is
+ * sufficient that at some point after we dropped the reference the dentry was
+ * hashed and the flags had the proper value. Other dentry users may have
+ * re-gotten a reference to the dentry and change that, but our work is done -
+ * we can leave the dentry around with a zero refcount.
+ */
+static inline bool retain_dentry(struct dentry *dentry, bool locked)
 {
-	WARN_ON(d_in_lookup(dentry));
+	unsigned int d_flags;
 
-	/* Unreachable? Get rid of it */
+	smp_rmb();
+	d_flags = READ_ONCE(dentry->d_flags);
+
+	// Unreachable? Nobody would be able to look it up, no point retaining
 	if (unlikely(d_unhashed(dentry)))
 		return false;
 
-	if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
+	// Same if it's disconnected
+	if (unlikely(d_flags & DCACHE_DISCONNECTED))
 		return false;
 
-	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
-		if (dentry->d_op->d_delete(dentry))
+	// ->d_delete() might tell us not to bother, but that requires
+	// ->d_lock; can't decide without it
+	if (unlikely(d_flags & DCACHE_OP_DELETE)) {
+		if (!locked || dentry->d_op->d_delete(dentry))
 			return false;
 	}
 
-	if (unlikely(dentry->d_flags & DCACHE_DONTCACHE))
+	// Explicitly told not to bother
+	if (unlikely(d_flags & DCACHE_DONTCACHE))
 		return false;
 
-	/* retain; LRU fodder */
-	if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
+	// At this point it looks like we ought to keep it. We also might
+	// need to do something - put it on LRU if it wasn't there already
+	// and mark it referenced if it was on LRU, but not marked yet.
+	// Unfortunately, both actions require ->d_lock, so in lockless
+	// case we'd have to punt rather than doing those.
+	if (unlikely(!(d_flags & DCACHE_LRU_LIST))) {
+		if (!locked)
+			return false;
 		d_lru_add(dentry);
-	else if (unlikely(!(dentry->d_flags & DCACHE_REFERENCED)))
+	} else if (unlikely(!(d_flags & DCACHE_REFERENCED))) {
+		if (!locked)
+			return false;
 		dentry->d_flags |= DCACHE_REFERENCED;
+	}
 	return true;
 }
 
@@ -720,7 +747,6 @@ EXPORT_SYMBOL(d_mark_dontcache);
 static inline bool fast_dput(struct dentry *dentry)
 {
 	int ret;
-	unsigned int d_flags;
 
 	/*
 	 * try to decrement the lockref optimistically.
@@ -749,45 +775,18 @@ static inline bool fast_dput(struct dentry *dentry)
 		return true;
 
 	/*
-	 * Careful, careful. The reference count went down
-	 * to zero, but we don't hold the dentry lock, so
-	 * somebody else could get it again, and do another
-	 * dput(), and we need to not race with that.
-	 *
-	 * However, there is a very special and common case
-	 * where we don't care, because there is nothing to
-	 * do: the dentry is still hashed, it does not have
-	 * a 'delete' op, and it's referenced and already on
-	 * the LRU list.
-	 *
-	 * NOTE! Since we aren't locked, these values are
-	 * not "stable". However, it is sufficient that at
-	 * some point after we dropped the reference the
-	 * dentry was hashed and the flags had the proper
-	 * value. Other dentry users may have re-gotten
-	 * a reference to the dentry and change that, but
-	 * our work is done - we can leave the dentry
-	 * around with a zero refcount.
-	 *
-	 * Nevertheless, there are two cases that we should kill
-	 * the dentry anyway.
-	 * 1. free disconnected dentries as soon as their refcount
-	 *    reached zero.
-	 * 2. free dentries if they should not be cached.
+	 * Can we decide that decrement of refcount is all we needed without
+	 * taking the lock? There's a very common case when it's all we need -
+	 * dentry looks like it ought to be retained and there's nothing else
+	 * to do.
 	 */
-	smp_rmb();
-	d_flags = READ_ONCE(dentry->d_flags);
-	d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST | DCACHE_OP_DELETE |
-			DCACHE_DISCONNECTED | DCACHE_DONTCACHE;
-
-	/* Nothing to do? Dropping the reference was all we needed? */
-	if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
+	if (retain_dentry(dentry, false))
 		return true;
 
 	/*
-	 * Not the fast normal case? Get the lock. We've already decremented
-	 * the refcount, but we'll need to re-check the situation after
-	 * getting the lock.
+	 * Either not worth retaining or we can't tell without the lock.
+	 * Get the lock, then. We've already decremented the refcount to 0,
+	 * but we'll need to re-check the situation after getting the lock.
 	 */
 	spin_lock(&dentry->d_lock);
 
@@ -798,7 +797,7 @@ static inline bool fast_dput(struct dentry *dentry)
 	 * don't need to do anything else.
 	 */
 locked:
-	if (dentry->d_lockref.count || retain_dentry(dentry)) {
+	if (dentry->d_lockref.count || retain_dentry(dentry, true)) {
 		spin_unlock(&dentry->d_lock);
 		return true;
 	}
@@ -847,7 +846,7 @@ void dput(struct dentry *dentry)
 	dentry = __dentry_kill(dentry);
 	if (!dentry)
 		return;
-	if (retain_dentry(dentry)) {
+	if (retain_dentry(dentry, true)) {
 		spin_unlock(&dentry->d_lock);
 		return;
 	}
