Skip to content

Commit 339e9e1

Browse files
author
Al Viro
committed
don't try to cut corners in shrink_lock_dentry()
That is to say, do *not* treat the ->d_inode or ->d_parent changes as "it's hard, return false; somebody must have grabbed it, so even if it has zero refcount, we don't need to bother killing it - final dput() from whoever grabbed it would've done everything". First of all, that is not guaranteed. It might have been dropped by shrink_kill() handling of victim's parent, which would've found it already on a shrink list (ours) and decided that they don't need to put it on their shrink list. What's more, dentry_kill() is doing pretty much the same thing, cutting its own set of corners (it assumes that dentry can't go from positive to negative, so its inode can change but only once and only in one direction). Doing that right allows us to get rid of that not-quite-duplication and removes the only reason for re-incrementing refcount before the call of dentry_kill(). Replacement is called lock_for_kill(); called under rcu_read_lock and with ->d_lock held. If it returns false, dentry has non-zero refcount and the same locks are held. If it returns true, dentry has zero refcount and all locks required by __dentry_kill() are taken. Part of __lock_parent() had been lifted into lock_parent() to allow its reuse. Now it's called with rcu_read_lock already held and dentry already unlocked. Note that this is not the final change - locking requirements for __dentry_kill() are going to change later in the series and the set of locks taken by lock_for_kill() will be adjusted. Both lock_parent() and __lock_parent() will be gone once that happens. Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
1 parent f05441c commit 339e9e1

File tree

1 file changed

+66
-93
lines changed

1 file changed

+66
-93
lines changed

fs/dcache.c

Lines changed: 66 additions & 93 deletions
Original file line numberDiff line numberDiff line change
@@ -625,8 +625,6 @@ static void __dentry_kill(struct dentry *dentry)
625625
static struct dentry *__lock_parent(struct dentry *dentry)
626626
{
627627
struct dentry *parent;
628-
rcu_read_lock();
629-
spin_unlock(&dentry->d_lock);
630628
again:
631629
parent = READ_ONCE(dentry->d_parent);
632630
spin_lock(&parent->d_lock);
@@ -642,7 +640,6 @@ static struct dentry *__lock_parent(struct dentry *dentry)
642640
spin_unlock(&parent->d_lock);
643641
goto again;
644642
}
645-
rcu_read_unlock();
646643
if (parent != dentry)
647644
spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
648645
else
@@ -657,7 +654,64 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
657654
return NULL;
658655
if (likely(spin_trylock(&parent->d_lock)))
659656
return parent;
660-
return __lock_parent(dentry);
657+
rcu_read_lock();
658+
spin_unlock(&dentry->d_lock);
659+
parent = __lock_parent(dentry);
660+
rcu_read_unlock();
661+
return parent;
662+
}
663+
664+
/*
665+
* Lock a dentry for feeding it to __dentry_kill().
666+
* Called under rcu_read_lock() and dentry->d_lock; the former
667+
* guarantees that nothing we access will be freed under us.
668+
* Note that dentry is *not* protected from concurrent dentry_kill(),
669+
* d_delete(), etc.
670+
*
671+
* Return false if dentry is busy. Otherwise, return true and have
672+
* that dentry's inode and parent both locked.
673+
*/
674+
675+
static bool lock_for_kill(struct dentry *dentry)
676+
{
677+
struct inode *inode = dentry->d_inode;
678+
struct dentry *parent = dentry->d_parent;
679+
680+
if (unlikely(dentry->d_lockref.count))
681+
return false;
682+
683+
if (inode && unlikely(!spin_trylock(&inode->i_lock)))
684+
goto slow;
685+
if (dentry == parent)
686+
return true;
687+
if (likely(spin_trylock(&parent->d_lock)))
688+
return true;
689+
690+
if (inode)
691+
spin_unlock(&inode->i_lock);
692+
slow:
693+
spin_unlock(&dentry->d_lock);
694+
695+
for (;;) {
696+
if (inode)
697+
spin_lock(&inode->i_lock);
698+
parent = __lock_parent(dentry);
699+
if (likely(inode == dentry->d_inode))
700+
break;
701+
if (inode)
702+
spin_unlock(&inode->i_lock);
703+
inode = dentry->d_inode;
704+
spin_unlock(&dentry->d_lock);
705+
if (parent)
706+
spin_unlock(&parent->d_lock);
707+
}
708+
if (likely(!dentry->d_lockref.count))
709+
return true;
710+
if (inode)
711+
spin_unlock(&inode->i_lock);
712+
if (parent)
713+
spin_unlock(&parent->d_lock);
714+
return false;
661715
}
662716

663717
static inline bool retain_dentry(struct dentry *dentry)
@@ -710,45 +764,16 @@ EXPORT_SYMBOL(d_mark_dontcache);
710764
static struct dentry *dentry_kill(struct dentry *dentry)
711765
__releases(dentry->d_lock)
712766
{
713-
struct inode *inode = dentry->d_inode;
714-
struct dentry *parent = NULL;
715767

716-
if (inode && unlikely(!spin_trylock(&inode->i_lock)))
717-
goto slow_positive;
718-
719-
if (!IS_ROOT(dentry)) {
720-
parent = dentry->d_parent;
721-
if (unlikely(!spin_trylock(&parent->d_lock))) {
722-
parent = __lock_parent(dentry);
723-
if (likely(inode || !dentry->d_inode))
724-
goto got_locks;
725-
/* negative that became positive */
726-
if (parent)
727-
spin_unlock(&parent->d_lock);
728-
inode = dentry->d_inode;
729-
goto slow_positive;
730-
}
731-
}
732768
dentry->d_lockref.count--;
733-
__dentry_kill(dentry);
734-
return parent;
735-
736-
slow_positive:
737-
spin_unlock(&dentry->d_lock);
738-
spin_lock(&inode->i_lock);
739-
spin_lock(&dentry->d_lock);
740-
parent = lock_parent(dentry);
741-
got_locks:
742-
dentry->d_lockref.count--;
743-
if (likely(dentry->d_lockref.count == 0)) {
769+
rcu_read_lock();
770+
if (likely(lock_for_kill(dentry))) {
771+
struct dentry *parent = dentry->d_parent;
772+
rcu_read_unlock();
744773
__dentry_kill(dentry);
745-
return parent;
774+
return parent != dentry ? parent : NULL;
746775
}
747-
/* we are keeping it, after all */
748-
if (inode)
749-
spin_unlock(&inode->i_lock);
750-
if (parent)
751-
spin_unlock(&parent->d_lock);
776+
rcu_read_unlock();
752777
spin_unlock(&dentry->d_lock);
753778
return NULL;
754779
}
@@ -1100,58 +1125,6 @@ void d_prune_aliases(struct inode *inode)
11001125
}
11011126
EXPORT_SYMBOL(d_prune_aliases);
11021127

1103-
/*
1104-
* Lock a dentry from shrink list.
1105-
* Called under rcu_read_lock() and dentry->d_lock; the former
1106-
* guarantees that nothing we access will be freed under us.
1107-
* Note that dentry is *not* protected from concurrent dentry_kill(),
1108-
* d_delete(), etc.
1109-
*
1110-
* Return false if dentry has been disrupted or grabbed, leaving
1111-
* the caller to kick it off-list. Otherwise, return true and have
1112-
* that dentry's inode and parent both locked.
1113-
*/
1114-
static bool shrink_lock_dentry(struct dentry *dentry)
1115-
{
1116-
struct inode *inode;
1117-
struct dentry *parent;
1118-
1119-
if (dentry->d_lockref.count)
1120-
return false;
1121-
1122-
inode = dentry->d_inode;
1123-
if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
1124-
spin_unlock(&dentry->d_lock);
1125-
spin_lock(&inode->i_lock);
1126-
spin_lock(&dentry->d_lock);
1127-
if (unlikely(dentry->d_lockref.count))
1128-
goto out;
1129-
/* changed inode means that somebody had grabbed it */
1130-
if (unlikely(inode != dentry->d_inode))
1131-
goto out;
1132-
}
1133-
1134-
parent = dentry->d_parent;
1135-
if (IS_ROOT(dentry) || likely(spin_trylock(&parent->d_lock)))
1136-
return true;
1137-
1138-
spin_unlock(&dentry->d_lock);
1139-
spin_lock(&parent->d_lock);
1140-
if (unlikely(parent != dentry->d_parent)) {
1141-
spin_unlock(&parent->d_lock);
1142-
spin_lock(&dentry->d_lock);
1143-
goto out;
1144-
}
1145-
spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1146-
if (likely(!dentry->d_lockref.count))
1147-
return true;
1148-
spin_unlock(&parent->d_lock);
1149-
out:
1150-
if (inode)
1151-
spin_unlock(&inode->i_lock);
1152-
return false;
1153-
}
1154-
11551128
static inline void shrink_kill(struct dentry *victim, struct list_head *list)
11561129
{
11571130
struct dentry *parent = victim->d_parent;
@@ -1170,7 +1143,7 @@ void shrink_dentry_list(struct list_head *list)
11701143
dentry = list_entry(list->prev, struct dentry, d_lru);
11711144
spin_lock(&dentry->d_lock);
11721145
rcu_read_lock();
1173-
if (!shrink_lock_dentry(dentry)) {
1146+
if (!lock_for_kill(dentry)) {
11741147
bool can_free;
11751148
rcu_read_unlock();
11761149
d_shrink_del(dentry);
@@ -1614,7 +1587,7 @@ void shrink_dcache_parent(struct dentry *parent)
16141587
d_walk(parent, &data, select_collect2);
16151588
if (data.victim) {
16161589
spin_lock(&data.victim->d_lock);
1617-
if (!shrink_lock_dentry(data.victim)) {
1590+
if (!lock_for_kill(data.victim)) {
16181591
spin_unlock(&data.victim->d_lock);
16191592
rcu_read_unlock();
16201593
} else {

0 commit comments

Comments
 (0)