@@ -15,6 +15,7 @@
 static struct cached_fid *init_cached_dir(const char *path);
 static void free_cached_dir(struct cached_fid *cfid);
 static void smb2_close_cached_fid(struct kref *ref);
+static void cfids_laundromat_worker(struct work_struct *work);
 
 static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
 						    const char *path,
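Note: this hunk only adds the worker's forward declaration; the matching structure change lives in cached_dir.h and is outside this diff. As a rough sketch of what the converted struct presumably looks like (the laundromat_work field name is taken from its uses below; surrounding members are abbreviated):

struct cached_fids {
	/* protects entries, num_entries and each cfid's on_list/has_lease */
	spinlock_t cfid_list_lock;
	int num_entries;
	struct list_head entries;
	/* replaces the old "struct task_struct *laundromat;" member */
	struct delayed_work laundromat_work;
};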
@@ -169,15 +170,18 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
 		return -ENOENT;
 	}
 	/*
-	 * At this point we either have a lease already and we can just
-	 * return it. If not we are guaranteed to be the only thread accessing
-	 * this cfid.
+	 * Return cached fid if it has a lease.  Otherwise, it is either a new
+	 * entry or laundromat worker removed it from @cfids->entries.  Caller
+	 * will put last reference if the latter.
 	 */
+	spin_lock(&cfids->cfid_list_lock);
 	if (cfid->has_lease) {
+		spin_unlock(&cfids->cfid_list_lock);
 		*ret_cfid = cfid;
 		kfree(utf16_path);
 		return 0;
 	}
+	spin_unlock(&cfids->cfid_list_lock);
 
 	/*
 	 * Skip any prefix paths in @path as lookup_positive_unlocked() ends up
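The extra lock round-trip above is not cosmetic: with the laundromat now clearing has_lease under cfids->cfid_list_lock (see cfids_laundromat_worker below), any check-then-act on the flag must take the same lock, and the next hunk applies the identical rule to the writer side. Reduced to its essentials, the pattern looks like this (cfid_lease_valid is a hypothetical helper, not part of the patch):

/* Sketch: all readers and writers of cfid->has_lease hold cfid_list_lock. */
static bool cfid_lease_valid(struct cached_fids *cfids, struct cached_fid *cfid)
{
	bool valid;

	spin_lock(&cfids->cfid_list_lock);
	valid = cfid->has_lease;	/* may be cleared by the laundromat */
	spin_unlock(&cfids->cfid_list_lock);
	return valid;
}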
@@ -294,9 +298,11 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
 			goto oshr_free;
 		}
 	}
+	spin_lock(&cfids->cfid_list_lock);
 	cfid->dentry = dentry;
 	cfid->time = jiffies;
 	cfid->has_lease = true;
+	spin_unlock(&cfids->cfid_list_lock);
 
 oshr_free:
 	kfree(utf16_path);
@@ -305,32 +311,36 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
 	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
 	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
 	spin_lock(&cfids->cfid_list_lock);
-	if (rc && !cfid->has_lease) {
-		if (cfid->on_list) {
-			list_del(&cfid->entry);
-			cfid->on_list = false;
-			cfids->num_entries--;
+	if (!cfid->has_lease) {
+		if (rc) {
+			if (cfid->on_list) {
+				list_del(&cfid->entry);
+				cfid->on_list = false;
+				cfids->num_entries--;
+			}
+			rc = -ENOENT;
+		} else {
+			/*
+			 * We are guaranteed to have two references at this
+			 * point. One for the caller and one for a potential
+			 * lease. Release the Lease-ref so that the directory
+			 * will be closed when the caller closes the cached
+			 * handle.
+			 */
+			spin_unlock(&cfids->cfid_list_lock);
+			kref_put(&cfid->refcount, smb2_close_cached_fid);
+			goto out;
 		}
-		rc = -ENOENT;
 	}
 	spin_unlock(&cfids->cfid_list_lock);
-	if (!rc && !cfid->has_lease) {
-		/*
-		 * We are guaranteed to have two references at this point.
-		 * One for the caller and one for a potential lease.
-		 * Release the Lease-ref so that the directory will be closed
-		 * when the caller closes the cached handle.
-		 */
-		kref_put(&cfid->refcount, smb2_close_cached_fid);
-	}
 	if (rc) {
 		if (cfid->is_open)
 			SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
 				   cfid->fid.volatile_fid);
 		free_cached_dir(cfid);
 		cfid = NULL;
 	}
-
+out:
 	if (rc == 0) {
 		*ret_cfid = cfid;
 		atomic_inc(&tcon->num_remote_opens);
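The reworked error path leans on standard kref discipline, exactly as the in-code comment says: the cfid carries one reference for the caller and one for the lease, and whichever reference drops last runs smb2_close_cached_fid(). A generic, self-contained sketch of that two-reference pattern (illustrative names, not the patch's code):

#include <linux/kref.h>
#include <linux/slab.h>

struct obj {
	struct kref refcount;
};

static void obj_release(struct kref *ref)
{
	struct obj *o = container_of(ref, struct obj, refcount);

	kfree(o);				/* the last put frees/closes */
}

static void obj_example(struct obj *o)
{
	kref_init(&o->refcount);		/* ref #1: the caller */
	kref_get(&o->refcount);			/* ref #2: the lease */

	kref_put(&o->refcount, obj_release);	/* lease ref dropped */
	kref_put(&o->refcount, obj_release);	/* caller ref: frees */
}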
@@ -572,53 +582,51 @@ static void free_cached_dir(struct cached_fid *cfid)
 	kfree(cfid);
 }
 
-static int
-cifs_cfids_laundromat_thread(void *p)
+static void cfids_laundromat_worker(struct work_struct *work)
 {
-	struct cached_fids *cfids = p;
+	struct cached_fids *cfids;
 	struct cached_fid *cfid, *q;
-	struct list_head entry;
+	LIST_HEAD(entry);
 
-	while (!kthread_should_stop()) {
-		ssleep(1);
-		INIT_LIST_HEAD(&entry);
-		if (kthread_should_stop())
-			return 0;
-		spin_lock(&cfids->cfid_list_lock);
-		list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
-			if (time_after(jiffies, cfid->time + HZ * dir_cache_timeout)) {
-				list_del(&cfid->entry);
-				list_add(&cfid->entry, &entry);
-				cfids->num_entries--;
-			}
-		}
-		spin_unlock(&cfids->cfid_list_lock);
+	cfids = container_of(work, struct cached_fids, laundromat_work.work);
 
-		list_for_each_entry_safe(cfid, q, &entry, entry) {
+	spin_lock(&cfids->cfid_list_lock);
+	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
+		if (cfid->time &&
+		    time_after(jiffies, cfid->time + HZ * dir_cache_timeout)) {
 			cfid->on_list = false;
-			list_del(&cfid->entry);
+			list_move(&cfid->entry, &entry);
+			cfids->num_entries--;
+			/* To prevent race with smb2_cached_lease_break() */
+			kref_get(&cfid->refcount);
+		}
+	}
+	spin_unlock(&cfids->cfid_list_lock);
+
+	list_for_each_entry_safe(cfid, q, &entry, entry) {
+		list_del(&cfid->entry);
+		/*
+		 * Cancel and wait for the work to finish in case we are racing
+		 * with it.
+		 */
+		cancel_work_sync(&cfid->lease_break);
+		if (cfid->has_lease) {
 			/*
-			 * Cancel, and wait for the work to finish in
-			 * case we are racing with it.
+			 * Our lease has not yet been cancelled from the server
+			 * so we need to drop the reference.
 			 */
-			cancel_work_sync(&cfid->lease_break);
-			if (cfid->has_lease) {
-				/*
-				 * We lease has not yet been cancelled from
-				 * the server so we need to drop the reference.
-				 */
-				spin_lock(&cfids->cfid_list_lock);
-				cfid->has_lease = false;
-				spin_unlock(&cfids->cfid_list_lock);
-				kref_put(&cfid->refcount, smb2_close_cached_fid);
-			}
+			spin_lock(&cfids->cfid_list_lock);
+			cfid->has_lease = false;
+			spin_unlock(&cfids->cfid_list_lock);
+			kref_put(&cfid->refcount, smb2_close_cached_fid);
 		}
+		/* Drop the extra reference opened above */
+		kref_put(&cfid->refcount, smb2_close_cached_fid);
 	}
-
-	return 0;
+	queue_delayed_work(cifsiod_wq, &cfids->laundromat_work,
+			   dir_cache_timeout * HZ);
 }
 
-
 struct cached_fids *init_cached_dirs(void)
 {
 	struct cached_fids *cfids;
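Two idioms in the new worker are worth calling out: it recovers its cached_fids via container_of() on the embedded delayed_work (note the trailing .work, because the workqueue hands the callback the inner struct work_struct), and it re-arms itself at the end, replacing the kthread's ssleep() loop. Stripped down to just that pattern (hypothetical names; schedule_delayed_work() stands in for queueing on cifsiod_wq):

#include <linux/workqueue.h>

struct scanner {
	struct delayed_work dwork;
};

static void scan_worker(struct work_struct *work)
{
	/* delayed_work embeds a work_struct, hence the ".work" member */
	struct scanner *s = container_of(work, struct scanner, dwork.work);

	/* ... one scan pass over s's state ... */

	/* re-arm for the next pass; replaces the old ssleep(1) loop */
	schedule_delayed_work(&s->dwork, 10 * HZ);
}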
@@ -629,19 +637,10 @@ struct cached_fids *init_cached_dirs(void)
 	spin_lock_init(&cfids->cfid_list_lock);
 	INIT_LIST_HEAD(&cfids->entries);
 
-	/*
-	 * since we're in a cifs function already, we know that
-	 * this will succeed. No need for try_module_get().
-	 */
-	__module_get(THIS_MODULE);
-	cfids->laundromat = kthread_run(cifs_cfids_laundromat_thread,
-					cfids, "cifsd-cfid-laundromat");
-	if (IS_ERR(cfids->laundromat)) {
-		cifs_dbg(VFS, "Failed to start cfids laundromat thread.\n");
-		kfree(cfids);
-		module_put(THIS_MODULE);
-		return NULL;
-	}
+	INIT_DELAYED_WORK(&cfids->laundromat_work, cfids_laundromat_worker);
+	queue_delayed_work(cifsiod_wq, &cfids->laundromat_work,
+			   dir_cache_timeout * HZ);
+
 	return cfids;
 }
 
@@ -657,11 +656,7 @@ void free_cached_dirs(struct cached_fids *cfids)
 	if (cfids == NULL)
 		return;
 
-	if (cfids->laundromat) {
-		kthread_stop(cfids->laundromat);
-		cfids->laundromat = NULL;
-		module_put(THIS_MODULE);
-	}
+	cancel_delayed_work_sync(&cfids->laundromat_work);
 
 	spin_lock(&cfids->cfid_list_lock);
 	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
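Teardown also gets simpler: cancel_delayed_work_sync() both cancels a pending timer and waits out a running instance, and the workqueue API documents it as safe against self-requeueing work, so the re-arm at the end of cfids_laundromat_worker() cannot sneak a new run in behind the cancel. That, plus the fact that a work item does not need to pin the module the way the dedicated kthread did, is why the __module_get()/module_put() pairing disappears from init_cached_dirs() and free_cached_dirs().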