@@ -17,6 +17,11 @@ static void free_cached_dir(struct cached_fid *cfid);
 static void smb2_close_cached_fid(struct kref *ref);
 static void cfids_laundromat_worker(struct work_struct *work);
 
+struct cached_dir_dentry {
+	struct list_head entry;
+	struct dentry *dentry;
+};
+
 static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
 						    const char *path,
 						    bool lookup_only,
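The new struct cached_dir_dentry exists because a cached_fid's own list_head keeps it linked on cfids->entries, so a second carrier node is needed to collect just the dentries onto a private list. The point of the pattern is that dput() may sleep and therefore must not run under a spinlock; a condensed sketch of the shape used in the hunks below (illustrative lock and variable names, not the exact code):

	struct cached_dir_dentry *tmp, *q;
	LIST_HEAD(deferred);

	spin_lock(&some_lock);			/* no sleeping from here on */
	tmp = kmalloc(sizeof(*tmp), GFP_ATOMIC);
	if (tmp) {
		tmp->dentry = cfid->dentry;	/* steal the reference */
		cfid->dentry = NULL;
		list_add_tail(&tmp->entry, &deferred);
	}
	spin_unlock(&some_lock);

	list_for_each_entry_safe(tmp, q, &deferred, entry) {
		list_del(&tmp->entry);
		dput(tmp->dentry);		/* safe to sleep now */
		kfree(tmp);
	}

The real loop in close_all_cached_dirs() below applies this across every tcon hanging off the superblock.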
@@ -472,7 +477,10 @@ void close_all_cached_dirs(struct cifs_sb_info *cifs_sb)
 	struct cifs_tcon *tcon;
 	struct tcon_link *tlink;
 	struct cached_fids *cfids;
+	struct cached_dir_dentry *tmp_list, *q;
+	LIST_HEAD(entry);
 
+	spin_lock(&cifs_sb->tlink_tree_lock);
 	for (node = rb_first(root); node; node = rb_next(node)) {
 		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
 		tcon = tlink_tcon(tlink);
@@ -481,11 +489,30 @@ void close_all_cached_dirs(struct cifs_sb_info *cifs_sb)
 		cfids = tcon->cfids;
 		if (cfids == NULL)
 			continue;
+		spin_lock(&cfids->cfid_list_lock);
 		list_for_each_entry(cfid, &cfids->entries, entry) {
-			dput(cfid->dentry);
+			tmp_list = kmalloc(sizeof(*tmp_list), GFP_ATOMIC);
+			if (tmp_list == NULL)
+				break;
+			spin_lock(&cfid->fid_lock);
+			tmp_list->dentry = cfid->dentry;
 			cfid->dentry = NULL;
+			spin_unlock(&cfid->fid_lock);
+
+			list_add_tail(&tmp_list->entry, &entry);
 		}
+		spin_unlock(&cfids->cfid_list_lock);
+	}
+	spin_unlock(&cifs_sb->tlink_tree_lock);
+
+	list_for_each_entry_safe(tmp_list, q, &entry, entry) {
+		list_del(&tmp_list->entry);
+		dput(tmp_list->dentry);
+		kfree(tmp_list);
 	}
+
+	/* Flush any pending work that will drop dentries */
+	flush_workqueue(cfid_put_wq);
 }
 
 /*
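Two details in this hunk are easy to miss. The kmalloc() uses GFP_ATOMIC because both tlink_tree_lock and cfid_list_lock are held, and a sleeping allocation under a spinlock is a bug; the cost is that the allocation may fail, which is why the loop simply breaks and relies on the final flush_workqueue(cfid_put_wq) to wait out any already-queued work that still owns a dentry. A minimal illustration of the constraint (hypothetical lock and variable names):

	spin_lock(&lock);
	/* GFP_KERNEL would be a bug here: it is allowed to sleep */
	p = kmalloc(size, GFP_ATOMIC);	/* never sleeps, but may fail */
	if (!p)
		goto out_unlock;	/* caller needs a fallback path */
	/* ... */
out_unlock:
	spin_unlock(&lock);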
@@ -496,14 +523,18 @@ void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
 {
 	struct cached_fids *cfids = tcon->cfids;
 	struct cached_fid *cfid, *q;
-	LIST_HEAD(entry);
 
 	if (cfids == NULL)
 		return;
 
+	/*
+	 * Mark all the cfids as closed, and move them to the cfids->dying list.
+	 * They'll be cleaned up later by cfids_invalidation_worker. Take
+	 * a reference to each cfid during this process.
+	 */
 	spin_lock(&cfids->cfid_list_lock);
 	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
-		list_move(&cfid->entry, &entry);
+		list_move(&cfid->entry, &cfids->dying);
 		cfids->num_entries--;
 		cfid->is_open = false;
 		cfid->on_list = false;
@@ -516,26 +547,47 @@ void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
 		} else
 			kref_get(&cfid->refcount);
 	}
+	/*
+	 * Queue dropping of the dentries once locks have been dropped
+	 */
+	if (!list_empty(&cfids->dying))
+		queue_work(cfid_put_wq, &cfids->invalidation_work);
 	spin_unlock(&cfids->cfid_list_lock);
-
-	list_for_each_entry_safe(cfid, q, &entry, entry) {
-		list_del(&cfid->entry);
-		cancel_work_sync(&cfid->lease_break);
-		/*
-		 * Drop the ref-count from above, either the lease-ref (if there
-		 * was one) or the extra one acquired.
-		 */
-		kref_put(&cfid->refcount, smb2_close_cached_fid);
-	}
 }
 
 static void
-smb2_cached_lease_break(struct work_struct *work)
+cached_dir_offload_close(struct work_struct *work)
 {
 	struct cached_fid *cfid = container_of(work,
-			struct cached_fid, lease_break);
+			struct cached_fid, close_work);
+	struct cifs_tcon *tcon = cfid->tcon;
+
+	WARN_ON(cfid->on_list);
 
 	kref_put(&cfid->refcount, smb2_close_cached_fid);
+	cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_cached_close);
+}
+
+/*
+ * Release the cached directory's dentry, and then queue work to drop cached
+ * directory itself (closing on server if needed).
+ *
+ * Must be called with a reference to the cached_fid and a reference to the
+ * tcon.
+ */
+static void cached_dir_put_work(struct work_struct *work)
+{
+	struct cached_fid *cfid = container_of(work, struct cached_fid,
+					       put_work);
+	struct dentry *dentry;
+
+	spin_lock(&cfid->fid_lock);
+	dentry = cfid->dentry;
+	cfid->dentry = NULL;
+	spin_unlock(&cfid->fid_lock);
+
+	dput(dentry);
+	queue_work(serverclose_wq, &cfid->close_work);
 }
 
 int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
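cached_dir_put_work() and cached_dir_offload_close() split teardown across two workqueues: the first stage (on cfid_put_wq) promptly drops the dentry, then chains the second stage onto serverclose_wq, where the final kref_put() may issue a blocking close to the server. A sketch of this chained two-stage shape under assumed names (victim, slow_wq; not the cifs code):

	struct victim {
		struct work_struct stage1;	/* quick: release VFS objects */
		struct work_struct stage2;	/* slow: may block on the network */
	};

	static struct workqueue_struct *slow_wq;	/* assumed created elsewhere */

	static void stage2_fn(struct work_struct *w)
	{
		struct victim *v = container_of(w, struct victim, stage2);

		kfree(v);	/* final release; may sleep before this */
	}

	static void stage1_fn(struct work_struct *w)
	{
		struct victim *v = container_of(w, struct victim, stage1);

		/* drop local references (the dput() analogue) ... */
		queue_work(slow_wq, &v->stage2);	/* chain the slow part */
	}

Keeping the slow stage on its own queue means a stuck server close cannot back up cfid_put_wq, the queue that close_all_cached_dirs() flushes while waiting for dentries to be dropped.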
@@ -562,8 +614,10 @@ int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
 			cfid->on_list = false;
 			cfids->num_entries--;
 
-			queue_work(cifsiod_wq,
-				   &cfid->lease_break);
+			++tcon->tc_count;
+			trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
+					    netfs_trace_tcon_ref_get_cached_lease_break);
+			queue_work(cfid_put_wq, &cfid->put_work);
 			spin_unlock(&cfids->cfid_list_lock);
 			return true;
 		}
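Because the final kref_put() now happens from a work item, the tcon must stay pinned until cached_dir_offload_close() calls cifs_put_tcon(); that is what the tc_count increment and its matching trace point are for. The general rule, sketched with a plain kref (the tcon actually uses its tc_count counter and its own locking, so this is an analogy only):

	struct parent {
		struct kref ref;
	};

	struct child {
		struct parent *parent;
		struct work_struct work;
	};

	static void parent_release(struct kref *k)
	{
		kfree(container_of(k, struct parent, ref));
	}

	static void child_work_fn(struct work_struct *w)
	{
		struct child *c = container_of(w, struct child, work);

		/* ... c->parent is guaranteed live here ... */
		kref_put(&c->parent->ref, parent_release);	/* unpin */
	}

	static void submit(struct child *c, struct workqueue_struct *wq)
	{
		kref_get(&c->parent->ref);	/* pin before going async */
		queue_work(wq, &c->work);
	}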
@@ -585,7 +639,8 @@ static struct cached_fid *init_cached_dir(const char *path)
 		return NULL;
 	}
 
-	INIT_WORK(&cfid->lease_break, smb2_cached_lease_break);
+	INIT_WORK(&cfid->close_work, cached_dir_offload_close);
+	INIT_WORK(&cfid->put_work, cached_dir_put_work);
 	INIT_LIST_HEAD(&cfid->entry);
 	INIT_LIST_HEAD(&cfid->dirents.entries);
 	mutex_init(&cfid->dirents.de_mutex);
@@ -598,6 +653,9 @@ static void free_cached_dir(struct cached_fid *cfid)
 {
 	struct cached_dirent *dirent, *q;
 
+	WARN_ON(work_pending(&cfid->close_work));
+	WARN_ON(work_pending(&cfid->put_work));
+
 	dput(cfid->dentry);
 	cfid->dentry = NULL;
 
@@ -615,10 +673,30 @@ static void free_cached_dir(struct cached_fid *cfid)
 	kfree(cfid);
 }
 
+static void cfids_invalidation_worker(struct work_struct *work)
+{
+	struct cached_fids *cfids = container_of(work, struct cached_fids,
+						 invalidation_work);
+	struct cached_fid *cfid, *q;
+	LIST_HEAD(entry);
+
+	spin_lock(&cfids->cfid_list_lock);
+	/* move cfids->dying to the local list */
+	list_cut_before(&entry, &cfids->dying, &cfids->dying);
+	spin_unlock(&cfids->cfid_list_lock);
+
+	list_for_each_entry_safe(cfid, q, &entry, entry) {
+		list_del(&cfid->entry);
+		/* Drop the ref-count acquired in invalidate_all_cached_dirs */
+		kref_put(&cfid->refcount, smb2_close_cached_fid);
+	}
+}
+
 static void cfids_laundromat_worker(struct work_struct *work)
 {
 	struct cached_fids *cfids;
 	struct cached_fid *cfid, *q;
+	struct dentry *dentry;
 	LIST_HEAD(entry);
 
 	cfids = container_of(work, struct cached_fids, laundromat_work.work);
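A subtlety in cfids_invalidation_worker(): list_cut_before() is passed the list head itself as the cut point, which moves every element onto the local list and leaves cfids->dying empty, so for this whole-list case it behaves like list_splice_init(). A minimal sketch (generic src/lock names):

	LIST_HEAD(local);

	spin_lock(&lock);
	/* both forms empty @src into @local; the hunk above uses the first */
	list_cut_before(&local, &src, &src);
	/* list_splice_init(&src, &local); */
	spin_unlock(&lock);
	/* iterate @local with no lock held */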
@@ -644,18 +722,28 @@ static void cfids_laundromat_worker(struct work_struct *work)
 
 	list_for_each_entry_safe(cfid, q, &entry, entry) {
 		list_del(&cfid->entry);
-		/*
-		 * Cancel and wait for the work to finish in case we are racing
-		 * with it.
-		 */
-		cancel_work_sync(&cfid->lease_break);
-		/*
-		 * Drop the ref-count from above, either the lease-ref (if there
-		 * was one) or the extra one acquired.
-		 */
-		kref_put(&cfid->refcount, smb2_close_cached_fid);
+
+		spin_lock(&cfid->fid_lock);
+		dentry = cfid->dentry;
+		cfid->dentry = NULL;
+		spin_unlock(&cfid->fid_lock);
+
+		dput(dentry);
+		if (cfid->is_open) {
+			spin_lock(&cifs_tcp_ses_lock);
+			++cfid->tcon->tc_count;
+			trace_smb3_tcon_ref(cfid->tcon->debug_id, cfid->tcon->tc_count,
+					    netfs_trace_tcon_ref_get_cached_laundromat);
+			spin_unlock(&cifs_tcp_ses_lock);
+			queue_work(serverclose_wq, &cfid->close_work);
+		} else
+			/*
+			 * Drop the ref-count from above, either the lease-ref (if there
+			 * was one) or the extra one acquired.
+			 */
+			kref_put(&cfid->refcount, smb2_close_cached_fid);
 	}
-	queue_delayed_work(cifsiod_wq, &cfids->laundromat_work,
+	queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
 			   dir_cache_timeout * HZ);
 }
 
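The laundromat now strips each expired cfid's dentry itself (no spinlock is held at that point, so the dput() is safe), then either offloads the server close to serverclose_wq, pinning the tcon under cifs_tcp_ses_lock first, or just drops the reference if the handle was never opened on the server. Its final line is the usual self-rescheduling delayed-work shape, sketched here with hypothetical names:

	struct my_cache {
		struct delayed_work scan_work;
	};

	static struct workqueue_struct *my_wq;	/* assumed allocated elsewhere */

	static void scan_worker(struct work_struct *work)
	{
		struct my_cache *c = container_of(work, struct my_cache,
						  scan_work.work);

		/* ... expire stale entries ... */

		/* re-arm: the worker queues itself to run again later */
		queue_delayed_work(my_wq, &c->scan_work, 30 * HZ);
	}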
@@ -668,9 +756,11 @@ struct cached_fids *init_cached_dirs(void)
 		return NULL;
 	spin_lock_init(&cfids->cfid_list_lock);
 	INIT_LIST_HEAD(&cfids->entries);
+	INIT_LIST_HEAD(&cfids->dying);
 
+	INIT_WORK(&cfids->invalidation_work, cfids_invalidation_worker);
 	INIT_DELAYED_WORK(&cfids->laundromat_work, cfids_laundromat_worker);
-	queue_delayed_work(cifsiod_wq, &cfids->laundromat_work,
+	queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
 			   dir_cache_timeout * HZ);
 
 	return cfids;
@@ -689,13 +779,19 @@ void free_cached_dirs(struct cached_fids *cfids)
 		return;
 
 	cancel_delayed_work_sync(&cfids->laundromat_work);
+	cancel_work_sync(&cfids->invalidation_work);
 
 	spin_lock(&cfids->cfid_list_lock);
 	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
 		cfid->on_list = false;
 		cfid->is_open = false;
 		list_move(&cfid->entry, &entry);
 	}
+	list_for_each_entry_safe(cfid, q, &cfids->dying, entry) {
+		cfid->on_list = false;
+		cfid->is_open = false;
+		list_move(&cfid->entry, &entry);
+	}
 	spin_unlock(&cfids->cfid_list_lock);
 
 	list_for_each_entry_safe(cfid, q, &entry, entry) {