@@ -32,6 +32,100 @@ MODULE_ALIAS("devname:fuse");
 
 static struct kmem_cache *fuse_req_cachep;
 
+const unsigned long fuse_timeout_timer_freq =
+	secs_to_jiffies(FUSE_TIMEOUT_TIMER_FREQ);
+
+bool fuse_request_expired(struct fuse_conn *fc, struct list_head *list)
+{
+	struct fuse_req *req;
+
+	req = list_first_entry_or_null(list, struct fuse_req, list);
+	if (!req)
+		return false;
+	return time_is_before_jiffies(req->create_time + fc->timeout.req_timeout);
+}
+
+bool fuse_fpq_processing_expired(struct fuse_conn *fc, struct list_head *processing)
+{
+	int i;
+
+	for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
+		if (fuse_request_expired(fc, &processing[i]))
+			return true;
+
+	return false;
+}
+
+/*
+ * Check if any requests aren't being completed by the time the request timeout
+ * elapses. To do so, we:
+ * - check the fiq pending list
+ * - check the bg queue
+ * - check the fpq io and processing lists
+ *
+ * To make this fast, we only check against the head request on each list since
+ * these are generally queued in order of creation time (eg newer requests get
+ * queued to the tail). We might miss a few edge cases (eg requests transitioning
+ * between lists, re-sent requests at the head of the pending list having a
+ * later creation time than other requests on that list, etc.) but that is fine
+ * since if the request never gets fulfilled, it will eventually be caught.
+ */
+void fuse_check_timeout(struct work_struct *work)
+{
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct fuse_conn *fc = container_of(dwork, struct fuse_conn,
+					    timeout.work);
+	struct fuse_iqueue *fiq = &fc->iq;
+	struct fuse_dev *fud;
+	struct fuse_pqueue *fpq;
+	bool expired = false;
+
+	if (!atomic_read(&fc->num_waiting))
+		goto out;
+
+	spin_lock(&fiq->lock);
+	expired = fuse_request_expired(fc, &fiq->pending);
+	spin_unlock(&fiq->lock);
+	if (expired)
+		goto abort_conn;
+
+	spin_lock(&fc->bg_lock);
+	expired = fuse_request_expired(fc, &fc->bg_queue);
+	spin_unlock(&fc->bg_lock);
+	if (expired)
+		goto abort_conn;
+
+	spin_lock(&fc->lock);
+	if (!fc->connected) {
+		spin_unlock(&fc->lock);
+		return;
+	}
+	list_for_each_entry(fud, &fc->devices, entry) {
+		fpq = &fud->pq;
+		spin_lock(&fpq->lock);
+		if (fuse_request_expired(fc, &fpq->io) ||
+		    fuse_fpq_processing_expired(fc, fpq->processing)) {
+			spin_unlock(&fpq->lock);
+			spin_unlock(&fc->lock);
+			goto abort_conn;
+		}
+
+		spin_unlock(&fpq->lock);
+	}
+	spin_unlock(&fc->lock);
+
+	if (fuse_uring_request_expired(fc))
+		goto abort_conn;
+
+out:
+	queue_delayed_work(system_wq, &fc->timeout.work,
+			   fuse_timeout_timer_freq);
+	return;
+
+abort_conn:
+	fuse_abort_conn(fc);
+}
+
 static void fuse_request_init(struct fuse_mount *fm, struct fuse_req *req)
 {
 	INIT_LIST_HEAD(&req->list);
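The head-only scan above is the whole trick: requests are appended to each queue in creation order, so one timestamp comparison per list bounds the check at O(1) per queue. Here is a minimal user-space sketch of the same strategy; the struct, the fake jiffies counter, and the queue helpers are invented for illustration and are not kernel API.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct req {
	unsigned long create_time;	/* fake-jiffies stamp at enqueue */
	struct req *next;
};

struct queue {
	struct req *head;		/* oldest request: strict FIFO order */
	struct req **tail;
};

static unsigned long jiffies;		/* advanced manually in this model */

/* Mirrors fuse_request_expired(): the head is the oldest entry, so a
 * single comparison decides whether the whole list is healthy. */
static bool request_expired(struct queue *q, unsigned long timeout)
{
	if (!q->head)
		return false;
	return (long)(jiffies - (q->head->create_time + timeout)) > 0;
}

int main(void)
{
	struct queue q = { .head = NULL, .tail = &q.head };
	struct req r1 = { .create_time = 0 }, r2 = { .create_time = 5 };

	*q.tail = &r1; q.tail = &r1.next;	/* enqueue in creation order */
	*q.tail = &r2; q.tail = &r2.next;
	r2.next = NULL;

	jiffies = 4;
	printf("t=4:  expired=%d\n", request_expired(&q, 10));	/* 0 */
	jiffies = 11;
	printf("t=11: expired=%d\n", request_expired(&q, 10));	/* 1 */
	return 0;
}

The signed subtraction mirrors the wraparound-safe time_after() comparison that time_is_before_jiffies() expands to in the kernel.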
@@ -40,6 +134,7 @@ static void fuse_request_init(struct fuse_mount *fm, struct fuse_req *req)
 	refcount_set(&req->count, 1);
 	__set_bit(FR_PENDING, &req->flags);
 	req->fm = fm;
+	req->create_time = jiffies;
 }
 
 static struct fuse_req *fuse_request_alloc(struct fuse_mount *fm, gfp_t flags)
@@ -407,6 +502,24 @@ static int queue_interrupt(struct fuse_req *req)
 	return 0;
 }
 
+bool fuse_remove_pending_req(struct fuse_req *req, spinlock_t *lock)
+{
+	spin_lock(lock);
+	if (test_bit(FR_PENDING, &req->flags)) {
+		/*
+		 * FR_PENDING does not get cleared as the request will end
+		 * up in destruction anyway.
+		 */
+		list_del(&req->list);
+		spin_unlock(lock);
+		__fuse_put_request(req);
+		req->out.h.error = -EINTR;
+		return true;
+	}
+	spin_unlock(lock);
+	return false;
+}
+
 static void request_wait_answer(struct fuse_req *req)
 {
 	struct fuse_conn *fc = req->fm->fc;
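Because fuse_remove_pending_req() takes the queue lock as a parameter, one helper can serve queues guarded by different locks (the classic fiq pending list here; the io-uring path presumably supplies its own). Below is a hedged pthread analogue of the test-and-unlink pattern; the names, the doubly linked list, and the hardcoded -EINTR value are all invented for illustration.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct request {
	bool pending;			/* analogue of FR_PENDING */
	struct request *prev, *next;	/* doubly linked, like list_head */
	int error;
};

/* The caller passes whichever lock guards the queue the request sits
 * on, so the unlink logic is written once. */
static bool remove_pending_req(struct request *req, pthread_mutex_t *lock)
{
	pthread_mutex_lock(lock);
	if (req->pending) {
		/* unlink; as in the patch, the pending flag stays set
		 * because the request is about to be torn down anyway */
		req->prev->next = req->next;
		req->next->prev = req->prev;
		pthread_mutex_unlock(lock);
		req->error = -4;	/* -EINTR */
		return true;
	}
	pthread_mutex_unlock(lock);
	return false;
}

int main(void)
{
	pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	struct request head = {0}, req = { .pending = true };
	bool removed;

	head.next = &req; head.prev = &req;	/* circular list: head <-> req */
	req.next = &head; req.prev = &head;

	removed = remove_pending_req(&req, &lock);
	printf("removed=%d error=%d\n", removed, req.error);
	return 0;
}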
@@ -428,22 +541,20 @@ static void request_wait_answer(struct fuse_req *req)
 	}
 
 	if (!test_bit(FR_FORCE, &req->flags)) {
+		bool removed;
+
 		/* Only fatal signals may interrupt this */
 		err = wait_event_killable(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
 		if (!err)
 			return;
 
-		spin_lock(&fiq->lock);
-		/* Request is not yet in userspace, bail out */
-		if (test_bit(FR_PENDING, &req->flags)) {
-			list_del(&req->list);
-			spin_unlock(&fiq->lock);
-			__fuse_put_request(req);
-			req->out.h.error = -EINTR;
+		if (test_bit(FR_URING, &req->flags))
+			removed = fuse_uring_remove_pending_req(req);
+		else
+			removed = fuse_remove_pending_req(req, &fiq->lock);
+		if (removed)
 			return;
-		}
-		spin_unlock(&fiq->lock);
 	}
 
 	/*
@@ -1533,14 +1644,10 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
 				   struct fuse_copy_state *cs)
 {
 	struct fuse_notify_inval_entry_out outarg;
-	int err = -ENOMEM;
-	char *buf;
+	int err;
+	char *buf = NULL;
 	struct qstr name;
 
-	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
-	if (!buf)
-		goto err;
-
 	err = -EINVAL;
 	if (size < sizeof(outarg))
 		goto err;
@@ -1550,13 +1657,18 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
 		goto err;
 
 	err = -ENAMETOOLONG;
-	if (outarg.namelen > FUSE_NAME_MAX)
+	if (outarg.namelen > fc->name_max)
 		goto err;
 
 	err = -EINVAL;
 	if (size != sizeof(outarg) + outarg.namelen + 1)
 		goto err;
 
+	err = -ENOMEM;
+	buf = kzalloc(outarg.namelen + 1, GFP_KERNEL);
+	if (!buf)
+		goto err;
+
 	name.name = buf;
 	name.len = outarg.namelen;
 	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
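This hunk and the matching fuse_notify_delete() change below share one shape: validate the length field first, then allocate a buffer sized to the actual name instead of a worst-case FUSE_NAME_MAX buffer up front. A user-space sketch of that validate-then-allocate ordering follows; the struct and function names are made up for the example.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy stand-in for the notify message header. */
struct notify_out { unsigned int namelen; };

/* Validate first, then size the allocation to the actual name, the way
 * the reordered notify handlers now do. */
static int copy_name(const struct notify_out *out, const char *payload,
		     size_t payload_len, unsigned int name_max, char **bufp)
{
	char *buf;

	if (out->namelen > name_max)
		return -ENAMETOOLONG;
	if (payload_len != out->namelen + 1)	/* name + NUL terminator */
		return -EINVAL;

	buf = calloc(1, out->namelen + 1);	/* exact size, not worst case */
	if (!buf)
		return -ENOMEM;
	memcpy(buf, payload, out->namelen + 1);
	*bufp = buf;
	return 0;
}

int main(void)
{
	struct notify_out out = { .namelen = 5 };
	char *buf = NULL;
	int err = copy_name(&out, "hello", 6, 1024, &buf);

	printf("err=%d name=%s\n", err, err ? "(none)" : buf);
	free(buf);
	return 0;
}

Deferring the allocation also means a message that fails validation never costs an allocation at all.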
@@ -1581,14 +1693,10 @@ static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
 				struct fuse_copy_state *cs)
 {
 	struct fuse_notify_delete_out outarg;
-	int err = -ENOMEM;
-	char *buf;
+	int err;
+	char *buf = NULL;
 	struct qstr name;
 
-	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
-	if (!buf)
-		goto err;
-
 	err = -EINVAL;
 	if (size < sizeof(outarg))
 		goto err;
@@ -1598,13 +1706,18 @@ static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
 		goto err;
 
 	err = -ENAMETOOLONG;
-	if (outarg.namelen > FUSE_NAME_MAX)
+	if (outarg.namelen > fc->name_max)
 		goto err;
 
 	err = -EINVAL;
 	if (size != sizeof(outarg) + outarg.namelen + 1)
 		goto err;
 
+	err = -ENOMEM;
+	buf = kzalloc(outarg.namelen + 1, GFP_KERNEL);
+	if (!buf)
+		goto err;
+
 	name.name = buf;
 	name.len = outarg.namelen;
 	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
@@ -2275,6 +2388,9 @@ void fuse_abort_conn(struct fuse_conn *fc)
 	LIST_HEAD(to_end);
 	unsigned int i;
 
+	if (fc->timeout.req_timeout)
+		cancel_delayed_work(&fc->timeout.work);
+
 	/* Background queuing checks fc->connected under bg_lock */
 	spin_lock(&fc->bg_lock);
 	fc->connected = 0;
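fuse_check_timeout() requeues itself at the end of every scan, so an abort must also cancel the pending work or the timer would keep rearming against a dead connection. A rough user-space analogue of that self-rearming/cancel lifecycle, using a thread and an atomic flag in place of the kernel workqueue (all names invented for the sketch):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-in for the self-rearming delayed work: the checker reschedules
 * itself each period until teardown clears "armed", the role that
 * cancel_delayed_work() plays in fuse_abort_conn() above. */
static atomic_bool armed = true;

static void *timeout_worker(void *arg)
{
	(void)arg;
	while (atomic_load(&armed)) {	/* each iteration = one rearmed work item */
		/* ... scan the queues, abort the connection on expiry ... */
		printf("tick: scanning for expired requests\n");
		usleep(100 * 1000);	/* stand-in for the timer period */
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, timeout_worker, NULL);
	usleep(350 * 1000);
	atomic_store(&armed, false);	/* "abort": stop the rearm loop */
	pthread_join(&t, NULL);
	return 0;
}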