@@ -1888,17 +1888,57 @@ unsigned long uprobe_get_trap_addr(struct pt_regs *regs)
 	return instruction_pointer(regs);
 }
 
-static void free_ret_instance(struct return_instance *ri, bool cleanup_hprobe)
+static void ri_pool_push(struct uprobe_task *utask, struct return_instance *ri)
 {
+	ri->cons_cnt = 0;
+	ri->next = utask->ri_pool;
+	utask->ri_pool = ri;
+}
+
+static struct return_instance *ri_pool_pop(struct uprobe_task *utask)
+{
+	struct return_instance *ri = utask->ri_pool;
+
+	if (likely(ri))
+		utask->ri_pool = ri->next;
+
+	return ri;
+}
+
+static void ri_free(struct return_instance *ri)
+{
+	kfree(ri->extra_consumers);
+	kfree_rcu(ri, rcu);
+}
+
+static void free_ret_instance(struct uprobe_task *utask,
+			      struct return_instance *ri, bool cleanup_hprobe)
+{
+	unsigned seq;
+
 	if (cleanup_hprobe) {
 		enum hprobe_state hstate;
 
 		(void)hprobe_consume(&ri->hprobe, &hstate);
 		hprobe_finalize(&ri->hprobe, hstate);
 	}
 
-	kfree(ri->extra_consumers);
-	kfree_rcu(ri, rcu);
+	/*
+	 * At this point return_instance is unlinked from utask's
+	 * return_instances list and this has become visible to ri_timer().
+	 * If seqcount now indicates that ri_timer's return instance
+	 * processing loop isn't active, we can return ri into the pool of
+	 * to-be-reused return instances for future uretprobes. If ri_timer()
+	 * happens to be running right now, though, we fall back to safety and
+	 * just perform RCU-delayed freeing of ri.
+	 */
+	if (raw_seqcount_try_begin(&utask->ri_seqcount, seq)) {
+		/* immediate reuse of ri without RCU GP is OK */
+		ri_pool_push(utask, ri);
+	} else {
+		/* we might be racing with ri_timer(), so play it safe */
+		ri_free(ri);
+	}
 }
 
 /*
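The interesting part of this hunk is the freeing decision: a recycled return_instance may still be dereferenced by a concurrently running ri_timer(), so immediate reuse is only safe while that traversal is provably idle, and raw_seqcount_try_begin() succeeds exactly when the seqcount is outside a write section. Below is a stand-alone C11 sketch of that odd/even handshake; all names are illustrative, and the kernel's seqcount_t additionally provides lockdep coverage and the precise memory barriers that this toy only approximates:

/*
 * Toy model of the "traversal in progress" handshake; not kernel code.
 * Even counter value: traversal idle; odd: traversal running.
 */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_uint traversal_seq;

static void traversal_begin(void)	/* analog of write_seqcount_begin() */
{
	atomic_fetch_add_explicit(&traversal_seq, 1, memory_order_acq_rel);
}

static void traversal_end(void)		/* analog of write_seqcount_end() */
{
	atomic_fetch_add_explicit(&traversal_seq, 1, memory_order_acq_rel);
}

static bool may_reuse_immediately(void)	/* analog of raw_seqcount_try_begin() */
{
	/* Even: no traversal is active, so recycling the object is safe. */
	return (atomic_load_explicit(&traversal_seq, memory_order_acquire) & 1) == 0;
}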
@@ -1920,7 +1960,15 @@ void uprobe_free_utask(struct task_struct *t)
 	ri = utask->return_instances;
 	while (ri) {
 		ri_next = ri->next;
-		free_ret_instance(ri, true /* cleanup_hprobe */);
+		free_ret_instance(utask, ri, true /* cleanup_hprobe */);
+		ri = ri_next;
+	}
+
+	/* free_ret_instance() above might add to ri_pool, so this loop should come last */
+	ri = utask->ri_pool;
+	while (ri) {
+		ri_next = ri->next;
+		ri_free(ri);
 		ri = ri_next;
 	}
 
@@ -1943,8 +1991,12 @@ static void ri_timer(struct timer_list *timer)
 	/* RCU protects return_instance from freeing. */
 	guard(rcu)();
 
+	write_seqcount_begin(&utask->ri_seqcount);
+
 	for_each_ret_instance_rcu(ri, utask->return_instances)
 		hprobe_expire(&ri->hprobe, false);
+
+	write_seqcount_end(&utask->ri_seqcount);
 }
 
 static struct uprobe_task *alloc_utask(void)
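Note the role inversion in this hunk: ri_timer() takes the seqcount *write* side even though it only reads the return_instances list. The counter encodes "traversal in progress" for free_ret_instance() to observe, rather than guarding data the timer modifies, and since each utask has a single timer, no further writer serialization is needed. One possible interleaving, sketched for illustration:

	CPU 0: ri_timer()                      CPU 1: free_ret_instance()
	write_seqcount_begin()   /* odd */
	hprobe_expire(&ri->hprobe, false)      raw_seqcount_try_begin() fails
	                                       ri_free(ri)     /* RCU-delayed, safe */
	write_seqcount_end()     /* even */
	                                       raw_seqcount_try_begin() succeeds
	                                       ri_pool_push(utask, ri)  /* reuse */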
@@ -1956,6 +2008,7 @@ static struct uprobe_task *alloc_utask(void)
 		return NULL;
 
 	timer_setup(&utask->ri_timer, ri_timer, 0);
+	seqcount_init(&utask->ri_seqcount);
 
 	return utask;
 }
@@ -1975,10 +2028,14 @@ static struct uprobe_task *get_utask(void)
 	return current->utask;
 }
 
-static struct return_instance *alloc_return_instance(void)
+static struct return_instance *alloc_return_instance(struct uprobe_task *utask)
 {
 	struct return_instance *ri;
 
+	ri = ri_pool_pop(utask);
+	if (ri)
+		return ri;
+
 	ri = kzalloc(sizeof(*ri), GFP_KERNEL);
 	if (!ri)
 		return ZERO_SIZE_PTR;
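The allocation fast path now pops from the per-task cache and only falls back to kzalloc() when the pool is empty. Because ri_pool hangs off the per-task uprobe_task and is only manipulated from that task's own context, the push/pop helpers need no locking; and because a pooled instance is not re-zeroed, ri_pool_push() resets cons_cnt at push time while leaving the extra_consumers buffer in place for the next user. A generic, self-contained sketch of the same pattern (all names illustrative, not kernel code):

#include <stdlib.h>

struct node {
	struct node *next;
	/* payload ... */
};

/* Per-owner free list: no locking, only the owning context touches it. */
struct node_cache {
	struct node *free_head;
};

static struct node *cache_alloc(struct node_cache *c)
{
	struct node *n = c->free_head;

	if (n) {
		/* Fast path: recycle without calling the allocator. */
		c->free_head = n->next;
		return n;
	}
	/* Slow path: fresh zeroed allocation, like kzalloc(). */
	return calloc(1, sizeof(*n));
}

static void cache_free(struct node_cache *c, struct node *n)
{
	/* A real user resets per-use state here, as ri_pool_push() resets cons_cnt. */
	n->next = c->free_head;
	c->free_head = n;
}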
@@ -2119,7 +2176,7 @@ static void cleanup_return_instances(struct uprobe_task *utask, bool chained,
 		rcu_assign_pointer(utask->return_instances, ri_next);
 		utask->depth--;
 
-		free_ret_instance(ri, true /* cleanup_hprobe */);
+		free_ret_instance(utask, ri, true /* cleanup_hprobe */);
 		ri = ri_next;
 	}
 }
@@ -2186,7 +2243,7 @@ static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs,
 
 	return;
 free:
-	kfree(ri);
+	ri_free(ri);
 }
 
 /* Prepare to single-step probed instruction out of line. */
@@ -2385,8 +2442,7 @@ static struct return_instance *push_consumer(struct return_instance *ri, __u64 i
 	if (unlikely(ri->cons_cnt > 0)) {
 		ric = krealloc(ri->extra_consumers, sizeof(*ric) * ri->cons_cnt, GFP_KERNEL);
 		if (!ric) {
-			kfree(ri->extra_consumers);
-			kfree_rcu(ri, rcu);
+			ri_free(ri);
 			return ZERO_SIZE_PTR;
 		}
 		ri->extra_consumers = ric;
@@ -2428,8 +2484,9 @@ static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
 	struct uprobe_consumer *uc;
 	bool has_consumers = false, remove = true;
 	struct return_instance *ri = NULL;
+	struct uprobe_task *utask = current->utask;
 
-	current->utask->auprobe = &uprobe->arch;
+	utask->auprobe = &uprobe->arch;
 
 	list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) {
 		bool session = uc->handler && uc->ret_handler;
@@ -2449,12 +2506,12 @@ static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
 			continue;
 
 		if (!ri)
-			ri = alloc_return_instance();
+			ri = alloc_return_instance(utask);
 
 		if (session)
 			ri = push_consumer(ri, uc->id, cookie);
 	}
-	current->utask->auprobe = NULL;
+	utask->auprobe = NULL;
 
 	if (!ZERO_OR_NULL_PTR(ri))
 		prepare_uretprobe(uprobe, regs, ri);
@@ -2554,7 +2611,7 @@ void uprobe_handle_trampoline(struct pt_regs *regs)
 			hprobe_finalize(&ri->hprobe, hstate);
 
 			/* We already took care of hprobe, no need to waste more time on that. */
-			free_ret_instance(ri, false /* !cleanup_hprobe */);
+			free_ret_instance(utask, ri, false /* !cleanup_hprobe */);
 			ri = ri_next;
 		} while (ri != next_chain);
 	} while (!valid);