
Commit 01d3722
Author: Alexei Starovoitov (committed)

memcg: Use trylock to access memcg stock_lock.

Teach memcg to operate under trylock conditions when spinning locks
cannot be used.

localtry_trylock might fail, and this leads to a charge-cache bypass if
the calling context doesn't allow spinning (gfpflags_allow_spinning).
In those cases, charge the memcg counter directly and fail early if
that is not possible. This might cause a premature charge failure, but
it allows opportunistic charging that is safe from the try_alloc_pages
path.

Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/r/20250222024427.30294-5-alexei.starovoitov@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
1 parent 8c57b68 commit 01d3722
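Editor's note: below is a minimal userspace C sketch of the trylock-or-bypass pattern this patch applies to consume_stock(). Everything in it (struct stock, stock_trylock(), may_spin, consume_stock_sketch()) is an illustrative stand-in, not the kernel's localtry_lock API; may_spin models gfpflags_allow_spinning(gfp_mask).

/*
 * Userspace sketch of the trylock-or-bypass pattern.
 * Stand-in names only; this is not the kernel's localtry_lock API.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct stock {
	atomic_flag lock;      /* stands in for localtry_lock_t */
	unsigned int nr_pages; /* per-CPU cached pre-charged pages */
};

static bool stock_trylock(struct stock *s)
{
	/* Acquired iff the flag was previously clear. */
	return !atomic_flag_test_and_set_explicit(&s->lock,
						  memory_order_acquire);
}

static void stock_unlock(struct stock *s)
{
	atomic_flag_clear_explicit(&s->lock, memory_order_release);
}

/* may_spin models gfpflags_allow_spinning(gfp_mask). */
static bool consume_stock_sketch(struct stock *s, unsigned int nr_pages,
				 bool may_spin)
{
	bool ret = false;

	if (!stock_trylock(s)) {
		/* Contended: a non-spinning caller must bypass the cache. */
		if (!may_spin)
			return false;
		/* Spinning allowed: wait for the lock, as before the patch. */
		while (!stock_trylock(s))
			;
	}
	if (s->nr_pages >= nr_pages) {
		s->nr_pages -= nr_pages;
		ret = true;
	}
	stock_unlock(s);
	return ret;
}

int main(void)
{
	struct stock s = { .lock = ATOMIC_FLAG_INIT, .nr_pages = 64 };

	printf("consumed: %d\n", consume_stock_sketch(&s, 16, true));
	printf("pages left in stock: %u\n", s.nr_pages);
	return 0;
}

The key property: a context that cannot spin (such as the try_alloc_pages path named in the commit message) never waits on the lock; it either wins the trylock or bypasses the per-CPU cache entirely and charges the counter directly.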

File tree

1 file changed (+37, -16 lines changed)

mm/memcontrol.c

Lines changed: 37 additions & 16 deletions
@@ -1739,7 +1739,7 @@ void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
 }
 
 struct memcg_stock_pcp {
-	local_lock_t stock_lock;
+	localtry_lock_t stock_lock;
 	struct mem_cgroup *cached; /* this never be root cgroup */
 	unsigned int nr_pages;
 
@@ -1754,7 +1754,7 @@ struct memcg_stock_pcp {
 #define FLUSHING_CACHED_CHARGE	0
 };
 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = {
-	.stock_lock = INIT_LOCAL_LOCK(stock_lock),
+	.stock_lock = INIT_LOCALTRY_LOCK(stock_lock),
 };
 static DEFINE_MUTEX(percpu_charge_mutex);
 
@@ -1766,14 +1766,16 @@ static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
  * consume_stock: Try to consume stocked charge on this cpu.
  * @memcg: memcg to consume from.
  * @nr_pages: how many pages to charge.
+ * @gfp_mask: allocation mask.
  *
  * The charges will only happen if @memcg matches the current cpu's memcg
  * stock, and at least @nr_pages are available in that stock. Failure to
  * service an allocation will refill the stock.
  *
  * returns true if successful, false otherwise.
  */
-static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
+static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages,
+			  gfp_t gfp_mask)
 {
 	struct memcg_stock_pcp *stock;
 	unsigned int stock_pages;
@@ -1783,7 +1785,11 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 	if (nr_pages > MEMCG_CHARGE_BATCH)
 		return ret;
 
-	local_lock_irqsave(&memcg_stock.stock_lock, flags);
+	if (!localtry_trylock_irqsave(&memcg_stock.stock_lock, flags)) {
+		if (!gfpflags_allow_spinning(gfp_mask))
+			return ret;
+		localtry_lock_irqsave(&memcg_stock.stock_lock, flags);
+	}
 
 	stock = this_cpu_ptr(&memcg_stock);
 	stock_pages = READ_ONCE(stock->nr_pages);
@@ -1792,7 +1798,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 		ret = true;
 	}
 
-	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+	localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
 
 	return ret;
 }
@@ -1831,14 +1837,14 @@ static void drain_local_stock(struct work_struct *dummy)
 	 * drain_stock races is that we always operate on local CPU stock
 	 * here with IRQ disabled
 	 */
-	local_lock_irqsave(&memcg_stock.stock_lock, flags);
+	localtry_lock_irqsave(&memcg_stock.stock_lock, flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	old = drain_obj_stock(stock);
 	drain_stock(stock);
 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 
-	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+	localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
 	obj_cgroup_put(old);
 }
 
@@ -1868,9 +1874,20 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
 	unsigned long flags;
 
-	local_lock_irqsave(&memcg_stock.stock_lock, flags);
+	if (!localtry_trylock_irqsave(&memcg_stock.stock_lock, flags)) {
+		/*
+		 * In case of unlikely failure to lock percpu stock_lock
+		 * uncharge memcg directly.
+		 */
+		if (mem_cgroup_is_root(memcg))
+			return;
+		page_counter_uncharge(&memcg->memory, nr_pages);
+		if (do_memsw_account())
+			page_counter_uncharge(&memcg->memsw, nr_pages);
+		return;
+	}
 	__refill_stock(memcg, nr_pages);
-	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+	localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
 }
 
 /*
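Editor's note: a companion sketch for the refill_stock() fallback in the hunk above, continuing the earlier sketch's struct stock and lock helpers (again, stand-in names rather than kernel code). When the per-CPU cache cannot be locked, pages go straight back to the global counter instead of being parked in the stock.

/* Models the hierarchical page_counter (hypothetical stand-in). */
static atomic_uint global_charged_pages;

static void refill_stock_sketch(struct stock *s, unsigned int nr_pages)
{
	if (!stock_trylock(s)) {
		/* Cache is contended: uncharge the counter directly. */
		atomic_fetch_sub(&global_charged_pages, nr_pages);
		return;
	}
	s->nr_pages += nr_pages;	/* models __refill_stock() */
	stock_unlock(s);
}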
@@ -2213,9 +2230,13 @@ int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
 	unsigned long pflags;
 
 retry:
-	if (consume_stock(memcg, nr_pages))
+	if (consume_stock(memcg, nr_pages, gfp_mask))
 		return 0;
 
+	if (!gfpflags_allow_spinning(gfp_mask))
+		/* Avoid the refill and flush of the older stock */
+		batch = nr_pages;
+
 	if (!do_memsw_account() ||
 	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
 		if (page_counter_try_charge(&memcg->memory, batch, &counter))
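Editor's note on the batch = nr_pages line above: try_charge_memcg() normally charges a larger MEMCG_CHARGE_BATCH and parks the surplus in the per-CPU stock via refill_stock(). In a non-spinning context that refill may itself fail to take stock_lock and fall back to a direct uncharge, so, as the in-line comment says, charging exactly nr_pages avoids the refill and flush of the older stock altogether.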
@@ -2699,7 +2720,7 @@ static void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
 	unsigned long flags;
 	int *bytes;
 
-	local_lock_irqsave(&memcg_stock.stock_lock, flags);
+	localtry_lock_irqsave(&memcg_stock.stock_lock, flags);
 	stock = this_cpu_ptr(&memcg_stock);
 
 	/*
@@ -2752,7 +2773,7 @@ static void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
 	if (nr)
 		__mod_objcg_mlstate(objcg, pgdat, idx, nr);
 
-	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+	localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
 	obj_cgroup_put(old);
 }
 
@@ -2762,15 +2783,15 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
 	unsigned long flags;
 	bool ret = false;
 
-	local_lock_irqsave(&memcg_stock.stock_lock, flags);
+	localtry_lock_irqsave(&memcg_stock.stock_lock, flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
 		stock->nr_bytes -= nr_bytes;
 		ret = true;
 	}
 
-	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+	localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
 
 	return ret;
 }
@@ -2862,7 +2883,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
 	unsigned long flags;
 	unsigned int nr_pages = 0;
 
-	local_lock_irqsave(&memcg_stock.stock_lock, flags);
+	localtry_lock_irqsave(&memcg_stock.stock_lock, flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
@@ -2880,7 +2901,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
 		stock->nr_bytes &= (PAGE_SIZE - 1);
 	}
 
-	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+	localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
 	obj_cgroup_put(old);
 
 	if (nr_pages)
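Editor's note: the object-stock paths (mod_objcg_state(), consume_obj_stock(), refill_obj_stock()) and drain_local_stock() switch the lock type but keep unconditional localtry_lock_irqsave(); only consume_stock() and refill_stock() gain a trylock fast path, presumably because the objcg paths are not reached from non-spinning contexts in this series.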
