@@ -1739,7 +1739,7 @@ void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
 }
 
 struct memcg_stock_pcp {
-	local_lock_t stock_lock;
+	localtry_lock_t stock_lock;
 	struct mem_cgroup *cached; /* this never be root cgroup */
 	unsigned int nr_pages;
 
@@ -1754,7 +1754,7 @@ struct memcg_stock_pcp {
 #define FLUSHING_CACHED_CHARGE	0
 };
 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = {
-	.stock_lock = INIT_LOCAL_LOCK(stock_lock),
+	.stock_lock = INIT_LOCALTRY_LOCK(stock_lock),
 };
 static DEFINE_MUTEX(percpu_charge_mutex);
 
@@ -1766,14 +1766,16 @@ static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
  * consume_stock: Try to consume stocked charge on this cpu.
  * @memcg: memcg to consume from.
  * @nr_pages: how many pages to charge.
+ * @gfp_mask: allocation mask.
  *
  * The charges will only happen if @memcg matches the current cpu's memcg
  * stock, and at least @nr_pages are available in that stock.  Failure to
  * service an allocation will refill the stock.
  *
  * returns true if successful, false otherwise.
  */
-static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
+static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages,
+			  gfp_t gfp_mask)
 {
 	struct memcg_stock_pcp *stock;
 	unsigned int stock_pages;
@@ -1783,7 +1785,11 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 	if (nr_pages > MEMCG_CHARGE_BATCH)
 		return ret;
 
-	local_lock_irqsave(&memcg_stock.stock_lock, flags);
+	if (!localtry_trylock_irqsave(&memcg_stock.stock_lock, flags)) {
+		if (!gfpflags_allow_spinning(gfp_mask))
+			return ret;
+		localtry_lock_irqsave(&memcg_stock.stock_lock, flags);
+	}
 
 	stock = this_cpu_ptr(&memcg_stock);
 	stock_pages = READ_ONCE(stock->nr_pages);
@@ -1792,7 +1798,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 		ret = true;
 	}
 
-	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+	localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
 
 	return ret;
 }
@@ -1831,14 +1837,14 @@ static void drain_local_stock(struct work_struct *dummy)
 	 * drain_stock races is that we always operate on local CPU stock
 	 * here with IRQ disabled
 	 */
-	local_lock_irqsave(&memcg_stock.stock_lock, flags);
+	localtry_lock_irqsave(&memcg_stock.stock_lock, flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	old = drain_obj_stock(stock);
 	drain_stock(stock);
 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 
-	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+	localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
 	obj_cgroup_put(old);
 }
 
@@ -1868,9 +1874,20 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
 	unsigned long flags;
 
-	local_lock_irqsave(&memcg_stock.stock_lock, flags);
+	if (!localtry_trylock_irqsave(&memcg_stock.stock_lock, flags)) {
+		/*
+		 * In case of unlikely failure to lock percpu stock_lock
+		 * uncharge memcg directly.
+		 */
+		if (mem_cgroup_is_root(memcg))
+			return;
+		page_counter_uncharge(&memcg->memory, nr_pages);
+		if (do_memsw_account())
+			page_counter_uncharge(&memcg->memsw, nr_pages);
+		return;
+	}
 	__refill_stock(memcg, nr_pages);
-	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+	localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
 }
 
 /*
@@ -2213,9 +2230,13 @@ int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
 	unsigned long pflags;
 
 retry:
-	if (consume_stock(memcg, nr_pages))
+	if (consume_stock(memcg, nr_pages, gfp_mask))
 		return 0;
 
+	if (!gfpflags_allow_spinning(gfp_mask))
+		/* Avoid the refill and flush of the older stock */
+		batch = nr_pages;
+
 	if (!do_memsw_account() ||
 	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
 		if (page_counter_try_charge(&memcg->memory, batch, &counter))
@@ -2699,7 +2720,7 @@ static void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
 	unsigned long flags;
 	int *bytes;
 
-	local_lock_irqsave(&memcg_stock.stock_lock, flags);
+	localtry_lock_irqsave(&memcg_stock.stock_lock, flags);
 	stock = this_cpu_ptr(&memcg_stock);
 
 	/*
@@ -2752,7 +2773,7 @@ static void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
 	if (nr)
 		__mod_objcg_mlstate(objcg, pgdat, idx, nr);
 
-	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+	localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
 	obj_cgroup_put(old);
 }
 
@@ -2762,15 +2783,15 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
 	unsigned long flags;
 	bool ret = false;
 
-	local_lock_irqsave(&memcg_stock.stock_lock, flags);
+	localtry_lock_irqsave(&memcg_stock.stock_lock, flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
 		stock->nr_bytes -= nr_bytes;
 		ret = true;
 	}
 
-	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+	localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
 
 	return ret;
 }
@@ -2862,7 +2883,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
 	unsigned long flags;
 	unsigned int nr_pages = 0;
 
-	local_lock_irqsave(&memcg_stock.stock_lock, flags);
+	localtry_lock_irqsave(&memcg_stock.stock_lock, flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
@@ -2880,7 +2901,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
 		stock->nr_bytes &= (PAGE_SIZE - 1);
 	}
 
-	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+	localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
 	obj_cgroup_put(old);
 
 	if (nr_pages)
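
Note on the pattern this patch introduces: the charging fast path now tries the per-CPU stock_lock with a trylock, and only falls back to the ordinary lock (or to a lockless slow path) when the allocation context permits spinning, as indicated by gfpflags_allow_spinning(). The snippet below is a minimal userspace analogue of that decision flow, not kernel code; stock_lock, stocked_pages, consume_stock and allow_spinning are illustrative stand-ins built on POSIX spinlocks.

/* Hypothetical userspace sketch of the trylock-first consume pattern.
 * Build with: gcc -pthread sketch.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_spinlock_t stock_lock;   /* stands in for memcg_stock.stock_lock */
static unsigned int stocked_pages = 64; /* stands in for the per-CPU page stock */

/* Consume from the stock only if the lock is immediately available,
 * or if the caller's context allows waiting for it. */
static bool consume_stock(unsigned int nr_pages, bool allow_spinning)
{
	bool ret = false;

	if (pthread_spin_trylock(&stock_lock) != 0) {
		if (!allow_spinning)
			return false;           /* cannot spin here: give up on the fast path */
		pthread_spin_lock(&stock_lock);  /* safe to spin: take the lock normally */
	}

	if (stocked_pages >= nr_pages) {
		stocked_pages -= nr_pages;
		ret = true;
	}

	pthread_spin_unlock(&stock_lock);
	return ret;
}

int main(void)
{
	pthread_spin_init(&stock_lock, PTHREAD_PROCESS_PRIVATE);
	printf("fast path hit: %d\n", consume_stock(8, true));
	printf("fast path hit: %d\n", consume_stock(128, false));
	pthread_spin_destroy(&stock_lock);
	return 0;
}

In the actual patch the "cannot spin" branch simply reports failure to the caller, which then either charges the page counters directly (refill_stock) or proceeds to the slow path with batch clamped to nr_pages (try_charge_memcg), so that a caller in a restricted context never waits on the per-CPU lock.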