
Commit 93353d6

kiryl authored and Sasha Levin committed
mm, treewide: introduce NR_PAGE_ORDERS
[ Upstream commit fd37721 ]

NR_PAGE_ORDERS defines the number of page orders supported by the page
allocator, ranging from 0 to MAX_ORDER, MAX_ORDER + 1 in total.

NR_PAGE_ORDERS assists in defining arrays of page orders and allows for
more natural iteration over them.

[kirill.shutemov@linux.intel.com: fixup for kerneldoc warning]
Link: https://lkml.kernel.org/r/20240101111512.7empzyifq7kxtzk3@box
Link: https://lkml.kernel.org/r/20231228144704.14033-1-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Stable-dep-of: b6976f3 ("drm/ttm: stop pooling cached NUMA pages v2")
Signed-off-by: Sasha Levin <sashal@kernel.org>
1 parent f0da0c8 · commit 93353d6
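In practice the conversion is mechanical: arrays indexed by page order are sized with NR_PAGE_ORDERS, and inclusive loop bounds of the form order <= MAX_ORDER become the half-open order < NR_PAGE_ORDERS. A minimal userspace sketch of the pattern, not kernel code; the MAX_ORDER value below is illustrative only, since in the kernel it depends on architecture and config:

#include <stdio.h>

/* Illustrative stand-ins: in the kernel, MAX_ORDER comes from the
 * architecture/config and NR_PAGE_ORDERS from <linux/mmzone.h>. */
#define MAX_ORDER      10
#define NR_PAGE_ORDERS (MAX_ORDER + 1)

/* Sizing the array and bounding the loop with the same macro keeps the
 * two from drifting apart. */
static unsigned long free_count[NR_PAGE_ORDERS];

int main(void)
{
	int order;

	/* Before the patch this read: for (order = 0; order <= MAX_ORDER; order++) */
	for (order = 0; order < NR_PAGE_ORDERS; order++)
		printf("order %2d: %lu free blocks\n", order, free_count[order]);

	return 0;
}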

15 files changed: +42 -41 lines changed

Documentation/admin-guide/kdump/vmcoreinfo.rst

Lines changed: 3 additions & 3 deletions
@@ -172,7 +172,7 @@ variables.
 Offset of the free_list's member. This value is used to compute the number
 of free pages.
 
-Each zone has a free_area structure array called free_area[MAX_ORDER + 1].
+Each zone has a free_area structure array called free_area[NR_PAGE_ORDERS].
 The free_list represents a linked list of free page blocks.
 
 (list_head, next|prev)
@@ -189,8 +189,8 @@ Offsets of the vmap_area's members. They carry vmalloc-specific
 information. Makedumpfile gets the start address of the vmalloc region
 from this.
 
-(zone.free_area, MAX_ORDER + 1)
--------------------------------
+(zone.free_area, NR_PAGE_ORDERS)
+--------------------------------
 
 Free areas descriptor. User-space tools use this value to iterate the
 free_area ranges. MAX_ORDER is used by the zone buddy allocator.

arch/arm64/kvm/hyp/include/nvhe/gfp.h

Lines changed: 1 addition & 1 deletion
@@ -16,7 +16,7 @@ struct hyp_pool {
 	 * API at EL2.
 	 */
 	hyp_spinlock_t lock;
-	struct list_head free_area[MAX_ORDER + 1];
+	struct list_head free_area[NR_PAGE_ORDERS];
 	phys_addr_t range_start;
 	phys_addr_t range_end;
 	unsigned short max_order;

arch/sparc/kernel/traps_64.c

Lines changed: 1 addition & 1 deletion
@@ -897,7 +897,7 @@ void __init cheetah_ecache_flush_init(void)
 
 	/* Now allocate error trap reporting scoreboard. */
 	sz = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
-	for (order = 0; order <= MAX_ORDER; order++) {
+	for (order = 0; order < NR_PAGE_ORDERS; order++) {
 		if ((PAGE_SIZE << order) >= sz)
 			break;
 	}

drivers/gpu/drm/ttm/tests/ttm_device_test.c

Lines changed: 1 addition & 1 deletion
@@ -175,7 +175,7 @@ static void ttm_device_init_pools(struct kunit *test)
 
 	if (params->pools_init_expected) {
 		for (int i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
-			for (int j = 0; j <= MAX_ORDER; ++j) {
+			for (int j = 0; j < NR_PAGE_ORDERS; ++j) {
 				pt = pool->caching[i].orders[j];
 				KUNIT_EXPECT_PTR_EQ(test, pt.pool, pool);
 				KUNIT_EXPECT_EQ(test, pt.caching, i);

drivers/gpu/drm/ttm/ttm_pool.c

Lines changed: 10 additions & 10 deletions
@@ -65,11 +65,11 @@ module_param(page_pool_size, ulong, 0644);
 
 static atomic_long_t allocated_pages;
 
-static struct ttm_pool_type global_write_combined[MAX_ORDER + 1];
-static struct ttm_pool_type global_uncached[MAX_ORDER + 1];
+static struct ttm_pool_type global_write_combined[NR_PAGE_ORDERS];
+static struct ttm_pool_type global_uncached[NR_PAGE_ORDERS];
 
-static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER + 1];
-static struct ttm_pool_type global_dma32_uncached[MAX_ORDER + 1];
+static struct ttm_pool_type global_dma32_write_combined[NR_PAGE_ORDERS];
+static struct ttm_pool_type global_dma32_uncached[NR_PAGE_ORDERS];
 
 static spinlock_t shrinker_lock;
 static struct list_head shrinker_list;
@@ -565,7 +565,7 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
 
 	if (use_dma_alloc || nid != NUMA_NO_NODE) {
 		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
-			for (j = 0; j <= MAX_ORDER; ++j)
+			for (j = 0; j < NR_PAGE_ORDERS; ++j)
 				ttm_pool_type_init(&pool->caching[i].orders[j],
 						   pool, i, j);
 	}
@@ -586,7 +586,7 @@ void ttm_pool_fini(struct ttm_pool *pool)
 
 	if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE) {
 		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
-			for (j = 0; j <= MAX_ORDER; ++j)
+			for (j = 0; j < NR_PAGE_ORDERS; ++j)
 				ttm_pool_type_fini(&pool->caching[i].orders[j]);
 	}
 
@@ -641,7 +641,7 @@ static void ttm_pool_debugfs_header(struct seq_file *m)
 	unsigned int i;
 
 	seq_puts(m, "\t ");
-	for (i = 0; i <= MAX_ORDER; ++i)
+	for (i = 0; i < NR_PAGE_ORDERS; ++i)
 		seq_printf(m, " ---%2u---", i);
 	seq_puts(m, "\n");
 }
@@ -652,7 +652,7 @@ static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
 {
 	unsigned int i;
 
-	for (i = 0; i <= MAX_ORDER; ++i)
+	for (i = 0; i < NR_PAGE_ORDERS; ++i)
 		seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
 	seq_puts(m, "\n");
 }
@@ -761,7 +761,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
 	spin_lock_init(&shrinker_lock);
 	INIT_LIST_HEAD(&shrinker_list);
 
-	for (i = 0; i <= MAX_ORDER; ++i) {
+	for (i = 0; i < NR_PAGE_ORDERS; ++i) {
 		ttm_pool_type_init(&global_write_combined[i], NULL,
 				   ttm_write_combined, i);
 		ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);
@@ -794,7 +794,7 @@ void ttm_pool_mgr_fini(void)
 {
 	unsigned int i;
 
-	for (i = 0; i <= MAX_ORDER; ++i) {
+	for (i = 0; i < NR_PAGE_ORDERS; ++i) {
 		ttm_pool_type_fini(&global_write_combined[i]);
 		ttm_pool_type_fini(&global_uncached[i]);
 

include/drm/ttm/ttm_pool.h

Lines changed: 1 addition & 1 deletion
@@ -74,7 +74,7 @@ struct ttm_pool {
 	bool use_dma32;
 
 	struct {
-		struct ttm_pool_type orders[MAX_ORDER + 1];
+		struct ttm_pool_type orders[NR_PAGE_ORDERS];
 	} caching[TTM_NUM_CACHING_TYPES];
 };
 
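TTM keeps one pool per (caching type, order) pair. A freestanding sketch of filling such a grid, using a hypothetical reduced struct in place of struct ttm_pool_type and illustrative constants in place of the kernel's:

#include <stdio.h>

/* Illustrative stand-ins for the kernel constants. */
#define MAX_ORDER         10
#define NR_PAGE_ORDERS    (MAX_ORDER + 1)
#define NUM_CACHING_TYPES 3 /* cached, write-combined, uncached */

/* Hypothetical reduction of struct ttm_pool_type to what the sketch needs. */
struct pool_type {
	unsigned int caching;
	unsigned int order;
};

struct pool {
	struct {
		struct pool_type orders[NR_PAGE_ORDERS];
	} caching[NUM_CACHING_TYPES];
};

int main(void)
{
	struct pool pool;

	/* Same loop shape as ttm_pool_init() after the patch: the order
	 * index runs over [0, NR_PAGE_ORDERS). */
	for (unsigned int i = 0; i < NUM_CACHING_TYPES; ++i)
		for (unsigned int j = 0; j < NR_PAGE_ORDERS; ++j) {
			pool.caching[i].orders[j].caching = i;
			pool.caching[i].orders[j].order = j;
		}

	printf("initialized %d pools\n", NUM_CACHING_TYPES * NR_PAGE_ORDERS);
	return 0;
}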

include/linux/mmzone.h

Lines changed: 4 additions & 2 deletions
@@ -34,6 +34,8 @@
 
 #define IS_MAX_ORDER_ALIGNED(pfn) IS_ALIGNED(pfn, MAX_ORDER_NR_PAGES)
 
+#define NR_PAGE_ORDERS (MAX_ORDER + 1)
+
 /*
  * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
  * costly to service. That is between allocation orders which should
@@ -95,7 +97,7 @@ static inline bool migratetype_is_mergeable(int mt)
 }
 
 #define for_each_migratetype_order(order, type) \
-	for (order = 0; order <= MAX_ORDER; order++) \
+	for (order = 0; order < NR_PAGE_ORDERS; order++) \
 		for (type = 0; type < MIGRATE_TYPES; type++)
 
 extern int page_group_by_mobility_disabled;
@@ -929,7 +931,7 @@ struct zone {
 	CACHELINE_PADDING(_pad1_);
 
 	/* free areas of different sizes */
-	struct free_area free_area[MAX_ORDER + 1];
+	struct free_area free_area[NR_PAGE_ORDERS];
 
 #ifdef CONFIG_UNACCEPTED_MEMORY
 	/* Pages to be accepted. All pages on the list are MAX_ORDER */
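With the macro in place, for_each_migratetype_order() walks every (order, migratetype) bucket as two ordinary half-open loops. A standalone sketch of the same shape; the constants are illustrative stand-ins (MIGRATE_TYPES in particular varies with kernel configuration):

#include <stdio.h>

/* Illustrative stand-ins; the kernel's definitions live in <linux/mmzone.h>. */
#define MAX_ORDER      10
#define NR_PAGE_ORDERS (MAX_ORDER + 1)
#define MIGRATE_TYPES  6

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < NR_PAGE_ORDERS; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)

int main(void)
{
	int order, type, buckets = 0;

	for_each_migratetype_order(order, type)
		buckets++;

	/* 11 orders x 6 migratetypes = 66 buckets with these stand-ins. */
	printf("%d buckets\n", buckets);
	return 0;
}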

kernel/crash_core.c

Lines changed: 1 addition & 1 deletion
@@ -660,7 +660,7 @@ static int __init crash_save_vmcoreinfo_init(void)
 	VMCOREINFO_OFFSET(list_head, prev);
 	VMCOREINFO_OFFSET(vmap_area, va_start);
 	VMCOREINFO_OFFSET(vmap_area, list);
-	VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER + 1);
+	VMCOREINFO_LENGTH(zone.free_area, NR_PAGE_ORDERS);
 	log_buf_vmcoreinfo_setup();
 	VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
 	VMCOREINFO_NUMBER(NR_FREE_PAGES);

lib/test_meminit.c

Lines changed: 1 addition & 1 deletion
@@ -93,7 +93,7 @@ static int __init test_pages(int *total_failures)
 	int failures = 0, num_tests = 0;
 	int i;
 
-	for (i = 0; i <= MAX_ORDER; i++)
+	for (i = 0; i < NR_PAGE_ORDERS; i++)
 		num_tests += do_alloc_pages_order(i, &failures);
 
 	REPORT_FAILURES_IN_FN();

mm/compaction.c

Lines changed: 1 addition & 1 deletion
@@ -2225,7 +2225,7 @@ static enum compact_result __compact_finished(struct compact_control *cc)
 
 	/* Direct compactor: Is a suitable page free? */
 	ret = COMPACT_NO_SUITABLE_PAGE;
-	for (order = cc->order; order <= MAX_ORDER; order++) {
+	for (order = cc->order; order < NR_PAGE_ORDERS; order++) {
 		struct free_area *area = &cc->zone->free_area[order];
 		bool can_steal;
 
