
Commit fd37721

kiryl authored and akpm00 committed
mm, treewide: introduce NR_PAGE_ORDERS
NR_PAGE_ORDERS defines the number of page orders supported by the page
allocator, ranging from 0 to MAX_ORDER, MAX_ORDER + 1 in total.

NR_PAGE_ORDERS assists in defining arrays of page orders and allows for
more natural iteration over them.

[kirill.shutemov@linux.intel.com: fixup for kerneldoc warning]
Link: https://lkml.kernel.org/r/20240101111512.7empzyifq7kxtzk3@box
Link: https://lkml.kernel.org/r/20231228144704.14033-1-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1 parent a5b7620 commit fd37721
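
For illustration only (not part of this commit): the change amounts to sizing arrays and bounding loops with a single symbol instead of repeating "MAX_ORDER + 1". A minimal standalone C sketch; MAX_ORDER is hard-coded here as an assumption, since its real value is configuration-dependent and both macros actually live in include/linux/mmzone.h.

#include <stdio.h>

/* Illustrative values only: in the kernel, MAX_ORDER is configuration-
 * dependent and NR_PAGE_ORDERS is defined in include/linux/mmzone.h. */
#define MAX_ORDER      10
#define NR_PAGE_ORDERS (MAX_ORDER + 1)

int main(void)
{
        /* Old idiom: inclusive bound, arrays sized "MAX_ORDER + 1". */
        int old_counts[MAX_ORDER + 1] = { 0 };
        for (int order = 0; order <= MAX_ORDER; order++)
                old_counts[order]++;

        /* New idiom: the array size and the exclusive loop bound share
         * one name, which is harder to get off by one. */
        int counts[NR_PAGE_ORDERS] = { 0 };
        for (int order = 0; order < NR_PAGE_ORDERS; order++)
                counts[order]++;

        printf("orders 0..%d, %d in total\n", MAX_ORDER, NR_PAGE_ORDERS);
        return 0;
}

The exclusive "<" bound against an array-sized count is the conventional C idiom, which is what makes the new spelling less error-prone.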

File tree

15 files changed: +42 -41 lines changed

Documentation/admin-guide/kdump/vmcoreinfo.rst

Lines changed: 3 additions & 3 deletions
@@ -172,7 +172,7 @@ variables.
 Offset of the free_list's member. This value is used to compute the number
 of free pages.
 
-Each zone has a free_area structure array called free_area[MAX_ORDER + 1].
+Each zone has a free_area structure array called free_area[NR_PAGE_ORDERS].
 The free_list represents a linked list of free page blocks.
 
 (list_head, next|prev)
@@ -189,8 +189,8 @@ Offsets of the vmap_area's members. They carry vmalloc-specific
 information. Makedumpfile gets the start address of the vmalloc region
 from this.
 
-(zone.free_area, MAX_ORDER + 1)
--------------------------------
+(zone.free_area, NR_PAGE_ORDERS)
+--------------------------------
 
 Free areas descriptor. User-space tools use this value to iterate the
 free_area ranges. MAX_ORDER is used by the zone buddy allocator.

arch/arm64/kvm/hyp/include/nvhe/gfp.h

Lines changed: 1 addition & 1 deletion
@@ -16,7 +16,7 @@ struct hyp_pool {
 	 * API at EL2.
 	 */
 	hyp_spinlock_t lock;
-	struct list_head free_area[MAX_ORDER + 1];
+	struct list_head free_area[NR_PAGE_ORDERS];
 	phys_addr_t range_start;
 	phys_addr_t range_end;
 	unsigned short max_order;

arch/sparc/kernel/traps_64.c

Lines changed: 1 addition & 1 deletion
@@ -897,7 +897,7 @@ void __init cheetah_ecache_flush_init(void)
 
 	/* Now allocate error trap reporting scoreboard. */
 	sz = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
-	for (order = 0; order <= MAX_ORDER; order++) {
+	for (order = 0; order < NR_PAGE_ORDERS; order++) {
 		if ((PAGE_SIZE << order) >= sz)
 			break;
 	}

drivers/gpu/drm/ttm/tests/ttm_device_test.c

Lines changed: 1 addition & 1 deletion
@@ -175,7 +175,7 @@ static void ttm_device_init_pools(struct kunit *test)
 
 	if (params->pools_init_expected) {
 		for (int i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
-			for (int j = 0; j <= MAX_ORDER; ++j) {
+			for (int j = 0; j < NR_PAGE_ORDERS; ++j) {
 				pt = pool->caching[i].orders[j];
 				KUNIT_EXPECT_PTR_EQ(test, pt.pool, pool);
 				KUNIT_EXPECT_EQ(test, pt.caching, i);

drivers/gpu/drm/ttm/ttm_pool.c

Lines changed: 10 additions & 10 deletions
@@ -65,11 +65,11 @@ module_param(page_pool_size, ulong, 0644);
 
 static atomic_long_t allocated_pages;
 
-static struct ttm_pool_type global_write_combined[MAX_ORDER + 1];
-static struct ttm_pool_type global_uncached[MAX_ORDER + 1];
+static struct ttm_pool_type global_write_combined[NR_PAGE_ORDERS];
+static struct ttm_pool_type global_uncached[NR_PAGE_ORDERS];
 
-static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER + 1];
-static struct ttm_pool_type global_dma32_uncached[MAX_ORDER + 1];
+static struct ttm_pool_type global_dma32_write_combined[NR_PAGE_ORDERS];
+static struct ttm_pool_type global_dma32_uncached[NR_PAGE_ORDERS];
 
 static spinlock_t shrinker_lock;
 static struct list_head shrinker_list;
@@ -568,7 +568,7 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
 
 	if (use_dma_alloc || nid != NUMA_NO_NODE) {
 		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
-			for (j = 0; j <= MAX_ORDER; ++j)
+			for (j = 0; j < NR_PAGE_ORDERS; ++j)
 				ttm_pool_type_init(&pool->caching[i].orders[j],
 						   pool, i, j);
 	}
@@ -601,7 +601,7 @@ void ttm_pool_fini(struct ttm_pool *pool)
 
 	if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE) {
 		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
-			for (j = 0; j <= MAX_ORDER; ++j)
+			for (j = 0; j < NR_PAGE_ORDERS; ++j)
 				ttm_pool_type_fini(&pool->caching[i].orders[j]);
 	}
 
@@ -656,7 +656,7 @@ static void ttm_pool_debugfs_header(struct seq_file *m)
 	unsigned int i;
 
 	seq_puts(m, "\t ");
-	for (i = 0; i <= MAX_ORDER; ++i)
+	for (i = 0; i < NR_PAGE_ORDERS; ++i)
 		seq_printf(m, " ---%2u---", i);
 	seq_puts(m, "\n");
 }
@@ -667,7 +667,7 @@ static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
 {
 	unsigned int i;
 
-	for (i = 0; i <= MAX_ORDER; ++i)
+	for (i = 0; i < NR_PAGE_ORDERS; ++i)
 		seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
 	seq_puts(m, "\n");
 }
@@ -776,7 +776,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
 	spin_lock_init(&shrinker_lock);
 	INIT_LIST_HEAD(&shrinker_list);
 
-	for (i = 0; i <= MAX_ORDER; ++i) {
+	for (i = 0; i < NR_PAGE_ORDERS; ++i) {
 		ttm_pool_type_init(&global_write_combined[i], NULL,
 				   ttm_write_combined, i);
 		ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);
@@ -816,7 +816,7 @@ void ttm_pool_mgr_fini(void)
 {
 	unsigned int i;
 
-	for (i = 0; i <= MAX_ORDER; ++i) {
+	for (i = 0; i < NR_PAGE_ORDERS; ++i) {
 		ttm_pool_type_fini(&global_write_combined[i]);
 		ttm_pool_type_fini(&global_uncached[i]);
 

include/drm/ttm/ttm_pool.h

Lines changed: 1 addition & 1 deletion
@@ -74,7 +74,7 @@ struct ttm_pool {
 	bool use_dma32;
 
 	struct {
-		struct ttm_pool_type orders[MAX_ORDER + 1];
+		struct ttm_pool_type orders[NR_PAGE_ORDERS];
 	} caching[TTM_NUM_CACHING_TYPES];
 };
 

include/linux/mmzone.h

Lines changed: 4 additions & 2 deletions
@@ -35,6 +35,8 @@
 
 #define IS_MAX_ORDER_ALIGNED(pfn) IS_ALIGNED(pfn, MAX_ORDER_NR_PAGES)
 
+#define NR_PAGE_ORDERS (MAX_ORDER + 1)
+
 /*
  * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
  * costly to service. That is between allocation orders which should
@@ -96,7 +98,7 @@ static inline bool migratetype_is_mergeable(int mt)
 }
 
 #define for_each_migratetype_order(order, type) \
-	for (order = 0; order <= MAX_ORDER; order++) \
+	for (order = 0; order < NR_PAGE_ORDERS; order++) \
 		for (type = 0; type < MIGRATE_TYPES; type++)
 
 extern int page_group_by_mobility_disabled;
@@ -933,7 +935,7 @@ struct zone {
 	CACHELINE_PADDING(_pad1_);
 
 	/* free areas of different sizes */
-	struct free_area free_area[MAX_ORDER + 1];
+	struct free_area free_area[NR_PAGE_ORDERS];
 
 #ifdef CONFIG_UNACCEPTED_MEMORY
 	/* Pages to be accepted. All pages on the list are MAX_ORDER */
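
For reference, a standalone sketch of what the updated for_each_migratetype_order expands to, derived directly from the hunk above. MAX_ORDER and MIGRATE_TYPES are hard-coded as assumptions here, since both are configuration-dependent in the kernel:

/* Open-coded equivalent of for_each_migratetype_order(order, type). */
#define MAX_ORDER      10               /* assumption for illustration */
#define NR_PAGE_ORDERS (MAX_ORDER + 1)
#define MIGRATE_TYPES  6                /* assumption for illustration */

#define for_each_migratetype_order(order, type) \
        for (order = 0; order < NR_PAGE_ORDERS; order++) \
                for (type = 0; type < MIGRATE_TYPES; type++)

int main(void)
{
        int order, type, pairs = 0;

        for_each_migratetype_order(order, type)
                pairs++;        /* visits every (order, type) pair once */

        return pairs == NR_PAGE_ORDERS * MIGRATE_TYPES ? 0 : 1;
}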

kernel/crash_core.c

Lines changed: 1 addition & 1 deletion
@@ -802,7 +802,7 @@ static int __init crash_save_vmcoreinfo_init(void)
 	VMCOREINFO_OFFSET(list_head, prev);
 	VMCOREINFO_OFFSET(vmap_area, va_start);
 	VMCOREINFO_OFFSET(vmap_area, list);
-	VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER + 1);
+	VMCOREINFO_LENGTH(zone.free_area, NR_PAGE_ORDERS);
 	log_buf_vmcoreinfo_setup();
 	VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
 	VMCOREINFO_NUMBER(NR_FREE_PAGES);
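
VMCOREINFO_LENGTH() emits a text line of the form "LENGTH(zone.free_area)=N" into the vmcoreinfo note, which dump tools such as makedumpfile read to size their free_area iteration. A hedged user-space sketch of that consumption; the helper below is hypothetical, not makedumpfile's actual code:

#include <stdio.h>
#include <string.h>
#include <stdlib.h>

/* Hypothetical helper: scan a vmcoreinfo text blob for
 * "LENGTH(<name>)=<n>" and return n, or -1 if absent. */
static long vmcoreinfo_length(const char *blob, const char *name)
{
        char key[128];
        const char *p;

        snprintf(key, sizeof(key), "LENGTH(%s)=", name);
        p = strstr(blob, key);
        return p ? strtol(p + strlen(key), NULL, 10) : -1;
}

int main(void)
{
        /* Example blob as the kernel would emit it with MAX_ORDER == 10. */
        const char *blob = "LENGTH(zone.free_area)=11\n";
        long nr_orders = vmcoreinfo_length(blob, "zone.free_area");

        /* A dump tool would iterate free_area[0..nr_orders-1] per zone. */
        printf("free_area entries per zone: %ld\n", nr_orders);
        return 0;
}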

lib/test_meminit.c

Lines changed: 1 addition & 1 deletion
@@ -93,7 +93,7 @@ static int __init test_pages(int *total_failures)
 	int failures = 0, num_tests = 0;
 	int i;
 
-	for (i = 0; i <= MAX_ORDER; i++)
+	for (i = 0; i < NR_PAGE_ORDERS; i++)
 		num_tests += do_alloc_pages_order(i, &failures);
 
 	REPORT_FAILURES_IN_FN();

mm/compaction.c

Lines changed: 1 addition & 1 deletion
@@ -2229,7 +2229,7 @@ static enum compact_result __compact_finished(struct compact_control *cc)
 
 	/* Direct compactor: Is a suitable page free? */
 	ret = COMPACT_NO_SUITABLE_PAGE;
-	for (order = cc->order; order <= MAX_ORDER; order++) {
+	for (order = cc->order; order < NR_PAGE_ORDERS; order++) {
 		struct free_area *area = &cc->zone->free_area[order];
 		bool can_steal;
 
