
Commit 07f910f

Matthew Wilcox (Oracle) authored and tehcaster committed
mm: Remove slab from struct page
All members of struct slab can now be removed from struct page. This shrinks the definition of struct page by 30 LOC, making it easier to understand.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
1 parent 9cc960a commit 07f910f
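The change relies on the pattern established earlier in this series: slab state now lives in a separate struct slab that occupies the same memory as the struct page backing it, so struct page no longer has to enumerate those fields itself. A minimal, self-contained sketch of that idea follows; the type and helper names here (page_view, slab_view, slab_view_of) are illustrative stand-ins, not the kernel's actual identifiers.

/* Illustration only: two typed views of the same memory. */
struct page_view {                      /* stand-in for struct page */
        unsigned long flags;
        unsigned long storage[5];       /* reused by whichever subsystem owns the page */
        unsigned int refcount;
};

struct slab_view {                      /* stand-in for struct slab */
        unsigned long __view_flags;     /* must overlay page_view.flags */
        void *freelist;                 /* slab-only state, no longer visible in struct page */
        unsigned long counters;
        unsigned int __view_refcount;
};

/*
 * Both types describe the same memory, so converting between them is just
 * a cast; the compile-time checks that keep the shared fields lined up are
 * the SLAB_MATCH assertions visible in the mm/slab.h hunk below.
 */
static inline struct slab_view *slab_view_of(struct page_view *page)
{
        return (struct slab_view *)page;
}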

File tree

3 files changed: +0 additions, -71 deletions


include/linux/mm_types.h

Lines changed: 0 additions & 28 deletions
@@ -118,31 +118,6 @@ struct page {
                                 atomic_long_t pp_frag_count;
                         };
                 };
-                struct {        /* slab, slob and slub */
-                        union {
-                                struct list_head slab_list;
-                                struct {        /* Partial pages */
-                                        struct page *next;
-#ifdef CONFIG_64BIT
-                                        int pages;      /* Nr of pages left */
-#else
-                                        short int pages;
-#endif
-                                };
-                        };
-                        struct kmem_cache *slab_cache; /* not slob */
-                        /* Double-word boundary */
-                        void *freelist;         /* first free object */
-                        union {
-                                void *s_mem;    /* slab: first object */
-                                unsigned long counters;         /* SLUB */
-                                struct {                        /* SLUB */
-                                        unsigned inuse:16;
-                                        unsigned objects:15;
-                                        unsigned frozen:1;
-                                };
-                        };
-                };
                 struct {        /* Tail pages of compound page */
                         unsigned long compound_head;    /* Bit zero is set */
 
@@ -206,9 +181,6 @@ struct page {
                  * which are currently stored here.
                  */
                 unsigned int page_type;
-
-                unsigned int active;            /* SLAB */
-                int units;                      /* SLOB */
         };
 
         /* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */
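For orientation, the members deleted above are exactly the ones the new slab descriptor now carries. Below is a rough sketch of that descriptor, reconstructed only from the fields removed in this hunk; the real struct slab is defined in mm/slab.h, and its exact layout, naming and #ifdef coverage may differ.

/* Sketch reconstructed from the deletions above; not a quote of mm/slab.h. */
struct kmem_cache;                              /* opaque for this sketch */
struct list_head { struct list_head *next, *prev; };   /* as in <linux/types.h> */

struct slab_sketch {
        unsigned long __page_flags;             /* overlays page->flags */
        union {
                struct list_head slab_list;
                struct {                        /* Partial pages (SLUB) */
                        struct slab_sketch *next;
#ifdef CONFIG_64BIT
                        int pages;              /* Nr of pages left */
#else
                        short int pages;
#endif
                };
        };
        struct kmem_cache *slab_cache;          /* not slob */
        /* Double-word boundary */
        void *freelist;                         /* first free object */
        union {
                void *s_mem;                    /* SLAB: first object */
                unsigned long counters;         /* SLUB */
                struct {                        /* SLUB */
                        unsigned inuse:16;
                        unsigned objects:15;
                        unsigned frozen:1;
                };
        };
        union {
                unsigned int active;            /* SLAB */
                int units;                      /* SLOB */
        };
        unsigned int __page_refcount;           /* overlays page->_refcount */
};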

include/linux/page-flags.h

Lines changed: 0 additions & 37 deletions
@@ -909,43 +909,6 @@ extern bool is_free_buddy_page(struct page *page);
 
 __PAGEFLAG(Isolated, isolated, PF_ANY);
 
-/*
- * If network-based swap is enabled, sl*b must keep track of whether pages
- * were allocated from pfmemalloc reserves.
- */
-static inline int PageSlabPfmemalloc(struct page *page)
-{
-        VM_BUG_ON_PAGE(!PageSlab(page), page);
-        return PageActive(page);
-}
-
-/*
- * A version of PageSlabPfmemalloc() for opportunistic checks where the page
- * might have been freed under us and not be a PageSlab anymore.
- */
-static inline int __PageSlabPfmemalloc(struct page *page)
-{
-        return PageActive(page);
-}
-
-static inline void SetPageSlabPfmemalloc(struct page *page)
-{
-        VM_BUG_ON_PAGE(!PageSlab(page), page);
-        SetPageActive(page);
-}
-
-static inline void __ClearPageSlabPfmemalloc(struct page *page)
-{
-        VM_BUG_ON_PAGE(!PageSlab(page), page);
-        __ClearPageActive(page);
-}
-
-static inline void ClearPageSlabPfmemalloc(struct page *page)
-{
-        VM_BUG_ON_PAGE(!PageSlab(page), page);
-        ClearPageActive(page);
-}
-
 #ifdef CONFIG_MMU
 #define __PG_MLOCKED            (1UL << PG_mlocked)
 #else
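The helpers removed here recorded "allocated from pfmemalloc reserves" by reusing the PG_active bit on slab pages. Their replacements, added earlier in this series, live in mm/slab.h and operate on struct slab rather than struct page; roughly along these lines (an approximation for context, not a verbatim quote, so the exact names and signatures should be checked against mm/slab.h):

/* Approximate sketch of the struct slab based replacements in mm/slab.h. */
static inline bool slab_test_pfmemalloc(const struct slab *slab)
{
        /* pfmemalloc state is still encoded in the PG_active flag bit */
        return folio_test_active(slab_folio(slab));
}

static inline void slab_set_pfmemalloc(struct slab *slab)
{
        folio_set_active(slab_folio(slab));
}

static inline void slab_clear_pfmemalloc(struct slab *slab)
{
        folio_clear_active(slab_folio(slab));
}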

mm/slab.h

Lines changed: 0 additions & 6 deletions
@@ -67,14 +67,8 @@ struct slab {
         static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
 SLAB_MATCH(flags, __page_flags);
 SLAB_MATCH(compound_head, slab_list);  /* Ensure bit 0 is clear */
-SLAB_MATCH(slab_list, slab_list);
 #ifndef CONFIG_SLOB
 SLAB_MATCH(rcu_head, rcu_head);
-SLAB_MATCH(slab_cache, slab_cache);
-#endif
-#ifdef CONFIG_SLAB
-SLAB_MATCH(s_mem, s_mem);
-SLAB_MATCH(active, active);
 #endif
 SLAB_MATCH(_refcount, __page_refcount);
 #ifdef CONFIG_MEMCG
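The SLAB_MATCH macro visible in the context above is nothing more than a static_assert comparing offsetof() in the two types; the deletions simply drop the checks for fields that no longer exist in struct page. Here is a self-contained illustration of the same compile-time technique, using hypothetical struct names rather than the kernel's:

#include <assert.h>     /* static_assert */
#include <stddef.h>     /* offsetof */

struct wide_view {                      /* plays the role of struct page */
        unsigned long flags;
        void *word0;
        void *word1;
        unsigned int refcount;
};

struct narrow_view {                    /* plays the role of struct slab */
        unsigned long __view_flags;
        void *freelist;
        void *reserved;
        unsigned int __view_refcount;
};

/* Same idea as SLAB_MATCH: shared members must sit at identical offsets. */
#define VIEW_MATCH(w, n)                                                \
        static_assert(offsetof(struct wide_view, w) ==                  \
                      offsetof(struct narrow_view, n),                  \
                      "layouts diverged: " #w " vs " #n)

VIEW_MATCH(flags, __view_flags);
VIEW_MATCH(refcount, __view_refcount);
static_assert(sizeof(struct narrow_view) <= sizeof(struct wide_view),
              "narrow view must not outgrow the wide one");
#undef VIEW_MATCH

/*
 * If either struct ever moves a shared member, the build fails here rather
 * than corrupting memory at runtime.
 */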
