Commit bb7dbaa

Matthew Wilcox (Oracle) authored and akpm00 committed
mm: remove checks for pte_index
Since pte_index is always defined, we don't need to check whether it's
defined or not.  Delete the slow version that doesn't depend on it and
remove the #define since nobody needs to test for it.

Link: https://lkml.kernel.org/r/20230819031837.3160096-1-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Christian Dietrich <stettberger@dokucode.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1 parent 14a405c commit bb7dbaa

2 files changed (+1, -17 lines)
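For background, the removed "#define pte_index pte_index" line followed a common kernel idiom: a generic header defines a helper and then self-defines a macro with the same name so other code can probe with #ifdef whether the helper is available. Below is a minimal sketch of that idiom; example_index() is a hypothetical name used only for illustration, not kernel API. With pte_index now unconditionally defined, the probe (and the fallback it guarded in mm/memory.c) is dead code, which is what this commit removes.

/*
 * Illustration only: the "#define foo foo" probe idiom that the removed
 * line followed.  example_index() is hypothetical, not kernel API.
 */

/* Generic header: provide a default unless the architecture already did. */
#ifndef example_index
static inline unsigned long example_index(unsigned long address)
{
        return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}
#define example_index example_index   /* lets callers test #ifdef example_index */
#endif

/* Caller: pick a path depending on whether the helper is known to exist. */
#ifdef example_index
/* fast path that uses example_index() directly */
#else
/* conservative fallback -- the kind of branch this commit deletes for pte_index */
#endif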

include/linux/pgtable.h

Lines changed: 0 additions & 1 deletion
@@ -63,7 +63,6 @@ static inline unsigned long pte_index(unsigned long address)
 {
         return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
 }
-#define pte_index pte_index
 
 #ifndef pmd_index
 static inline unsigned long pmd_index(unsigned long address)
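As a quick sanity check on the generic definition kept above, here is a standalone userspace sketch of the same arithmetic. PAGE_SHIFT == 12 and PTRS_PER_PTE == 512 are assumed example values (typical for 4 KiB pages on x86-64), not taken from the patch.

#include <stdio.h>

/* Assumed example values: 4 KiB pages, 512 PTEs per page table. */
#define PAGE_SHIFT      12
#define PTRS_PER_PTE    512UL

/* Same formula as the generic pte_index() retained by this patch. */
static unsigned long pte_index(unsigned long address)
{
        return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

int main(void)
{
        /* Bits 12..20 of the address select the slot inside one page table. */
        printf("%lu\n", pte_index(0x7f1234567000UL)); /* prints 359 (0x167) */
        return 0;
}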

mm/memory.c

Lines changed: 1 addition & 16 deletions
@@ -1869,7 +1869,6 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
         return retval;
 }
 
-#ifdef pte_index
 static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte,
                         unsigned long addr, struct page *page, pgprot_t prot)
 {
@@ -1884,7 +1883,7 @@ static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte,
 }
 
 /* insert_pages() amortizes the cost of spinlock operations
- * when inserting pages in a loop. Arch *must* define pte_index.
+ * when inserting pages in a loop.
  */
 static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
                         struct page **pages, unsigned long *num, pgprot_t prot)
@@ -1943,7 +1942,6 @@ static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
         *num = remaining_pages_total;
         return ret;
 }
-#endif /* ifdef pte_index */
 
 /**
  * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
@@ -1963,7 +1961,6 @@ static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
                         struct page **pages, unsigned long *num)
 {
-#ifdef pte_index
         const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;
 
         if (addr < vma->vm_start || end_addr >= vma->vm_end)
@@ -1975,18 +1972,6 @@ int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
         }
         /* Defer page refcount checking till we're about to map that page. */
         return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
-#else
-        unsigned long idx = 0, pgcount = *num;
-        int err = -EINVAL;
-
-        for (; idx < pgcount; ++idx) {
-                err = vm_insert_page(vma, addr + (PAGE_SIZE * idx), pages[idx]);
-                if (err)
-                        break;
-        }
-        *num = pgcount - idx;
-        return err;
-#endif /* ifdef pte_index */
 }
 EXPORT_SYMBOL(vm_insert_pages);
 
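With the fallback loop gone, every caller of vm_insert_pages() now takes the batched insert_pages() path on all architectures. Below is a minimal, hypothetical sketch of such a caller; the driver state and function names (my_dev_*) are made up for illustration, and only the vm_insert_pages() signature and its batching/remaining-count behaviour come from the code above.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/printk.h>

/* Hypothetical driver state: pages allocated and filled elsewhere. */
static struct page **my_dev_pages;
static unsigned long my_dev_npages;

/*
 * Hypothetical mmap handler that maps the whole page array in one call.
 * vm_insert_pages() batches the page-table lock across the array and
 * updates "num" to the number of pages that remain un-inserted.
 */
static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
        unsigned long num = my_dev_npages;
        int err;

        if (vma_pages(vma) < my_dev_npages)
                return -EINVAL;

        err = vm_insert_pages(vma, vma->vm_start, my_dev_pages, &num);
        if (err)
                pr_warn("my_dev: %lu pages not mapped (err %d)\n", num, err);
        return err;
}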
