Commit 7abb3e4

Merge branch 'for-next/mm' into for-next/core
* for-next/mm:
  arm64: fix build warning for ARM64_MEMSTART_SHIFT
  arm64: Remove unused extern declaration init_mem_pgprot()
  arm64/mm: Set only the PTE_DIRTY bit while preserving the HW dirty state
  arm64/mm: Add pte_rdonly() helper
  arm64/mm: Directly use ID_AA64MMFR2_EL1_VARange_MASK
  arm64/mm: Replace an open coding with ID_AA64MMFR1_EL1_HAFDBS_MASK
2 parents: 438ddc3 + 4e0bacd

6 files changed, 35 insertions(+), 34 deletions(-)

arch/arm64/include/asm/kernel-pgtable.h

Lines changed: 0 additions & 27 deletions

@@ -118,31 +118,4 @@
 #define SWAPPER_RX_MMUFLAGS	(SWAPPER_RW_MMUFLAGS | PTE_RDONLY)
 #endif

-/*
- * To make optimal use of block mappings when laying out the linear
- * mapping, round down the base of physical memory to a size that can
- * be mapped efficiently, i.e., either PUD_SIZE (4k granule) or PMD_SIZE
- * (64k granule), or a multiple that can be mapped using contiguous bits
- * in the page tables: 32 * PMD_SIZE (16k granule)
- */
-#if defined(CONFIG_ARM64_4K_PAGES)
-#define ARM64_MEMSTART_SHIFT		PUD_SHIFT
-#elif defined(CONFIG_ARM64_16K_PAGES)
-#define ARM64_MEMSTART_SHIFT		CONT_PMD_SHIFT
-#else
-#define ARM64_MEMSTART_SHIFT		PMD_SHIFT
-#endif
-
-/*
- * sparsemem vmemmap imposes an additional requirement on the alignment of
- * memstart_addr, due to the fact that the base of the vmemmap region
- * has a direct correspondence, and needs to appear sufficiently aligned
- * in the virtual address space.
- */
-#if ARM64_MEMSTART_SHIFT < SECTION_SIZE_BITS
-#define ARM64_MEMSTART_ALIGN	(1UL << SECTION_SIZE_BITS)
-#else
-#define ARM64_MEMSTART_ALIGN	(1UL << ARM64_MEMSTART_SHIFT)
-#endif
-
 #endif /* __ASM_KERNEL_PGTABLE_H */

arch/arm64/include/asm/mmu.h

Lines changed: 0 additions & 1 deletion

@@ -64,7 +64,6 @@ extern void arm64_memblock_init(void);
 extern void paging_init(void);
 extern void bootmem_init(void);
 extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
-extern void init_mem_pgprot(void);
 extern void create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
 				   phys_addr_t size, pgprot_t prot);
 extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,

arch/arm64/include/asm/pgtable.h

Lines changed: 5 additions & 3 deletions

@@ -103,6 +103,7 @@ static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
 #define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
 #define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
 #define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
+#define pte_rdonly(pte)		(!!(pte_val(pte) & PTE_RDONLY))
 #define pte_user(pte)		(!!(pte_val(pte) & PTE_USER))
 #define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
 #define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
@@ -120,7 +121,7 @@ static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
	 (__boundary - 1 < (end) - 1) ? __boundary : (end);	\
 })

-#define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
+#define pte_hw_dirty(pte)	(pte_write(pte) && !pte_rdonly(pte))
 #define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
 #define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

@@ -212,7 +213,7 @@ static inline pte_t pte_wrprotect(pte_t pte)
	 * clear), set the PTE_DIRTY bit.
	 */
	if (pte_hw_dirty(pte))
-		pte = pte_mkdirty(pte);
+		pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
@@ -823,7 +824,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
			      PTE_ATTRINDX_MASK;
	/* preserve the hardware dirty information */
	if (pte_hw_dirty(pte))
-		pte = pte_mkdirty(pte);
+		pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));
+
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
 }
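
The two pte_wrprotect()/pte_modify() hunks hinge on how the hardware and software dirty bits interact: a PTE counts as hardware-dirty when it is writable and PTE_RDONLY has been cleared, and the intent here is to record that state in the software PTE_DIRTY bit only, rather than calling pte_mkdirty(), which would also clear PTE_RDONLY on writable PTEs. Below is a minimal stand-alone sketch of that logic, using placeholder bit positions rather than the real arm64 PTE layout:

/*
 * Stand-alone sketch of the dirty-bit handling above.  The bit values
 * are placeholders for illustration only; the real PTE_* definitions
 * live in asm/pgtable-prot.h and sit at different bit positions.
 */
#include <stdbool.h>
#include <stdint.h>

#define PTE_RDONLY	(UINT64_C(1) << 1)	/* placeholder */
#define PTE_WRITE	(UINT64_C(1) << 2)	/* placeholder */
#define PTE_DIRTY	(UINT64_C(1) << 3)	/* placeholder */

/* Hardware dirty: writable and PTE_RDONLY already cleared by hardware */
static bool pte_hw_dirty(uint64_t pte)
{
	return (pte & PTE_WRITE) && !(pte & PTE_RDONLY);
}

static uint64_t pte_wrprotect(uint64_t pte)
{
	/*
	 * Record the dirty state in the software bit only; PTE_RDONLY is
	 * set unconditionally below, so there is no need to touch it here.
	 */
	if (pte_hw_dirty(pte))
		pte |= PTE_DIRTY;

	pte &= ~PTE_WRITE;
	pte |= PTE_RDONLY;
	return pte;
}

int main(void)
{
	uint64_t pte = PTE_WRITE;		/* writable, HW-dirtied PTE */

	pte = pte_wrprotect(pte);
	return (pte & PTE_DIRTY) ? 0 : 1;	/* dirty state preserved */
}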

arch/arm64/kernel/head.S

Lines changed: 2 additions & 2 deletions

@@ -113,7 +113,7 @@ SYM_CODE_START(primary_entry)
	 */
 #if VA_BITS > 48
	mrs_s	x0, SYS_ID_AA64MMFR2_EL1
-	tst	x0, #0xf << ID_AA64MMFR2_EL1_VARange_SHIFT
+	tst	x0, ID_AA64MMFR2_EL1_VARange_MASK
	mov	x0, #VA_BITS
	mov	x25, #VA_BITS_MIN
	csel	x25, x25, x0, eq
@@ -756,7 +756,7 @@ SYM_FUNC_START(__cpu_secondary_check52bitva)
	b.ne	2f

	mrs_s	x0, SYS_ID_AA64MMFR2_EL1
-	and	x0, x0, #(0xf << ID_AA64MMFR2_EL1_VARange_SHIFT)
+	and	x0, x0, ID_AA64MMFR2_EL1_VARange_MASK
	cbnz	x0, 2f

	update_early_cpu_boot_status \

arch/arm64/mm/init.c

Lines changed: 27 additions & 0 deletions

@@ -73,6 +73,33 @@ phys_addr_t __ro_after_init arm64_dma_phys_limit;

 #define DEFAULT_CRASH_KERNEL_LOW_SIZE	(128UL << 20)

+/*
+ * To make optimal use of block mappings when laying out the linear
+ * mapping, round down the base of physical memory to a size that can
+ * be mapped efficiently, i.e., either PUD_SIZE (4k granule) or PMD_SIZE
+ * (64k granule), or a multiple that can be mapped using contiguous bits
+ * in the page tables: 32 * PMD_SIZE (16k granule)
+ */
+#if defined(CONFIG_ARM64_4K_PAGES)
+#define ARM64_MEMSTART_SHIFT		PUD_SHIFT
+#elif defined(CONFIG_ARM64_16K_PAGES)
+#define ARM64_MEMSTART_SHIFT		CONT_PMD_SHIFT
+#else
+#define ARM64_MEMSTART_SHIFT		PMD_SHIFT
+#endif
+
+/*
+ * sparsemem vmemmap imposes an additional requirement on the alignment of
+ * memstart_addr, due to the fact that the base of the vmemmap region
+ * has a direct correspondence, and needs to appear sufficiently aligned
+ * in the virtual address space.
+ */
+#if ARM64_MEMSTART_SHIFT < SECTION_SIZE_BITS
+#define ARM64_MEMSTART_ALIGN	(1UL << SECTION_SIZE_BITS)
+#else
+#define ARM64_MEMSTART_ALIGN	(1UL << ARM64_MEMSTART_SHIFT)
+#endif
+
 static int __init reserve_crashkernel_low(unsigned long long low_size)
 {
	unsigned long long low_base;
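
The block just added to init.c selects the linear-map base alignment from the page-table geometry. As a rough worked example, the sketch below mirrors that selection with assumed shift values for a common 48-bit VA configuration (PUD_SHIFT = 30 with 4K pages, CONT_PMD_SHIFT = 30 with 16K pages, PMD_SHIFT = 29 with 64K pages, SECTION_SIZE_BITS = 27); the real values come from the kernel configuration, so treat the numbers as illustrative only.

/*
 * Illustrative stand-in for the ARM64_MEMSTART_ALIGN selection above.
 * All shift values are assumptions for a typical 48-bit VA build.
 */
#include <stdio.h>

#define ASSUMED_PUD_SHIFT	30	/* 4K granule: 1 GiB PUD blocks */
#define ASSUMED_CONT_PMD_SHIFT	30	/* 16K granule: 32 x 32 MiB PMDs */
#define ASSUMED_PMD_SHIFT	29	/* 64K granule: 512 MiB PMD blocks */
#define ASSUMED_SECTION_BITS	27	/* stand-in for SECTION_SIZE_BITS */

static unsigned long memstart_align(unsigned int memstart_shift)
{
	/* vmemmap needs at least section alignment, as in the #if above */
	unsigned int shift = memstart_shift < ASSUMED_SECTION_BITS ?
			     ASSUMED_SECTION_BITS : memstart_shift;
	return 1UL << shift;
}

int main(void)
{
	printf("4K granule : %lu MiB\n", memstart_align(ASSUMED_PUD_SHIFT) >> 20);
	printf("16K granule: %lu MiB\n", memstart_align(ASSUMED_CONT_PMD_SHIFT) >> 20);
	printf("64K granule: %lu MiB\n", memstart_align(ASSUMED_PMD_SHIFT) >> 20);
	return 0;
}

Under those assumptions, memstart_addr would be rounded down to a 1 GiB boundary with 4K and 16K pages and a 512 MiB boundary with 64K pages.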

arch/arm64/mm/proc.S

Lines changed: 1 addition & 1 deletion

@@ -447,7 +447,7 @@ SYM_FUNC_START(__cpu_setup)
	 * via capabilities.
	 */
	mrs	x9, ID_AA64MMFR1_EL1
-	and	x9, x9, #0xf
+	and	x9, x9, ID_AA64MMFR1_EL1_HAFDBS_MASK
	cbz	x9, 1f
	orr	tcr, tcr, #TCR_HA	// hardware Access flag update
1:
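
Both assembly changes above (head.S and proc.S) swap an open-coded immediate for the named field mask. Judging from the immediates being replaced, VARange occupies ID_AA64MMFR2_EL1[19:16] and HAFDBS occupies ID_AA64MMFR1_EL1[3:0]; the authoritative mask macros are generated from arch/arm64/tools/sysreg, so the values below are assumptions used only to illustrate what the tst/and instructions are checking.

/*
 * C sketch of the register-field checks performed in head.S and proc.S
 * above.  The mask values are assumptions inferred from the old
 * open-coded immediates, not the generated sysreg definitions.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ASSUMED_VARange_MASK	(UINT64_C(0xf) << 16)	/* ID_AA64MMFR2_EL1[19:16] */
#define ASSUMED_HAFDBS_MASK	UINT64_C(0xf)		/* ID_AA64MMFR1_EL1[3:0] */

/* head.S: a non-zero VARange field selects the larger VA_BITS */
static bool varange_nonzero(uint64_t id_aa64mmfr2)
{
	return (id_aa64mmfr2 & ASSUMED_VARange_MASK) != 0;
}

/* proc.S: a non-zero HAFDBS field enables hardware Access flag updates */
static bool hafdbs_nonzero(uint64_t id_aa64mmfr1)
{
	return (id_aa64mmfr1 & ASSUMED_HAFDBS_MASK) != 0;
}

int main(void)
{
	/* Example register values, not read from real hardware */
	printf("52-bit VA range: %d\n", varange_nonzero(UINT64_C(0x10000)));
	printf("HW access flag : %d\n", hafdbs_nonzero(UINT64_C(0x2)));
	return 0;
}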
