Skip to content

Commit 1519018

Browse files
committed
Merge branches 'for-next/sve-remove-pseudo-regs', 'for-next/backtrace-ipi', 'for-next/kselftest', 'for-next/misc' and 'for-next/cpufeat-display-cores', remote-tracking branch 'arm64/for-next/perf' into for-next/core
* arm64/for-next/perf:
  perf: hisi: Fix use-after-free when register pmu fails
  drivers/perf: hisi_pcie: Initialize event->cpu only on success
  drivers/perf: hisi_pcie: Check the type first in pmu::event_init()
  perf/arm-cmn: Enable per-DTC counter allocation
  perf/arm-cmn: Rework DTC counters (again)
  perf/arm-cmn: Fix DTC domain detection
  drivers: perf: arm_pmuv3: Drop some unused arguments from armv8_pmu_init()
  drivers: perf: arm_pmuv3: Read PMMIR_EL1 unconditionally
  drivers/perf: hisi: use cpuhp_state_remove_instance_nocalls() for hisi_hns3_pmu uninit process
  drivers/perf: xgene: Use device_get_match_data()
  perf/amlogic: add missing MODULE_DEVICE_TABLE
  docs/perf: Add ampere_cspmu to toctree to fix a build warning
  perf: arm_cspmu: ampere_cspmu: Add support for Ampere SoC PMU
  perf: arm_cspmu: Support implementation specific validation
  perf: arm_cspmu: Support implementation specific filters
  perf: arm_cspmu: Split 64-bit write to 32-bit writes
  perf: arm_cspmu: Separate Arm and vendor module

* for-next/sve-remove-pseudo-regs:
  : arm64/fpsimd: Remove the vector length pseudo registers
  arm64/sve: Remove SMCR pseudo register from cpufeature code
  arm64/sve: Remove ZCR pseudo register from cpufeature code

* for-next/backtrace-ipi:
  : Add IPI for backtraces/kgdb, use NMI
  arm64: smp: Don't directly call arch_smp_send_reschedule() for wakeup
  arm64: smp: avoid NMI IPIs with broken MediaTek FW
  arm64: smp: Mark IPI globals as __ro_after_init
  arm64: kgdb: Implement kgdb_roundup_cpus() to enable pseudo-NMI roundup
  arm64: smp: IPI_CPU_STOP and IPI_CPU_CRASH_STOP should try for NMI
  arm64: smp: Add arch support for backtrace using pseudo-NMI
  arm64: smp: Remove dedicated wakeup IPI
  arm64: idle: Tag the arm64 idle functions as __cpuidle
  irqchip/gic-v3: Enable support for SGIs to act as NMIs

* for-next/kselftest:
  : Various arm64 kselftest updates
  kselftest/arm64: Validate SVCR in streaming SVE stress test

* for-next/misc:
  : Miscellaneous patches
  arm64: Restrict CPU_BIG_ENDIAN to GNU as or LLVM IAS 15.x or newer
  arm64: module: Fix PLT counting when CONFIG_RANDOMIZE_BASE=n
  arm64, irqchip/gic-v3, ACPI: Move MADT GICC enabled check into a helper
  clocksource/drivers/arm_arch_timer: limit XGene-1 workaround
  arm64: Remove system_uses_lse_atomics()
  arm64: Mark the 'addr' argument to set_ptes() and __set_pte_at() as unused
  arm64/mm: Hoist synchronization out of set_ptes() loop
  arm64: swiotlb: Reduce the default size if no ZONE_DMA bouncing needed

* for-next/cpufeat-display-cores:
  : arm64 cpufeature display enabled cores
  arm64: cpufeature: Change DBM to display enabled cores
  arm64: cpufeature: Display the set of cores with a feature
6 parents b805caf + 3912084 + ef31b8c + 11a7a42 + 146a15b + 04d402a commit 1519018

File tree

24 files changed

+280
-251
lines changed

24 files changed

+280
-251
lines changed

arch/arm64/Kconfig

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1355,6 +1355,8 @@ choice
13551355
config CPU_BIG_ENDIAN
13561356
bool "Build big-endian kernel"
13571357
depends on !LD_IS_LLD || LLD_VERSION >= 130000
1358+
# https://github.com/llvm/llvm-project/commit/1379b150991f70a5782e9a143c2ba5308da1161c
1359+
depends on AS_IS_GNU || AS_VERSION >= 150000
13581360
help
13591361
Say Y if you plan on running a kernel with a big-endian userspace.
13601362

arch/arm64/include/asm/cpu.h

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -63,12 +63,6 @@ struct cpuinfo_arm64 {
6363
u64 reg_id_aa64smfr0;
6464

6565
struct cpuinfo_32bit aarch32;
66-
67-
/* pseudo-ZCR for recording maximum ZCR_EL1 LEN value: */
68-
u64 reg_zcr;
69-
70-
/* pseudo-SMCR for recording maximum SMCR_EL1 LEN value: */
71-
u64 reg_smcr;
7266
};
7367

7468
DECLARE_PER_CPU(struct cpuinfo_arm64, cpu_data);

arch/arm64/include/asm/cpufeature.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,7 @@
2323
#include <linux/bug.h>
2424
#include <linux/jump_label.h>
2525
#include <linux/kernel.h>
26+
#include <linux/cpumask.h>
2627

2728
/*
2829
* CPU feature register tracking
@@ -380,6 +381,7 @@ struct arm64_cpu_capabilities {
380381
* method is robust against being called multiple times.
381382
*/
382383
const struct arm64_cpu_capabilities *match_list;
384+
const struct cpumask *cpus;
383385
};
384386

385387
static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap)

arch/arm64/include/asm/cputype.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -85,7 +85,8 @@
8585
#define ARM_CPU_PART_NEOVERSE_N2 0xD49
8686
#define ARM_CPU_PART_CORTEX_A78C 0xD4B
8787

88-
#define APM_CPU_PART_POTENZA 0x000
88+
#define APM_CPU_PART_XGENE 0x000
89+
#define APM_CPU_VAR_POTENZA 0x00
8990

9091
#define CAVIUM_CPU_PART_THUNDERX 0x0A1
9192
#define CAVIUM_CPU_PART_THUNDERX_81XX 0x0A2

arch/arm64/include/asm/fpsimd.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -128,7 +128,6 @@ extern void sme_kernel_enable(const struct arm64_cpu_capabilities *__unused);
128128
extern void sme2_kernel_enable(const struct arm64_cpu_capabilities *__unused);
129129
extern void fa64_kernel_enable(const struct arm64_cpu_capabilities *__unused);
130130

131-
extern u64 read_zcr_features(void);
132131
extern u64 read_smcr_features(void);
133132

134133
/*

arch/arm64/include/asm/irq.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,9 @@
66

77
#include <asm-generic/irq.h>
88

9+
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu);
10+
#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
11+
912
struct pt_regs;
1013

1114
int set_handle_irq(void (*handle_irq)(struct pt_regs *));

arch/arm64/include/asm/lse.h

Lines changed: 1 addition & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -16,14 +16,9 @@
1616
#include <asm/atomic_lse.h>
1717
#include <asm/cpucaps.h>
1818

19-
static __always_inline bool system_uses_lse_atomics(void)
20-
{
21-
return alternative_has_cap_likely(ARM64_HAS_LSE_ATOMICS);
22-
}
23-
2419
#define __lse_ll_sc_body(op, ...) \
2520
({ \
26-
system_uses_lse_atomics() ? \
21+
alternative_has_cap_likely(ARM64_HAS_LSE_ATOMICS) ? \
2722
__lse_##op(__VA_ARGS__) : \
2823
__ll_sc_##op(__VA_ARGS__); \
2924
})
@@ -34,8 +29,6 @@ static __always_inline bool system_uses_lse_atomics(void)
3429

3530
#else /* CONFIG_ARM64_LSE_ATOMICS */
3631

37-
static inline bool system_uses_lse_atomics(void) { return false; }
38-
3932
#define __lse_ll_sc_body(op, ...) __ll_sc_##op(__VA_ARGS__)
4033

4134
#define ARM64_LSE_ATOMIC_INSN(llsc, lse) llsc

arch/arm64/include/asm/mte.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -90,7 +90,7 @@ static inline bool try_page_mte_tagging(struct page *page)
9090
}
9191

9292
void mte_zero_clear_page_tags(void *addr);
93-
void mte_sync_tags(pte_t pte);
93+
void mte_sync_tags(pte_t pte, unsigned int nr_pages);
9494
void mte_copy_page_tags(void *kto, const void *kfrom);
9595
void mte_thread_init_user(void);
9696
void mte_thread_switch(struct task_struct *next);
@@ -122,7 +122,7 @@ static inline bool try_page_mte_tagging(struct page *page)
122122
static inline void mte_zero_clear_page_tags(void *addr)
123123
{
124124
}
125-
static inline void mte_sync_tags(pte_t pte)
125+
static inline void mte_sync_tags(pte_t pte, unsigned int nr_pages)
126126
{
127127
}
128128
static inline void mte_copy_page_tags(void *kto, const void *kfrom)

arch/arm64/include/asm/pgtable.h

Lines changed: 21 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -325,8 +325,7 @@ static inline void __check_safe_pte_update(struct mm_struct *mm, pte_t *ptep,
325325
__func__, pte_val(old_pte), pte_val(pte));
326326
}
327327

328-
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
329-
pte_t *ptep, pte_t pte)
328+
static inline void __sync_cache_and_tags(pte_t pte, unsigned int nr_pages)
330329
{
331330
if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
332331
__sync_icache_dcache(pte);
@@ -339,24 +338,22 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
339338
*/
340339
if (system_supports_mte() && pte_access_permitted(pte, false) &&
341340
!pte_special(pte) && pte_tagged(pte))
342-
mte_sync_tags(pte);
343-
344-
__check_safe_pte_update(mm, ptep, pte);
345-
346-
set_pte(ptep, pte);
341+
mte_sync_tags(pte, nr_pages);
347342
}
348343

349-
static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
350-
pte_t *ptep, pte_t pte, unsigned int nr)
344+
static inline void set_ptes(struct mm_struct *mm,
345+
unsigned long __always_unused addr,
346+
pte_t *ptep, pte_t pte, unsigned int nr)
351347
{
352348
page_table_check_ptes_set(mm, ptep, pte, nr);
349+
__sync_cache_and_tags(pte, nr);
353350

354351
for (;;) {
355-
__set_pte_at(mm, addr, ptep, pte);
352+
__check_safe_pte_update(mm, ptep, pte);
353+
set_pte(ptep, pte);
356354
if (--nr == 0)
357355
break;
358356
ptep++;
359-
addr += PAGE_SIZE;
360357
pte_val(pte) += PAGE_SIZE;
361358
}
362359
}
@@ -531,18 +528,29 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd)
531528
#define pud_pfn(pud) ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
532529
#define pfn_pud(pfn,prot) __pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
533530

531+
static inline void __set_pte_at(struct mm_struct *mm,
532+
unsigned long __always_unused addr,
533+
pte_t *ptep, pte_t pte, unsigned int nr)
534+
{
535+
__sync_cache_and_tags(pte, nr);
536+
__check_safe_pte_update(mm, ptep, pte);
537+
set_pte(ptep, pte);
538+
}
539+
534540
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
535541
pmd_t *pmdp, pmd_t pmd)
536542
{
537543
page_table_check_pmd_set(mm, pmdp, pmd);
538-
return __set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd));
544+
return __set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd),
545+
PMD_SIZE >> PAGE_SHIFT);
539546
}
540547

541548
static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
542549
pud_t *pudp, pud_t pud)
543550
{
544551
page_table_check_pud_set(mm, pudp, pud);
545-
return __set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud));
552+
return __set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud),
553+
PUD_SIZE >> PAGE_SHIFT);
546554
}
547555

548556
#define __p4d_to_phys(p4d) __pte_to_phys(p4d_pte(p4d))

arch/arm64/include/asm/smp.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -89,9 +89,9 @@ extern void arch_send_call_function_single_ipi(int cpu);
8989
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
9090

9191
#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
92-
extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);
92+
extern void arch_send_wakeup_ipi(unsigned int cpu);
9393
#else
94-
static inline void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
94+
static inline void arch_send_wakeup_ipi(unsigned int cpu)
9595
{
9696
BUILD_BUG();
9797
}

0 commit comments

Comments (0)