Skip to content

Commit 3a7101e

Browse files
committed
Merge tag 'powerpc-6.12-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc updates from Michael Ellerman: - Reduce alignment constraints on STRICT_KERNEL_RWX and speed-up TLB misses on 8xx and 603 - Replace kretprobe code with rethook and enable fprobe - Remove the "fast endian switch" syscall - Handle DLPAR device tree updates in kernel, allowing the deprecation of the binary /proc/powerpc/ofdt interface Thanks to Abhishek Dubey, Alex Shi, Benjamin Gray, Christophe Leroy, Gaosheng Cui, Gautam Menghani, Geert Uytterhoeven, Haren Myneni, Hari Bathini, Huang Xiaojia, Jinjie Ruan, Madhavan Srinivasan, Miguel Ojeda, Mina Almasry, Narayana Murty N, Naveen Rao, Rob Herring (Arm), Scott Cheloha, Segher Boessenkool, Stephen Rothwell, Thomas Zimmermann, Uwe Kleine-König, Vaibhav Jain, and Zhang Zekun. * tag 'powerpc-6.12-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (59 commits) powerpc/atomic: Use YZ constraints for DS-form instructions MAINTAINERS: powerpc: Add Maddy powerpc: Switch back to struct platform_driver::remove() powerpc/pseries/eeh: Fix pseries_eeh_err_inject selftests/powerpc: Allow building without static libc macintosh/via-pmu: register_pmu_pm_ops() can be __init powerpc: Stop using no_llseek powerpc/64s: Remove the "fast endian switch" syscall powerpc/mm/64s: Restrict THP to Radix or HPT w/64K pages powerpc/mm/64s: Move THP reqs into a separate symbol powerpc/64s: Make mmu_hash_ops __ro_after_init powerpc: Replace kretprobe code with rethook on powerpc powerpc: pseries: Constify struct kobj_type powerpc: powernv: Constify struct kobj_type powerpc: Constify struct kobj_type powerpc/pseries/dlpar: Add device tree nodes for DLPAR IO add powerpc/pseries/dlpar: Remove device tree node for DLPAR IO remove powerpc/pseries: Use correct data types from pseries_hp_errorlog struct powerpc/vdso: Inconditionally use CFUNC macro powerpc/32: Implement validation of emergency stack ...
2 parents 54450af + 39190ac commit 3a7101e

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

75 files changed

+781
-385
lines changed

CREDITS

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -378,6 +378,9 @@ S: 1549 Hiironen Rd.
378378
S: Brimson, MN 55602
379379
S: USA
380380

381+
N: Arnd Bergmann
382+
D: Maintainer of Cell Broadband Engine Architecture
383+
381384
N: Hennus Bergman
382385
P: 1024/77D50909 76 99 FD 31 91 E1 96 1C 90 BB 22 80 62 F6 BD 63
383386
D: Author and maintainer of the QIC-02 tape driver
@@ -1869,6 +1872,9 @@ S: K osmidomkum 723
18691872
S: 160 00 Praha 6
18701873
S: Czech Republic
18711874

1875+
N: Jeremy Kerr
1876+
D: Maintainer of SPU File System
1877+
18721878
N: Michael Kerrisk
18731879
E: mtk.manpages@gmail.com
18741880
W: https://man7.org/

MAINTAINERS

Lines changed: 3 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -5145,10 +5145,8 @@ F: Documentation/devicetree/bindings/media/cec/cec-gpio.yaml
51455145
F: drivers/media/cec/platform/cec-gpio/
51465146

51475147
CELL BROADBAND ENGINE ARCHITECTURE
5148-
M: Arnd Bergmann <arnd@arndb.de>
51495148
L: linuxppc-dev@lists.ozlabs.org
5150-
S: Supported
5151-
W: http://www.ibm.com/developerworks/power/cell/
5149+
S: Orphan
51525150
F: arch/powerpc/include/asm/cell*.h
51535151
F: arch/powerpc/include/asm/spu*.h
51545152
F: arch/powerpc/include/uapi/asm/spu*.h
@@ -12995,6 +12993,7 @@ M: Michael Ellerman <mpe@ellerman.id.au>
1299512993
R: Nicholas Piggin <npiggin@gmail.com>
1299612994
R: Christophe Leroy <christophe.leroy@csgroup.eu>
1299712995
R: Naveen N Rao <naveen@kernel.org>
12996+
R: Madhavan Srinivasan <maddy@linux.ibm.com>
1299812997
L: linuxppc-dev@lists.ozlabs.org
1299912998
S: Supported
1300012999
W: https://github.com/linuxppc/wiki/wiki
@@ -21672,10 +21671,8 @@ F: include/linux/spmi.h
2167221671
F: include/trace/events/spmi.h
2167321672

2167421673
SPU FILE SYSTEM
21675-
M: Jeremy Kerr <jk@ozlabs.org>
2167621674
L: linuxppc-dev@lists.ozlabs.org
21677-
S: Supported
21678-
W: http://www.ibm.com/developerworks/power/cell/
21675+
S: Orphan
2167921676
F: Documentation/filesystems/spufs/spufs.rst
2168021677
F: arch/powerpc/platforms/cell/spufs/
2168121678

arch/powerpc/Kconfig

Lines changed: 26 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -269,6 +269,7 @@ config PPC
269269
select HAVE_PERF_EVENTS_NMI if PPC64
270270
select HAVE_PERF_REGS
271271
select HAVE_PERF_USER_STACK_DUMP
272+
select HAVE_RETHOOK if KPROBES
272273
select HAVE_REGS_AND_STACK_ACCESS_API
273274
select HAVE_RELIABLE_STACKTRACE
274275
select HAVE_RSEQ
@@ -854,8 +855,8 @@ config DATA_SHIFT_BOOL
854855
bool "Set custom data alignment"
855856
depends on ADVANCED_OPTIONS
856857
depends on STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE
857-
depends on PPC_BOOK3S_32 || (PPC_8xx && !PIN_TLB_DATA && !STRICT_KERNEL_RWX) || \
858-
PPC_85xx
858+
depends on (PPC_8xx && !PIN_TLB_DATA && (!STRICT_KERNEL_RWX || !PIN_TLB_TEXT)) || \
859+
PPC_BOOK3S_32 || PPC_85xx
859860
help
860861
This option allows you to set the kernel data alignment. When
861862
RAM is mapped by blocks, the alignment needs to fit the size and
@@ -871,9 +872,9 @@ config DATA_SHIFT
871872
range 20 24 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_85xx
872873
default 22 if STRICT_KERNEL_RWX && PPC_BOOK3S_32
873874
default 18 if (DEBUG_PAGEALLOC || KFENCE) && PPC_BOOK3S_32
874-
default 23 if STRICT_KERNEL_RWX && PPC_8xx
875-
default 23 if (DEBUG_PAGEALLOC || KFENCE) && PPC_8xx && PIN_TLB_DATA
876-
default 19 if (DEBUG_PAGEALLOC || KFENCE) && PPC_8xx
875+
default 23 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_8xx && \
876+
(PIN_TLB_DATA || PIN_TLB_TEXT)
877+
default 19 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_8xx
877878
default 24 if STRICT_KERNEL_RWX && PPC_85xx
878879
default PAGE_SHIFT
879880
help
@@ -1274,8 +1275,27 @@ config TASK_SIZE_BOOL
12741275
config TASK_SIZE
12751276
hex "Size of user task space" if TASK_SIZE_BOOL
12761277
default "0x80000000" if PPC_8xx
1277-
default "0xb0000000" if PPC_BOOK3S_32
1278+
default "0xb0000000" if PPC_BOOK3S_32 && EXECMEM
12781279
default "0xc0000000"
1280+
1281+
config MODULES_SIZE_BOOL
1282+
bool "Set custom size for modules/execmem area"
1283+
depends on EXECMEM && ADVANCED_OPTIONS
1284+
help
1285+
This option allows you to set the size of kernel virtual address
1286+
space dedicated for modules/execmem.
1287+
For the time being it is only for 8xx and book3s/32. Other
1288+
platforms share it with vmalloc space.
1289+
1290+
Say N here unless you know what you are doing.
1291+
1292+
config MODULES_SIZE
1293+
int "Size of modules/execmem area (In Mbytes)" if MODULES_SIZE_BOOL
1294+
range 1 256 if EXECMEM
1295+
default 64 if EXECMEM && PPC_BOOK3S_32
1296+
default 32 if EXECMEM && PPC_8xx
1297+
default 0
1298+
12791299
endmenu
12801300

12811301
if PPC64

arch/powerpc/Kconfig.debug

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -379,12 +379,6 @@ config FAIL_IOMMU
379379

380380
If you are unsure, say N.
381381

382-
config PPC_FAST_ENDIAN_SWITCH
383-
bool "Deprecated fast endian-switch syscall"
384-
depends on DEBUG_KERNEL && PPC_BOOK3S_64
385-
help
386-
If you're unsure what this is, say N.
387-
388382
config KASAN_SHADOW_OFFSET
389383
hex
390384
depends on KASAN

arch/powerpc/configs/ppc64_defconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -93,6 +93,7 @@ CONFIG_MEMORY_HOTREMOVE=y
9393
CONFIG_KSM=y
9494
CONFIG_TRANSPARENT_HUGEPAGE=y
9595
CONFIG_MEM_SOFT_DIRTY=y
96+
CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
9697
CONFIG_ZONE_DEVICE=y
9798
CONFIG_NET=y
9899
CONFIG_PACKET=y

arch/powerpc/include/asm/asm-compat.h

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -26,19 +26,23 @@
2626
#define PPC_MIN_STKFRM 112
2727

2828
#ifdef __BIG_ENDIAN__
29-
#define LHZX_BE stringify_in_c(lhzx)
3029
#define LWZX_BE stringify_in_c(lwzx)
3130
#define LDX_BE stringify_in_c(ldx)
3231
#define STWX_BE stringify_in_c(stwx)
3332
#define STDX_BE stringify_in_c(stdx)
3433
#else
35-
#define LHZX_BE stringify_in_c(lhbrx)
3634
#define LWZX_BE stringify_in_c(lwbrx)
3735
#define LDX_BE stringify_in_c(ldbrx)
3836
#define STWX_BE stringify_in_c(stwbrx)
3937
#define STDX_BE stringify_in_c(stdbrx)
4038
#endif
4139

40+
#ifdef CONFIG_CC_IS_CLANG
41+
#define DS_FORM_CONSTRAINT "Z<>"
42+
#else
43+
#define DS_FORM_CONSTRAINT "YZ<>"
44+
#endif
45+
4246
#else /* 32-bit */
4347

4448
/* operations for longs and pointers */

arch/powerpc/include/asm/atomic.h

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@
1111
#include <asm/cmpxchg.h>
1212
#include <asm/barrier.h>
1313
#include <asm/asm-const.h>
14+
#include <asm/asm-compat.h>
1415

1516
/*
1617
* Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
@@ -197,7 +198,7 @@ static __inline__ s64 arch_atomic64_read(const atomic64_t *v)
197198
if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
198199
__asm__ __volatile__("ld %0,0(%1)" : "=r"(t) : "b"(&v->counter));
199200
else
200-
__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
201+
__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : DS_FORM_CONSTRAINT (v->counter));
201202

202203
return t;
203204
}
@@ -208,7 +209,7 @@ static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i)
208209
if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
209210
__asm__ __volatile__("std %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter));
210211
else
211-
__asm__ __volatile__("std%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
212+
__asm__ __volatile__("std%U0%X0 %1,%0" : "=" DS_FORM_CONSTRAINT (v->counter) : "r"(i));
212213
}
213214

214215
#define ATOMIC64_OP(op, asm_op) \

arch/powerpc/include/asm/book3s/32/pgtable.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -196,7 +196,8 @@ void unmap_kernel_page(unsigned long va);
196196
#endif
197197

198198
#define MODULES_END ALIGN_DOWN(PAGE_OFFSET, SZ_256M)
199-
#define MODULES_VADDR (MODULES_END - SZ_256M)
199+
#define MODULES_SIZE (CONFIG_MODULES_SIZE * SZ_1M)
200+
#define MODULES_VADDR (MODULES_END - MODULES_SIZE)
200201

201202
#ifndef __ASSEMBLY__
202203
#include <linux/sched.h>

arch/powerpc/include/asm/book3s/64/hash-4k.h

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -74,6 +74,26 @@
7474
#define remap_4k_pfn(vma, addr, pfn, prot) \
7575
remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot))
7676

77+
/*
78+
* With 4K page size the real_pte machinery is all nops.
79+
*/
80+
#define __real_pte(e, p, o) ((real_pte_t){(e)})
81+
#define __rpte_to_pte(r) ((r).pte)
82+
#define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >> H_PAGE_F_GIX_SHIFT)
83+
84+
#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \
85+
do { \
86+
index = 0; \
87+
shift = mmu_psize_defs[psize].shift; \
88+
89+
#define pte_iterate_hashed_end() } while(0)
90+
91+
/*
92+
* We expect this to be called only for user addresses or kernel virtual
93+
* addresses other than the linear mapping.
94+
*/
95+
#define pte_pagesize_index(mm, addr, pte) MMU_PAGE_4K
96+
7797
/*
7898
* 4K PTE format is different from 64K PTE format. Saving the hash_slot is just
7999
* a matter of returning the PTE bits that need to be modified. On 64K PTE,

arch/powerpc/include/asm/book3s/64/pgtable.h

Lines changed: 0 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -330,32 +330,6 @@ static inline unsigned long pud_leaf_size(pud_t pud)
330330

331331
#ifndef __ASSEMBLY__
332332

333-
/*
334-
* This is the default implementation of various PTE accessors, it's
335-
* used in all cases except Book3S with 64K pages where we have a
336-
* concept of sub-pages
337-
*/
338-
#ifndef __real_pte
339-
340-
#define __real_pte(e, p, o) ((real_pte_t){(e)})
341-
#define __rpte_to_pte(r) ((r).pte)
342-
#define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >> H_PAGE_F_GIX_SHIFT)
343-
344-
#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \
345-
do { \
346-
index = 0; \
347-
shift = mmu_psize_defs[psize].shift; \
348-
349-
#define pte_iterate_hashed_end() } while(0)
350-
351-
/*
352-
* We expect this to be called only for user addresses or kernel virtual
353-
* addresses other than the linear mapping.
354-
*/
355-
#define pte_pagesize_index(mm, addr, pte) MMU_PAGE_4K
356-
357-
#endif /* __real_pte */
358-
359333
static inline unsigned long pte_update(struct mm_struct *mm, unsigned long addr,
360334
pte_t *ptep, unsigned long clr,
361335
unsigned long set, int huge)

0 commit comments

Comments
 (0)