Skip to content

Commit 4f42d0b

Browse files
committed
Merge tag 's390-6.14-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Alexander Gordeev: - Select config option KASAN_VMALLOC if KASAN is enabled - Select config option VMAP_STACK unconditionally - Implement arch_atomic_inc() / arch_atomic_dec() functions which result in a single instruction if compiled for z196 or newer architectures - Make layering between atomic.h and atomic_ops.h consistent - Comment s390 preempt_count implementation - Remove pre MARCH_HAS_Z196_FEATURES preempt count implementation - GCC uses the number of lines of an inline assembly to calculate number of instructions and decide on inlining. Therefore remove superfluous new lines from a couple of inline assemblies. - Provide arch_atomic_*_and_test() implementations that allow the compiler to generate slightly better code. - Optimize __preempt_count_dec_and_test() - Remove __bootdata annotations from declarations in header files - Add missing include of <linux/smp.h> in abs_lowcore.h to provide declarations for get_cpu() and put_cpu() used in the code - Fix suboptimal kernel image base when running make kasan.config - Remove huge_pte_none() and huge_pte_none_mostly() as they are identical to the generic variants - Remove unused PAGE_KERNEL_EXEC, SEGMENT_KERNEL_EXEC, and REGION3_KERNEL_EXEC defines - Simplify noexec page protection handling and change the page, segment and region3 protection definitions automatically if the instruction execution-protection facility is not available - Save one instruction and prefer EXRL instruction over EX in string, xor_*(), amode31 and other functions - Create /dev/diag misc device to fetch diagnose specific information from the kernel and provide it to userspace - Retrieve electrical power readings using DIAGNOSE 0x324 ioctl - Make ccw_device_get_ciw() consistent and use array indices instead of pointer arithmetic - s390/qdio: Move memory alloc/pointer arithmetic for slib and sl into one place - The sysfs core now allows instances of 'struct bin_attribute' to be moved into read-only memory. 
Make use of that in s390 code - Add missing TLB range adjustment in pud_free_tlb() - Improve topology setup by adding early polarization detection - Fix length checks in codepage_convert() function - The generic bitops implementation is nearly identical to the s390 one. Switch to the generic variant and decrease a bit the kernel image size - Provide an optimized arch_test_bit() implementation which makes use of flag output constraint. This generates slightly better code - Provide memory topology information obtained with DIAGNOSE 0x310 using ioctl. - Various other small improvements, fixes, and cleanups Also, some changes came in through a merge of 'pci-device-recovery' branch: - Add PCI error recovery status mechanism - Simplify and document debug_next_entry() logic - Split private data allocation and freeing out of debug file open() and close() operations - Add debug_dump() function that gets a textual representation of a debug info (e.g. PCI recovery hardware error logs) - Add formatted content of pci_debug_msg_id to the PCI report * tag 's390-6.14-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (48 commits) s390/futex: Fix FUTEX_OP_ANDN implementation s390/diag: Add memory topology information via diag310 s390/bitops: Provide optimized arch_test_bit() s390/bitops: Switch to generic bitops s390/ebcdic: Fix length decrement in codepage_convert() s390/ebcdic: Fix length check in codepage_convert() s390/ebcdic: Use exrl instead of ex s390/amode31: Use exrl instead of ex s390/stackleak: Use exrl instead of ex in __stackleak_poison() s390/lib: Use exrl instead of ex in xor functions s390/topology: Improve topology detection s390/tlb: Add missing TLB range adjustment s390/pkey: Constify 'struct bin_attribute' s390/sclp: Constify 'struct bin_attribute' s390/pci: Constify 'struct bin_attribute' s390/ipl: Constify 'struct bin_attribute' s390/crypto/cpacf: Constify 'struct bin_attribute' s390/qdio: Move memory alloc/pointer arithmetic for slib and sl into 
one place s390/cio: Use array indices instead of pointer arithmetic s390/qdio: Rename feature flag aif_osa to aif_qdio ...
2 parents a312e17 + 2670157 commit 4f42d0b

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

65 files changed

+1670
-796
lines changed

arch/s390/Kconfig

Lines changed: 2 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -233,6 +233,7 @@ config S390
233233
select HAVE_VIRT_CPU_ACCOUNTING_IDLE
234234
select IOMMU_HELPER if PCI
235235
select IOMMU_SUPPORT if PCI
236+
select KASAN_VMALLOC if KASAN
236237
select LOCK_MM_AND_FIND_VMA
237238
select MMU_GATHER_MERGE_VMAS
238239
select MMU_GATHER_NO_GATHER
@@ -255,6 +256,7 @@ config S390
255256
select USER_STACKTRACE_SUPPORT
256257
select VDSO_GETRANDOM
257258
select VIRT_CPU_ACCOUNTING
259+
select VMAP_STACK
258260
select ZONE_DMA
259261
# Note: keep the above list sorted alphabetically
260262

@@ -688,32 +690,6 @@ config MAX_PHYSMEM_BITS
688690
Increasing the number of bits also increases the kernel image size.
689691
By default 46 bits (64TB) are supported.
690692

691-
config CHECK_STACK
692-
def_bool y
693-
depends on !VMAP_STACK
694-
prompt "Detect kernel stack overflow"
695-
help
696-
This option enables the compiler option -mstack-guard and
697-
-mstack-size if they are available. If the compiler supports them
698-
it will emit additional code to each function prolog to trigger
699-
an illegal operation if the kernel stack is about to overflow.
700-
701-
Say N if you are unsure.
702-
703-
config STACK_GUARD
704-
int "Size of the guard area (128-1024)"
705-
range 128 1024
706-
depends on CHECK_STACK
707-
default "256"
708-
help
709-
This allows you to specify the size of the guard area at the lower
710-
end of the kernel stack. If the kernel stack points into the guard
711-
area on function entry an illegal operation is triggered. The size
712-
needs to be a power of 2. Please keep in mind that the size of an
713-
interrupt frame is 184 bytes for 31 bit and 328 bytes on 64 bit.
714-
The minimum size for the stack guard should be 256 for 31 bit and
715-
512 for 64 bit.
716-
717693
endmenu
718694

719695
menu "I/O subsystem"

arch/s390/Makefile

Lines changed: 0 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -72,15 +72,6 @@ cflags-$(CONFIG_FRAME_POINTER) += -fno-optimize-sibling-calls
7272
KBUILD_AFLAGS_DECOMPRESSOR += $(aflags-y)
7373
KBUILD_CFLAGS_DECOMPRESSOR += $(cflags-y)
7474

75-
ifneq ($(call cc-option,-mstack-size=8192 -mstack-guard=128),)
76-
CC_FLAGS_CHECK_STACK := -mstack-size=$(STACK_SIZE)
77-
ifeq ($(call cc-option,-mstack-size=8192),)
78-
CC_FLAGS_CHECK_STACK += -mstack-guard=$(CONFIG_STACK_GUARD)
79-
endif
80-
export CC_FLAGS_CHECK_STACK
81-
cflags-$(CONFIG_CHECK_STACK) += $(CC_FLAGS_CHECK_STACK)
82-
endif
83-
8475
ifdef CONFIG_EXPOLINE
8576
ifdef CONFIG_EXPOLINE_EXTERN
8677
CC_FLAGS_EXPOLINE := -mindirect-branch=thunk-extern

arch/s390/boot/boot.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,6 @@
1313
struct machine_info {
1414
unsigned char has_edat1 : 1;
1515
unsigned char has_edat2 : 1;
16-
unsigned char has_nx : 1;
1716
};
1817

1918
struct vmlinux_info {

arch/s390/boot/startup.c

Lines changed: 11 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,9 @@ unsigned long __bootdata_preserved(vmemmap_size);
3030
unsigned long __bootdata_preserved(MODULES_VADDR);
3131
unsigned long __bootdata_preserved(MODULES_END);
3232
unsigned long __bootdata_preserved(max_mappable);
33+
unsigned long __bootdata_preserved(page_noexec_mask);
34+
unsigned long __bootdata_preserved(segment_noexec_mask);
35+
unsigned long __bootdata_preserved(region_noexec_mask);
3336
int __bootdata_preserved(relocate_lowcore);
3437

3538
u64 __bootdata_preserved(stfle_fac_list[16]);
@@ -51,8 +54,14 @@ static void detect_facilities(void)
5154
}
5255
if (test_facility(78))
5356
machine.has_edat2 = 1;
54-
if (test_facility(130))
55-
machine.has_nx = 1;
57+
page_noexec_mask = -1UL;
58+
segment_noexec_mask = -1UL;
59+
region_noexec_mask = -1UL;
60+
if (!test_facility(130)) {
61+
page_noexec_mask &= ~_PAGE_NOEXEC;
62+
segment_noexec_mask &= ~_SEGMENT_ENTRY_NOEXEC;
63+
region_noexec_mask &= ~_REGION_ENTRY_NOEXEC;
64+
}
5665
}
5766

5867
static int cmma_test_essa(void)

arch/s390/boot/vmem.c

Lines changed: 3 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -63,13 +63,10 @@ static void kasan_populate_shadow(unsigned long kernel_start, unsigned long kern
6363
pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
6464
p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);
6565
unsigned long memgap_start = 0;
66-
unsigned long untracked_end;
6766
unsigned long start, end;
6867
int i;
6968

7069
pte_z = __pte(__pa(kasan_early_shadow_page) | pgprot_val(PAGE_KERNEL_RO));
71-
if (!machine.has_nx)
72-
pte_z = clear_pte_bit(pte_z, __pgprot(_PAGE_NOEXEC));
7370
crst_table_init((unsigned long *)kasan_early_shadow_p4d, p4d_val(p4d_z));
7471
crst_table_init((unsigned long *)kasan_early_shadow_pud, pud_val(pud_z));
7572
crst_table_init((unsigned long *)kasan_early_shadow_pmd, pmd_val(pmd_z));
@@ -93,15 +90,10 @@ static void kasan_populate_shadow(unsigned long kernel_start, unsigned long kern
9390
kasan_populate(kernel_start + TEXT_OFFSET, kernel_end, POPULATE_KASAN_MAP_SHADOW);
9491
kasan_populate(0, (unsigned long)__identity_va(0), POPULATE_KASAN_ZERO_SHADOW);
9592
kasan_populate(AMODE31_START, AMODE31_END, POPULATE_KASAN_ZERO_SHADOW);
96-
if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
97-
untracked_end = VMALLOC_START;
98-
/* shallowly populate kasan shadow for vmalloc and modules */
99-
kasan_populate(VMALLOC_START, MODULES_END, POPULATE_KASAN_SHALLOW);
100-
} else {
101-
untracked_end = MODULES_VADDR;
102-
}
93+
/* shallowly populate kasan shadow for vmalloc and modules */
94+
kasan_populate(VMALLOC_START, MODULES_END, POPULATE_KASAN_SHALLOW);
10395
/* populate kasan shadow for untracked memory */
104-
kasan_populate((unsigned long)__identity_va(ident_map_size), untracked_end,
96+
kasan_populate((unsigned long)__identity_va(ident_map_size), VMALLOC_START,
10597
POPULATE_KASAN_ZERO_SHADOW);
10698
kasan_populate(kernel_end, _REGION1_SIZE, POPULATE_KASAN_ZERO_SHADOW);
10799
}
@@ -300,8 +292,6 @@ static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long e
300292
continue;
301293
entry = __pte(_pa(addr, PAGE_SIZE, mode));
302294
entry = set_pte_bit(entry, PAGE_KERNEL);
303-
if (!machine.has_nx)
304-
entry = clear_pte_bit(entry, __pgprot(_PAGE_NOEXEC));
305295
set_pte(pte, entry);
306296
pages++;
307297
}
@@ -326,8 +316,6 @@ static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long e
326316
if (can_large_pmd(pmd, addr, next, mode)) {
327317
entry = __pmd(_pa(addr, _SEGMENT_SIZE, mode));
328318
entry = set_pmd_bit(entry, SEGMENT_KERNEL);
329-
if (!machine.has_nx)
330-
entry = clear_pmd_bit(entry, __pgprot(_SEGMENT_ENTRY_NOEXEC));
331319
set_pmd(pmd, entry);
332320
pages++;
333321
continue;
@@ -359,8 +347,6 @@ static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long e
359347
if (can_large_pud(pud, addr, next, mode)) {
360348
entry = __pud(_pa(addr, _REGION3_SIZE, mode));
361349
entry = set_pud_bit(entry, REGION3_KERNEL);
362-
if (!machine.has_nx)
363-
entry = clear_pud_bit(entry, __pgprot(_REGION_ENTRY_NOEXEC));
364350
set_pud(pud, entry);
365351
pages++;
366352
continue;

arch/s390/configs/kasan.config

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
# Help: Enable KASan for debugging
22
CONFIG_KASAN=y
33
CONFIG_KASAN_INLINE=y
4-
CONFIG_KASAN_VMALLOC=y
4+
CONFIG_KERNEL_IMAGE_BASE=0x7FFFE0000000

arch/s390/include/asm/abs_lowcore.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
#ifndef _ASM_S390_ABS_LOWCORE_H
33
#define _ASM_S390_ABS_LOWCORE_H
44

5-
#include <asm/sections.h>
5+
#include <linux/smp.h>
66
#include <asm/lowcore.h>
77

88
#define ABS_LOWCORE_MAP_SIZE (NR_CPUS * sizeof(struct lowcore))
@@ -25,7 +25,7 @@ static inline void put_abs_lowcore(struct lowcore *lc)
2525
put_cpu();
2626
}
2727

28-
extern int __bootdata_preserved(relocate_lowcore);
28+
extern int relocate_lowcore;
2929

3030
static inline int have_relocated_lowcore(void)
3131
{

arch/s390/include/asm/atomic.h

Lines changed: 64 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -17,13 +17,13 @@
1717

1818
static __always_inline int arch_atomic_read(const atomic_t *v)
1919
{
20-
return __atomic_read(v);
20+
return __atomic_read(&v->counter);
2121
}
2222
#define arch_atomic_read arch_atomic_read
2323

2424
static __always_inline void arch_atomic_set(atomic_t *v, int i)
2525
{
26-
__atomic_set(v, i);
26+
__atomic_set(&v->counter, i);
2727
}
2828
#define arch_atomic_set arch_atomic_set
2929

@@ -45,6 +45,36 @@ static __always_inline void arch_atomic_add(int i, atomic_t *v)
4545
}
4646
#define arch_atomic_add arch_atomic_add
4747

48+
static __always_inline void arch_atomic_inc(atomic_t *v)
49+
{
50+
__atomic_add_const(1, &v->counter);
51+
}
52+
#define arch_atomic_inc arch_atomic_inc
53+
54+
static __always_inline void arch_atomic_dec(atomic_t *v)
55+
{
56+
__atomic_add_const(-1, &v->counter);
57+
}
58+
#define arch_atomic_dec arch_atomic_dec
59+
60+
static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
61+
{
62+
return __atomic_add_and_test_barrier(-i, &v->counter);
63+
}
64+
#define arch_atomic_sub_and_test arch_atomic_sub_and_test
65+
66+
static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
67+
{
68+
return __atomic_add_const_and_test_barrier(-1, &v->counter);
69+
}
70+
#define arch_atomic_dec_and_test arch_atomic_dec_and_test
71+
72+
static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
73+
{
74+
return __atomic_add_const_and_test_barrier(1, &v->counter);
75+
}
76+
#define arch_atomic_inc_and_test arch_atomic_inc_and_test
77+
4878
#define arch_atomic_sub(_i, _v) arch_atomic_add(-(int)(_i), _v)
4979
#define arch_atomic_sub_return(_i, _v) arch_atomic_add_return(-(int)(_i), _v)
5080
#define arch_atomic_fetch_sub(_i, _v) arch_atomic_fetch_add(-(int)(_i), _v)
@@ -94,13 +124,13 @@ static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int n
94124

95125
static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
96126
{
97-
return __atomic64_read(v);
127+
return __atomic64_read((long *)&v->counter);
98128
}
99129
#define arch_atomic64_read arch_atomic64_read
100130

101131
static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
102132
{
103-
__atomic64_set(v, i);
133+
__atomic64_set((long *)&v->counter, i);
104134
}
105135
#define arch_atomic64_set arch_atomic64_set
106136

@@ -122,6 +152,36 @@ static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
122152
}
123153
#define arch_atomic64_add arch_atomic64_add
124154

155+
static __always_inline void arch_atomic64_inc(atomic64_t *v)
156+
{
157+
__atomic64_add_const(1, (long *)&v->counter);
158+
}
159+
#define arch_atomic64_inc arch_atomic64_inc
160+
161+
static __always_inline void arch_atomic64_dec(atomic64_t *v)
162+
{
163+
__atomic64_add_const(-1, (long *)&v->counter);
164+
}
165+
#define arch_atomic64_dec arch_atomic64_dec
166+
167+
static __always_inline bool arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
168+
{
169+
return __atomic64_add_and_test_barrier(-i, (long *)&v->counter);
170+
}
171+
#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
172+
173+
static __always_inline bool arch_atomic64_dec_and_test(atomic64_t *v)
174+
{
175+
return __atomic64_add_const_and_test_barrier(-1, (long *)&v->counter);
176+
}
177+
#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
178+
179+
static __always_inline bool arch_atomic64_inc_and_test(atomic64_t *v)
180+
{
181+
return __atomic64_add_const_and_test_barrier(1, (long *)&v->counter);
182+
}
183+
#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
184+
125185
static __always_inline s64 arch_atomic64_xchg(atomic64_t *v, s64 new)
126186
{
127187
return arch_xchg(&v->counter, new);

0 commit comments

Comments
 (0)