
Commit cb00323

Merge tag 'v6.11.5' into 6.11
This is the 6.11.5 stable release
2 parents: 304e2da + 05b1367

File tree
121 files changed: +858 additions, -451 deletions


Makefile

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 6
 PATCHLEVEL = 11
-SUBLEVEL = 4
+SUBLEVEL = 5
 EXTRAVERSION =
 NAME = Baby Opossum Posse
 

arch/arm64/boot/dts/marvell/cn9130-sr-som.dtsi

Lines changed: 1 addition & 1 deletion
@@ -136,7 +136,7 @@
 	};
 
 	cp0_mdio_pins: cp0-mdio-pins {
-		marvell,pins = "mpp40", "mpp41";
+		marvell,pins = "mpp0", "mpp1";
 		marvell,function = "ge";
 	};
 

arch/arm64/include/asm/uprobes.h

Lines changed: 3 additions & 5 deletions
@@ -10,11 +10,9 @@
 #include <asm/insn.h>
 #include <asm/probes.h>
 
-#define MAX_UINSN_BYTES		AARCH64_INSN_SIZE
-
 #define UPROBE_SWBP_INSN	cpu_to_le32(BRK64_OPCODE_UPROBES)
 #define UPROBE_SWBP_INSN_SIZE	AARCH64_INSN_SIZE
-#define UPROBE_XOL_SLOT_BYTES	MAX_UINSN_BYTES
+#define UPROBE_XOL_SLOT_BYTES	AARCH64_INSN_SIZE
 
 typedef __le32 uprobe_opcode_t;
 
@@ -23,8 +21,8 @@ struct arch_uprobe_task {
 
 struct arch_uprobe {
 	union {
-		u8 insn[MAX_UINSN_BYTES];
-		u8 ixol[MAX_UINSN_BYTES];
+		__le32 insn;
+		__le32 ixol;
 	};
 	struct arch_probe_insn api;
 	bool simulate;

arch/arm64/kernel/probes/decode-insn.c

Lines changed: 11 additions & 5 deletions
@@ -99,10 +99,6 @@ arm_probe_decode_insn(probe_opcode_t insn, struct arch_probe_insn *api)
 		   aarch64_insn_is_blr(insn) ||
 		   aarch64_insn_is_ret(insn)) {
 		api->handler = simulate_br_blr_ret;
-	} else if (aarch64_insn_is_ldr_lit(insn)) {
-		api->handler = simulate_ldr_literal;
-	} else if (aarch64_insn_is_ldrsw_lit(insn)) {
-		api->handler = simulate_ldrsw_literal;
 	} else {
 		/*
 		 * Instruction cannot be stepped out-of-line and we don't
@@ -140,6 +136,17 @@ arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
 	probe_opcode_t insn = le32_to_cpu(*addr);
 	probe_opcode_t *scan_end = NULL;
 	unsigned long size = 0, offset = 0;
+	struct arch_probe_insn *api = &asi->api;
+
+	if (aarch64_insn_is_ldr_lit(insn)) {
+		api->handler = simulate_ldr_literal;
+		decoded = INSN_GOOD_NO_SLOT;
+	} else if (aarch64_insn_is_ldrsw_lit(insn)) {
+		api->handler = simulate_ldrsw_literal;
+		decoded = INSN_GOOD_NO_SLOT;
+	} else {
+		decoded = arm_probe_decode_insn(insn, &asi->api);
+	}
 
 	/*
 	 * If there's a symbol defined in front of and near enough to
@@ -157,7 +164,6 @@ arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
 		else
 			scan_end = addr - MAX_ATOMIC_CONTEXT_SIZE;
 	}
-	decoded = arm_probe_decode_insn(insn, &asi->api);
 
 	if (decoded != INSN_REJECTED && scan_end)
 		if (is_probed_address_atomic(addr - 1, scan_end))
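
An LDR/LDRSW (literal) load is PC-relative, which is why the hunks above keep these instructions on the simulation path instead of letting them be single-stepped from an out-of-line slot. A stand-alone illustration of the problem, using made-up addresses rather than kernel code:

#include <stdint.h>
#include <stdio.h>

/* Illustration only: an LDR (literal) loads from PC + displacement, so
 * executing a copy of it from an XOL slot would read the wrong address.
 * All addresses below are hypothetical. */
int main(void)
{
	uint64_t orig_pc  = 0xffff800080010000ULL;	/* probed LDR (literal) */
	uint64_t xol_slot = 0xffff800082345000ULL;	/* hypothetical XOL slot */
	int64_t  disp     = 0x100;			/* sign_extend(imm19) * 4 */

	printf("intended load address: %#llx\n",
	       (unsigned long long)(orig_pc + disp));
	printf("slot-relative address: %#llx\n",
	       (unsigned long long)(xol_slot + disp));
	return 0;
}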

arch/arm64/kernel/probes/simulate-insn.c

Lines changed: 7 additions & 11 deletions
@@ -171,32 +171,28 @@ simulate_tbz_tbnz(u32 opcode, long addr, struct pt_regs *regs)
 void __kprobes
 simulate_ldr_literal(u32 opcode, long addr, struct pt_regs *regs)
 {
-	u64 *load_addr;
+	unsigned long load_addr;
 	int xn = opcode & 0x1f;
-	int disp;
 
-	disp = ldr_displacement(opcode);
-	load_addr = (u64 *) (addr + disp);
+	load_addr = addr + ldr_displacement(opcode);
 
 	if (opcode & (1 << 30))	/* x0-x30 */
-		set_x_reg(regs, xn, *load_addr);
+		set_x_reg(regs, xn, READ_ONCE(*(u64 *)load_addr));
 	else			/* w0-w30 */
-		set_w_reg(regs, xn, *load_addr);
+		set_w_reg(regs, xn, READ_ONCE(*(u32 *)load_addr));
 
 	instruction_pointer_set(regs, instruction_pointer(regs) + 4);
 }
 
 void __kprobes
 simulate_ldrsw_literal(u32 opcode, long addr, struct pt_regs *regs)
 {
-	s32 *load_addr;
+	unsigned long load_addr;
 	int xn = opcode & 0x1f;
-	int disp;
 
-	disp = ldr_displacement(opcode);
-	load_addr = (s32 *) (addr + disp);
+	load_addr = addr + ldr_displacement(opcode);
 
-	set_x_reg(regs, xn, *load_addr);
+	set_x_reg(regs, xn, READ_ONCE(*(s32 *)load_addr));
 
 	instruction_pointer_set(regs, instruction_pointer(regs) + 4);
 }
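
The displacement both handlers add to the probe address comes from the LDR (literal) encoding, where bits [23:5] hold a signed 19-bit word offset and bit 30 selects the 64-bit register form. A minimal stand-alone decode of that field, as an illustration rather than the kernel's ldr_displacement() helper:

#include <stdint.h>

/* Illustrative decode of an AArch64 LDR/LDRSW (literal) displacement:
 * imm19 sits in bits [23:5] as a signed word offset, so the byte
 * displacement is sign_extend(imm19) * 4. */
static int64_t example_ldr_displacement(uint32_t opcode)
{
	int64_t imm19 = (opcode >> 5) & 0x7ffff;

	if (imm19 & (1 << 18))		/* sign bit of the 19-bit field */
		imm19 -= (1 << 19);
	return imm19 * 4;		/* words to bytes */
}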

arch/arm64/kernel/probes/uprobes.c

Lines changed: 2 additions & 2 deletions
@@ -42,7 +42,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
 	else if (!IS_ALIGNED(addr, AARCH64_INSN_SIZE))
 		return -EINVAL;
 
-	insn = *(probe_opcode_t *)(&auprobe->insn[0]);
+	insn = le32_to_cpu(auprobe->insn);
 
 	switch (arm_probe_decode_insn(insn, &auprobe->api)) {
 	case INSN_REJECTED:
@@ -108,7 +108,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
 	if (!auprobe->simulate)
 		return false;
 
-	insn = *(probe_opcode_t *)(&auprobe->insn[0]);
+	insn = le32_to_cpu(auprobe->insn);
 	addr = instruction_pointer(regs);
 
 	if (auprobe->api.handler)

arch/s390/kvm/diag.c

Lines changed: 1 addition & 1 deletion
@@ -77,7 +77,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
 	vcpu->stat.instruction_diagnose_258++;
 	if (vcpu->run->s.regs.gprs[rx] & 7)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-	rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm));
+	rc = read_guest_real(vcpu, vcpu->run->s.regs.gprs[rx], &parm, sizeof(parm));
 	if (rc)
 		return kvm_s390_inject_prog_cond(vcpu, rc);
 	if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258)

arch/s390/kvm/gaccess.c

Lines changed: 4 additions & 0 deletions
@@ -828,6 +828,8 @@ static int access_guest_page(struct kvm *kvm, enum gacc_mode mode, gpa_t gpa,
 	const gfn_t gfn = gpa_to_gfn(gpa);
 	int rc;
 
+	if (!gfn_to_memslot(kvm, gfn))
+		return PGM_ADDRESSING;
 	if (mode == GACC_STORE)
 		rc = kvm_write_guest_page(kvm, gfn, data, offset, len);
 	else
@@ -985,6 +987,8 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
 		gra += fragment_len;
 		data += fragment_len;
 	}
+	if (rc > 0)
+		vcpu->arch.pgm.code = rc;
 	return rc;
 }
 

arch/s390/kvm/gaccess.h

Lines changed: 8 additions & 6 deletions
@@ -405,11 +405,12 @@ int read_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data,
  * @len: number of bytes to copy
  *
  * Copy @len bytes from @data (kernel space) to @gra (guest real address).
- * It is up to the caller to ensure that the entire guest memory range is
- * valid memory before calling this function.
  * Guest low address and key protection are not checked.
  *
- * Returns zero on success or -EFAULT on error.
+ * Returns zero on success, -EFAULT when copying from @data failed, or
+ * PGM_ADRESSING in case @gra is outside a memslot. In this case, pgm check info
+ * is also stored to allow injecting into the guest (if applicable) using
+ * kvm_s390_inject_prog_cond().
  *
  * If an error occurs data may have been copied partially to guest memory.
  */
@@ -428,11 +429,12 @@ int write_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
  * @len: number of bytes to copy
  *
  * Copy @len bytes from @gra (guest real address) to @data (kernel space).
- * It is up to the caller to ensure that the entire guest memory range is
- * valid memory before calling this function.
  * Guest key protection is not checked.
 *
- * Returns zero on success or -EFAULT on error.
+ * Returns zero on success, -EFAULT when copying to @data failed, or
+ * PGM_ADRESSING in case @gra is outside a memslot. In this case, pgm check info
+ * is also stored to allow injecting into the guest (if applicable) using
+ * kvm_s390_inject_prog_cond().
 *
  * If an error occurs data may have been copied partially to kernel space.
  */
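
With this contract, a caller can hand any non-zero return value straight to kvm_s390_inject_prog_cond(), which is what the diag 0x258 hunk above now does for read_guest_real(). A sketch of that caller pattern, with a hypothetical wrapper name:

/* Hypothetical helper, for illustration only: read from a guest real address
 * and convert any failure into a program interrupt where applicable. */
static int example_read_real(struct kvm_vcpu *vcpu, unsigned long gra,
			     void *buf, unsigned long len)
{
	int rc = read_guest_real(vcpu, gra, buf, len);

	/* rc is 0, -EFAULT, or a positive pgm code such as PGM_ADDRESSING */
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}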

arch/x86/entry/entry.S

Lines changed: 5 additions & 0 deletions
@@ -9,6 +9,8 @@
 #include <asm/unwind_hints.h>
 #include <asm/segment.h>
 #include <asm/cache.h>
+#include <asm/cpufeatures.h>
+#include <asm/nospec-branch.h>
 
 #include "calling.h"
 
@@ -19,6 +21,9 @@ SYM_FUNC_START(entry_ibpb)
 	movl	$PRED_CMD_IBPB, %eax
 	xorl	%edx, %edx
 	wrmsr
+
+	/* Make sure IBPB clears return stack preductions too. */
+	FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_BUG_IBPB_NO_RET
 	RET
 SYM_FUNC_END(entry_ibpb)
 /* For KVM */

arch/x86/entry/entry_32.S

Lines changed: 4 additions & 2 deletions
@@ -871,6 +871,8 @@ SYM_FUNC_START(entry_SYSENTER_32)
 
 	/* Now ready to switch the cr3 */
 	SWITCH_TO_USER_CR3 scratch_reg=%eax
+	/* Clobbers ZF */
+	CLEAR_CPU_BUFFERS
 
 	/*
 	 * Restore all flags except IF. (We restore IF separately because
@@ -881,7 +883,6 @@ SYM_FUNC_START(entry_SYSENTER_32)
 	BUG_IF_WRONG_CR3 no_user_check=1
 	popfl
 	popl	%eax
-	CLEAR_CPU_BUFFERS
 
 	/*
 	 * Return back to the vDSO, which will pop ecx and edx.
@@ -1144,7 +1145,6 @@ SYM_CODE_START(asm_exc_nmi)
 
 	/* Not on SYSENTER stack. */
 	call	exc_nmi
-	CLEAR_CPU_BUFFERS
 	jmp	.Lnmi_return
 
 .Lnmi_from_sysenter_stack:
@@ -1165,6 +1165,7 @@ SYM_CODE_START(asm_exc_nmi)
 
 	CHECK_AND_APPLY_ESPFIX
 	RESTORE_ALL_NMI cr3_reg=%edi pop=4
+	CLEAR_CPU_BUFFERS
 	jmp	.Lirq_return
 
 #ifdef CONFIG_X86_ESPFIX32
@@ -1206,6 +1207,7 @@ SYM_CODE_START(asm_exc_nmi)
 	 * 1 - orig_ax
 	 */
 	lss	(1+5+6)*4(%esp), %esp	# back to espfix stack
+	CLEAR_CPU_BUFFERS
 	jmp	.Lirq_return
 #endif
 SYM_CODE_END(asm_exc_nmi)

arch/x86/include/asm/cpufeatures.h

Lines changed: 3 additions & 1 deletion
@@ -215,7 +215,7 @@
 #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* Disable Speculative Store Bypass. */
 #define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* AMD SSBD implementation via LS_CFG MSR */
 #define X86_FEATURE_IBRS ( 7*32+25) /* "ibrs" Indirect Branch Restricted Speculation */
-#define X86_FEATURE_IBPB ( 7*32+26) /* "ibpb" Indirect Branch Prediction Barrier */
+#define X86_FEATURE_IBPB ( 7*32+26) /* "ibpb" Indirect Branch Prediction Barrier without a guaranteed RSB flush */
 #define X86_FEATURE_STIBP ( 7*32+27) /* "stibp" Single Thread Indirect Branch Predictors */
 #define X86_FEATURE_ZEN ( 7*32+28) /* Generic flag for all Zen and newer */
 #define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* L1TF workaround PTE inversion */
@@ -348,6 +348,7 @@
 #define X86_FEATURE_CPPC (13*32+27) /* "cppc" Collaborative Processor Performance Control */
 #define X86_FEATURE_AMD_PSFD (13*32+28) /* Predictive Store Forwarding Disable */
 #define X86_FEATURE_BTC_NO (13*32+29) /* Not vulnerable to Branch Type Confusion */
+#define X86_FEATURE_AMD_IBPB_RET (13*32+30) /* IBPB clears return address predictor */
 #define X86_FEATURE_BRS (13*32+31) /* "brs" Branch Sampling available */
 
 /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
@@ -523,4 +524,5 @@
 #define X86_BUG_DIV0 X86_BUG(1*32 + 1) /* "div0" AMD DIV0 speculation bug */
 #define X86_BUG_RFDS X86_BUG(1*32 + 2) /* "rfds" CPU is vulnerable to Register File Data Sampling */
 #define X86_BUG_BHI X86_BUG(1*32 + 3) /* "bhi" CPU is affected by Branch History Injection */
+#define X86_BUG_IBPB_NO_RET X86_BUG(1*32 + 4) /* "ibpb_no_ret" IBPB omits return target predictions */
 #endif /* _ASM_X86_CPUFEATURES_H */

arch/x86/include/asm/nospec-branch.h

Lines changed: 10 additions & 1 deletion
@@ -323,7 +323,16 @@
  * Note: Only the memory operand variant of VERW clears the CPU buffers.
  */
 .macro CLEAR_CPU_BUFFERS
-	ALTERNATIVE "", __stringify(verw _ASM_RIP(mds_verw_sel)), X86_FEATURE_CLEAR_CPU_BUF
+#ifdef CONFIG_X86_64
+	ALTERNATIVE "", "verw mds_verw_sel(%rip)", X86_FEATURE_CLEAR_CPU_BUF
+#else
+	/*
+	 * In 32bit mode, the memory operand must be a %cs reference. The data
+	 * segments may not be usable (vm86 mode), and the stack segment may not
+	 * be flat (ESPFIX32).
+	 */
+	ALTERNATIVE "", "verw %cs:mds_verw_sel", X86_FEATURE_CLEAR_CPU_BUF
+#endif
 .endm
 
 #ifdef CONFIG_X86_64

arch/x86/kernel/apic/apic.c

Lines changed: 13 additions & 1 deletion
@@ -440,7 +440,19 @@ static int lapic_timer_shutdown(struct clock_event_device *evt)
 	v = apic_read(APIC_LVTT);
 	v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
 	apic_write(APIC_LVTT, v);
-	apic_write(APIC_TMICT, 0);
+
+	/*
+	 * Setting APIC_LVT_MASKED (above) should be enough to tell
+	 * the hardware that this timer will never fire. But AMD
+	 * erratum 411 and some Intel CPU behavior circa 2024 say
+	 * otherwise. Time for belt and suspenders programming: mask
+	 * the timer _and_ zero the counter registers:
+	 */
+	if (v & APIC_LVT_TIMER_TSCDEADLINE)
+		wrmsrl(MSR_IA32_TSC_DEADLINE, 0);
+	else
+		apic_write(APIC_TMICT, 0);
+
 	return 0;
 }
 

arch/x86/kernel/cpu/amd.c

Lines changed: 2 additions & 1 deletion
@@ -1218,5 +1218,6 @@ void amd_check_microcode(void)
 	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
 		return;
 
-	on_each_cpu(zenbleed_check_cpu, NULL, 1);
+	if (cpu_feature_enabled(X86_FEATURE_ZEN2))
+		on_each_cpu(zenbleed_check_cpu, NULL, 1);
 }

arch/x86/kernel/cpu/bugs.c

Lines changed: 32 additions & 0 deletions
@@ -1113,8 +1113,25 @@ static void __init retbleed_select_mitigation(void)
 
 	case RETBLEED_MITIGATION_IBPB:
 		setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
+
+		/*
+		 * IBPB on entry already obviates the need for
+		 * software-based untraining so clear those in case some
+		 * other mitigation like SRSO has selected them.
+		 */
+		setup_clear_cpu_cap(X86_FEATURE_UNRET);
+		setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
+
 		setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
 		mitigate_smt = true;
+
+		/*
+		 * There is no need for RSB filling: entry_ibpb() ensures
+		 * all predictions, including the RSB, are invalidated,
+		 * regardless of IBPB implementation.
+		 */
+		setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
+
 		break;
 
 	case RETBLEED_MITIGATION_STUFF:
@@ -2621,6 +2638,14 @@ static void __init srso_select_mitigation(void)
 		if (has_microcode) {
 			setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
 			srso_mitigation = SRSO_MITIGATION_IBPB;
+
+			/*
+			 * IBPB on entry already obviates the need for
+			 * software-based untraining so clear those in case some
+			 * other mitigation like Retbleed has selected them.
+			 */
+			setup_clear_cpu_cap(X86_FEATURE_UNRET);
+			setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
 		}
 	} else {
 		pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
@@ -2632,6 +2657,13 @@ static void __init srso_select_mitigation(void)
 		if (!boot_cpu_has(X86_FEATURE_ENTRY_IBPB) && has_microcode) {
 			setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
 			srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
+
+			/*
+			 * There is no need for RSB filling: entry_ibpb() ensures
+			 * all predictions, including the RSB, are invalidated,
+			 * regardless of IBPB implementation.
+			 */
+			setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
 		}
 	} else {
 		pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n");

arch/x86/kernel/cpu/common.c

Lines changed: 3 additions & 0 deletions
@@ -1443,6 +1443,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	     boot_cpu_has(X86_FEATURE_HYPERVISOR)))
 		setup_force_cpu_bug(X86_BUG_BHI);
 
+	if (cpu_has(c, X86_FEATURE_AMD_IBPB) && !cpu_has(c, X86_FEATURE_AMD_IBPB_RET))
+		setup_force_cpu_bug(X86_BUG_IBPB_NO_RET);
+
 	if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
 		return;
 
