
Commit a7ef8d7

Merge tag 'v6.6.58' into 6.6
This is the 6.6.58 stable release
2 parents: aa9f55c + 18916a6


118 files changed: +1122 −570 lines


Makefile

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 6
 PATCHLEVEL = 6
-SUBLEVEL = 57
+SUBLEVEL = 58
 EXTRAVERSION =
 NAME = Pinguïn Aangedreven

arch/arm64/include/asm/uprobes.h

Lines changed: 3 additions & 5 deletions
@@ -10,11 +10,9 @@
 #include <asm/insn.h>
 #include <asm/probes.h>
 
-#define MAX_UINSN_BYTES AARCH64_INSN_SIZE
-
 #define UPROBE_SWBP_INSN cpu_to_le32(BRK64_OPCODE_UPROBES)
 #define UPROBE_SWBP_INSN_SIZE AARCH64_INSN_SIZE
-#define UPROBE_XOL_SLOT_BYTES MAX_UINSN_BYTES
+#define UPROBE_XOL_SLOT_BYTES AARCH64_INSN_SIZE
 
 typedef __le32 uprobe_opcode_t;
 
@@ -23,8 +21,8 @@ struct arch_uprobe_task {
 
 struct arch_uprobe {
         union {
-                u8 insn[MAX_UINSN_BYTES];
-                u8 ixol[MAX_UINSN_BYTES];
+                __le32 insn;
+                __le32 ixol;
         };
         struct arch_probe_insn api;
         bool simulate;

arch/arm64/kernel/probes/decode-insn.c

Lines changed: 11 additions & 5 deletions
@@ -99,10 +99,6 @@ arm_probe_decode_insn(probe_opcode_t insn, struct arch_probe_insn *api)
                    aarch64_insn_is_blr(insn) ||
                    aarch64_insn_is_ret(insn)) {
                 api->handler = simulate_br_blr_ret;
-        } else if (aarch64_insn_is_ldr_lit(insn)) {
-                api->handler = simulate_ldr_literal;
-        } else if (aarch64_insn_is_ldrsw_lit(insn)) {
-                api->handler = simulate_ldrsw_literal;
         } else {
                 /*
                  * Instruction cannot be stepped out-of-line and we don't
@@ -140,6 +136,17 @@ arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
         probe_opcode_t insn = le32_to_cpu(*addr);
         probe_opcode_t *scan_end = NULL;
         unsigned long size = 0, offset = 0;
+        struct arch_probe_insn *api = &asi->api;
+
+        if (aarch64_insn_is_ldr_lit(insn)) {
+                api->handler = simulate_ldr_literal;
+                decoded = INSN_GOOD_NO_SLOT;
+        } else if (aarch64_insn_is_ldrsw_lit(insn)) {
+                api->handler = simulate_ldrsw_literal;
+                decoded = INSN_GOOD_NO_SLOT;
+        } else {
+                decoded = arm_probe_decode_insn(insn, &asi->api);
+        }
 
         /*
          * If there's a symbol defined in front of and near enough to
@@ -157,7 +164,6 @@ arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
                 else
                         scan_end = addr - MAX_ATOMIC_CONTEXT_SIZE;
         }
-        decoded = arm_probe_decode_insn(insn, &asi->api);
 
         if (decoded != INSN_REJECTED && scan_end)
                 if (is_probed_address_atomic(addr - 1, scan_end))

arch/arm64/kernel/probes/simulate-insn.c

Lines changed: 7 additions & 11 deletions
@@ -171,32 +171,28 @@ simulate_tbz_tbnz(u32 opcode, long addr, struct pt_regs *regs)
 void __kprobes
 simulate_ldr_literal(u32 opcode, long addr, struct pt_regs *regs)
 {
-        u64 *load_addr;
+        unsigned long load_addr;
         int xn = opcode & 0x1f;
-        int disp;
 
-        disp = ldr_displacement(opcode);
-        load_addr = (u64 *) (addr + disp);
+        load_addr = addr + ldr_displacement(opcode);
 
         if (opcode & (1 << 30)) /* x0-x30 */
-                set_x_reg(regs, xn, *load_addr);
+                set_x_reg(regs, xn, READ_ONCE(*(u64 *)load_addr));
         else                    /* w0-w30 */
-                set_w_reg(regs, xn, *load_addr);
+                set_w_reg(regs, xn, READ_ONCE(*(u32 *)load_addr));
 
         instruction_pointer_set(regs, instruction_pointer(regs) + 4);
 }
 
 void __kprobes
 simulate_ldrsw_literal(u32 opcode, long addr, struct pt_regs *regs)
 {
-        s32 *load_addr;
+        unsigned long load_addr;
         int xn = opcode & 0x1f;
-        int disp;
 
-        disp = ldr_displacement(opcode);
-        load_addr = (s32 *) (addr + disp);
+        load_addr = addr + ldr_displacement(opcode);
 
-        set_x_reg(regs, xn, *load_addr);
+        set_x_reg(regs, xn, READ_ONCE(*(s32 *)load_addr));
 
         instruction_pointer_set(regs, instruction_pointer(regs) + 4);
 }
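
Note on the hunk above: ldr_displacement() extracts the PC-relative offset encoded in the LDR/LDRSW (literal) instruction, and the new code adds it to the probed address before doing a cast-and-READ_ONCE load of the size selected by bit 30 of the opcode. As a stand-alone illustration of that encoding (imm19 in bits [23:5] is a signed word offset); this is a sketch based on the A64 encoding, not code copied from the kernel's ldr_displacement() helper:

#include <stdint.h>

/* Sketch: byte displacement of an A64 LDR/LDRSW (literal) instruction.
 * imm19 (bits [23:5]) is a signed count of 32-bit words relative to the
 * instruction's own address.
 */
static long ldr_literal_displacement(uint32_t opcode)
{
        long imm19 = (opcode >> 5) & 0x7ffff;

        if (imm19 & 0x40000)            /* sign-extend the 19-bit field */
                imm19 -= 0x80000;

        return imm19 * 4;               /* words -> bytes */
}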

arch/arm64/kernel/probes/uprobes.c

Lines changed: 2 additions & 2 deletions
@@ -42,7 +42,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
         else if (!IS_ALIGNED(addr, AARCH64_INSN_SIZE))
                 return -EINVAL;
 
-        insn = *(probe_opcode_t *)(&auprobe->insn[0]);
+        insn = le32_to_cpu(auprobe->insn);
 
         switch (arm_probe_decode_insn(insn, &auprobe->api)) {
         case INSN_REJECTED:
@@ -108,7 +108,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
         if (!auprobe->simulate)
                 return false;
 
-        insn = *(probe_opcode_t *)(&auprobe->insn[0]);
+        insn = le32_to_cpu(auprobe->insn);
         addr = instruction_pointer(regs);
 
         if (auprobe->api.handler)

arch/s390/kvm/diag.c

Lines changed: 1 addition & 1 deletion
@@ -77,7 +77,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
         vcpu->stat.instruction_diagnose_258++;
         if (vcpu->run->s.regs.gprs[rx] & 7)
                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-        rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm));
+        rc = read_guest_real(vcpu, vcpu->run->s.regs.gprs[rx], &parm, sizeof(parm));
         if (rc)
                 return kvm_s390_inject_prog_cond(vcpu, rc);
         if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258)

arch/s390/kvm/gaccess.c

Lines changed: 4 additions & 0 deletions
@@ -1001,6 +1001,8 @@ static int access_guest_page(struct kvm *kvm, enum gacc_mode mode, gpa_t gpa,
         const gfn_t gfn = gpa_to_gfn(gpa);
         int rc;
 
+        if (!gfn_to_memslot(kvm, gfn))
+                return PGM_ADDRESSING;
         if (mode == GACC_STORE)
                 rc = kvm_write_guest_page(kvm, gfn, data, offset, len);
         else
@@ -1158,6 +1160,8 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
                 gra += fragment_len;
                 data += fragment_len;
         }
+        if (rc > 0)
+                vcpu->arch.pgm.code = rc;
         return rc;
 }
 

arch/s390/kvm/gaccess.h

Lines changed: 8 additions & 6 deletions
@@ -405,11 +405,12 @@ int read_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data,
  * @len: number of bytes to copy
  *
  * Copy @len bytes from @data (kernel space) to @gra (guest real address).
- * It is up to the caller to ensure that the entire guest memory range is
- * valid memory before calling this function.
  * Guest low address and key protection are not checked.
  *
- * Returns zero on success or -EFAULT on error.
+ * Returns zero on success, -EFAULT when copying from @data failed, or
+ * PGM_ADRESSING in case @gra is outside a memslot. In this case, pgm check info
+ * is also stored to allow injecting into the guest (if applicable) using
+ * kvm_s390_inject_prog_cond().
  *
  * If an error occurs data may have been copied partially to guest memory.
  */
@@ -428,11 +429,12 @@ int write_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
  * @len: number of bytes to copy
  *
  * Copy @len bytes from @gra (guest real address) to @data (kernel space).
- * It is up to the caller to ensure that the entire guest memory range is
- * valid memory before calling this function.
  * Guest key protection is not checked.
  *
- * Returns zero on success or -EFAULT on error.
+ * Returns zero on success, -EFAULT when copying to @data failed, or
+ * PGM_ADRESSING in case @gra is outside a memslot. In this case, pgm check info
+ * is also stored to allow injecting into the guest (if applicable) using
+ * kvm_s390_inject_prog_cond().
  *
  * If an error occurs data may have been copied partially to kernel space.
  */
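
The updated kerneldoc above reflects the new return contract from the gaccess.c hunk: besides 0 and -EFAULT, read_guest_real() and write_guest_real() can now return the positive program-interruption code PGM_ADDRESSING when @gra lies outside any memslot, with pgm check info stored for later injection. The diag.c hunk earlier in this commit shows the intended caller pattern; a minimal sketch of it (variable names are illustrative):

        rc = read_guest_real(vcpu, gra, &parm, sizeof(parm));
        if (rc)         /* -EFAULT, or a positive PGM_* code */
                return kvm_s390_inject_prog_cond(vcpu, rc);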

arch/x86/entry/entry.S

Lines changed: 5 additions & 0 deletions
@@ -9,6 +9,8 @@
 #include <asm/unwind_hints.h>
 #include <asm/segment.h>
 #include <asm/cache.h>
+#include <asm/cpufeatures.h>
+#include <asm/nospec-branch.h>
 
 .pushsection .noinstr.text, "ax"
 
@@ -17,6 +19,9 @@ SYM_FUNC_START(entry_ibpb)
         movl $PRED_CMD_IBPB, %eax
         xorl %edx, %edx
         wrmsr
+
+        /* Make sure IBPB clears return stack preductions too. */
+        FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_BUG_IBPB_NO_RET
         RET
 SYM_FUNC_END(entry_ibpb)
 /* For KVM */

arch/x86/entry/entry_32.S

Lines changed: 4 additions & 2 deletions
@@ -875,6 +875,8 @@ SYM_FUNC_START(entry_SYSENTER_32)
 
         /* Now ready to switch the cr3 */
         SWITCH_TO_USER_CR3 scratch_reg=%eax
+        /* Clobbers ZF */
+        CLEAR_CPU_BUFFERS
 
         /*
          * Restore all flags except IF. (We restore IF separately because
@@ -885,7 +887,6 @@ SYM_FUNC_START(entry_SYSENTER_32)
         BUG_IF_WRONG_CR3 no_user_check=1
         popfl
         popl %eax
-        CLEAR_CPU_BUFFERS
 
         /*
          * Return back to the vDSO, which will pop ecx and edx.
@@ -1148,7 +1149,6 @@ SYM_CODE_START(asm_exc_nmi)
 
         /* Not on SYSENTER stack. */
         call exc_nmi
-        CLEAR_CPU_BUFFERS
         jmp .Lnmi_return
 
 .Lnmi_from_sysenter_stack:
@@ -1169,6 +1169,7 @@ SYM_CODE_START(asm_exc_nmi)
 
         CHECK_AND_APPLY_ESPFIX
         RESTORE_ALL_NMI cr3_reg=%edi pop=4
+        CLEAR_CPU_BUFFERS
         jmp .Lirq_return
 
 #ifdef CONFIG_X86_ESPFIX32
@@ -1210,6 +1211,7 @@ SYM_CODE_START(asm_exc_nmi)
          * 1 - orig_ax
          */
         lss (1+5+6)*4(%esp), %esp # back to espfix stack
+        CLEAR_CPU_BUFFERS
         jmp .Lirq_return
 #endif
 SYM_CODE_END(asm_exc_nmi)

arch/x86/include/asm/cpufeatures.h

Lines changed: 3 additions & 1 deletion
@@ -216,7 +216,7 @@
 #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
 #define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* "" AMD SSBD implementation via LS_CFG MSR */
 #define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */
-#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
+#define X86_FEATURE_IBPB ( 7*32+26) /* "ibpb" Indirect Branch Prediction Barrier without a guaranteed RSB flush */
 #define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
 #define X86_FEATURE_ZEN ( 7*32+28) /* "" Generic flag for all Zen and newer */
 #define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* "" L1TF workaround PTE inversion */
@@ -347,6 +347,7 @@
 #define X86_FEATURE_CPPC (13*32+27) /* Collaborative Processor Performance Control */
 #define X86_FEATURE_AMD_PSFD (13*32+28) /* "" Predictive Store Forwarding Disable */
 #define X86_FEATURE_BTC_NO (13*32+29) /* "" Not vulnerable to Branch Type Confusion */
+#define X86_FEATURE_AMD_IBPB_RET (13*32+30) /* "" IBPB clears return address predictor */
 #define X86_FEATURE_BRS (13*32+31) /* Branch Sampling available */
 
 /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
@@ -516,4 +517,5 @@
 #define X86_BUG_DIV0 X86_BUG(1*32 + 1) /* AMD DIV0 speculation bug */
 #define X86_BUG_RFDS X86_BUG(1*32 + 2) /* CPU is vulnerable to Register File Data Sampling */
 #define X86_BUG_BHI X86_BUG(1*32 + 3) /* CPU is affected by Branch History Injection */
+#define X86_BUG_IBPB_NO_RET X86_BUG(1*32 + 4) /* "ibpb_no_ret" IBPB omits return target predictions */
 #endif /* _ASM_X86_CPUFEATURES_H */

arch/x86/include/asm/nospec-branch.h

Lines changed: 10 additions & 1 deletion
@@ -332,7 +332,16 @@
  * Note: Only the memory operand variant of VERW clears the CPU buffers.
  */
 .macro CLEAR_CPU_BUFFERS
-        ALTERNATIVE "", __stringify(verw _ASM_RIP(mds_verw_sel)), X86_FEATURE_CLEAR_CPU_BUF
+#ifdef CONFIG_X86_64
+        ALTERNATIVE "", "verw mds_verw_sel(%rip)", X86_FEATURE_CLEAR_CPU_BUF
+#else
+        /*
+         * In 32bit mode, the memory operand must be a %cs reference. The data
+         * segments may not be usable (vm86 mode), and the stack segment may not
+         * be flat (ESPFIX32).
+         */
+        ALTERNATIVE "", "verw %cs:mds_verw_sel", X86_FEATURE_CLEAR_CPU_BUF
+#endif
 .endm
 
 #ifdef CONFIG_X86_64

arch/x86/kernel/apic/apic.c

Lines changed: 13 additions & 1 deletion
@@ -473,7 +473,19 @@ static int lapic_timer_shutdown(struct clock_event_device *evt)
         v = apic_read(APIC_LVTT);
         v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
         apic_write(APIC_LVTT, v);
-        apic_write(APIC_TMICT, 0);
+
+        /*
+         * Setting APIC_LVT_MASKED (above) should be enough to tell
+         * the hardware that this timer will never fire. But AMD
+         * erratum 411 and some Intel CPU behavior circa 2024 say
+         * otherwise. Time for belt and suspenders programming: mask
+         * the timer _and_ zero the counter registers:
+         */
+        if (v & APIC_LVT_TIMER_TSCDEADLINE)
+                wrmsrl(MSR_IA32_TSC_DEADLINE, 0);
+        else
+                apic_write(APIC_TMICT, 0);
+
         return 0;
 }
 

arch/x86/kernel/cpu/amd.c

Lines changed: 2 additions & 1 deletion
@@ -1374,7 +1374,8 @@ void amd_check_microcode(void)
         if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
                 return;
 
-        on_each_cpu(zenbleed_check_cpu, NULL, 1);
+        if (cpu_feature_enabled(X86_FEATURE_ZEN2))
+                on_each_cpu(zenbleed_check_cpu, NULL, 1);
 }
 
 /*

arch/x86/kernel/cpu/bugs.c

Lines changed: 32 additions & 0 deletions
@@ -1113,8 +1113,25 @@ static void __init retbleed_select_mitigation(void)
 
         case RETBLEED_MITIGATION_IBPB:
                 setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
+
+                /*
+                 * IBPB on entry already obviates the need for
+                 * software-based untraining so clear those in case some
+                 * other mitigation like SRSO has selected them.
+                 */
+                setup_clear_cpu_cap(X86_FEATURE_UNRET);
+                setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
+
                 setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
                 mitigate_smt = true;
+
+                /*
+                 * There is no need for RSB filling: entry_ibpb() ensures
+                 * all predictions, including the RSB, are invalidated,
+                 * regardless of IBPB implementation.
+                 */
+                setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
+
                 break;
 
         case RETBLEED_MITIGATION_STUFF:
@@ -2610,6 +2627,14 @@ static void __init srso_select_mitigation(void)
                 if (has_microcode) {
                         setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
                         srso_mitigation = SRSO_MITIGATION_IBPB;
+
+                        /*
+                         * IBPB on entry already obviates the need for
+                         * software-based untraining so clear those in case some
+                         * other mitigation like Retbleed has selected them.
+                         */
+                        setup_clear_cpu_cap(X86_FEATURE_UNRET);
+                        setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
                 }
         } else {
                 pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n");
@@ -2622,6 +2647,13 @@ static void __init srso_select_mitigation(void)
                 if (!boot_cpu_has(X86_FEATURE_ENTRY_IBPB) && has_microcode) {
                         setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
                         srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
+
+                        /*
+                         * There is no need for RSB filling: entry_ibpb() ensures
+                         * all predictions, including the RSB, are invalidated,
+                         * regardless of IBPB implementation.
+                         */
+                        setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
                 }
         } else {
                 pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");

arch/x86/kernel/cpu/common.c

Lines changed: 3 additions & 0 deletions
@@ -1483,6 +1483,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
              boot_cpu_has(X86_FEATURE_HYPERVISOR)))
                 setup_force_cpu_bug(X86_BUG_BHI);
 
+        if (cpu_has(c, X86_FEATURE_AMD_IBPB) && !cpu_has(c, X86_FEATURE_AMD_IBPB_RET))
+                setup_force_cpu_bug(X86_BUG_IBPB_NO_RET);
+
         if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
                 return;
 
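
Taken together, the x86 hunks in this merge form one change: cpufeatures.h adds X86_FEATURE_AMD_IBPB_RET and X86_BUG_IBPB_NO_RET, common.c (above) sets the bug bit on CPUs that advertise IBPB but not the return-predictor-clearing variant, entry.S makes entry_ibpb() stuff the RSB on exactly those CPUs via FILL_RETURN_BUFFER keyed on the bug bit, and bugs.c can therefore drop X86_FEATURE_RSB_VMEXIT whenever entry IBPB is in use. In rough C-level pseudocode (an interpretive sketch with hypothetical helper names, not code from the commit):

/* Sketch of entry_ibpb()'s intended behaviour after this merge (assumed, simplified). */
static void entry_ibpb_sketch(void)
{
        issue_ibpb();                           /* hypothetical: WRMSR PRED_CMD = IBPB */

        /* The RSB-stuffing loop is patched in only where the bug bit is set. */
        if (boot_cpu_has_bug(X86_BUG_IBPB_NO_RET))
                stuff_rsb(RSB_CLEAR_LOOPS);     /* hypothetical: dummy-call loop */
}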
