Skip to content

Commit c47d122

Browse files
committed
Merge tag 'perf-tools-fixes-for-v6.4-1-2023-05-20' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux
Pull perf tools fixes from Arnaldo Carvalho de Melo: - Fail graciously if BUILD_BPF_SKEL=1 is specified and clang isn't available - Add empty 'struct rq' to 'perf lock contention' to satisfy libbpf 'runqueue' type verification. This feature is built only with BUILD_BPF_SKEL=1 - Make vmlinux.h use bpf.h and perf_event.h in source directory, not system ones that may be old and not have things like 'union perf_sample_weight' - Add system include paths to BPF builds to pick things missing in the headers included by clang -target bpf - Update various header copies with the kernel sources - Change divide by zero and not supported events behavior to show 'nan'/'not counted' in 'perf stat' output. This happens when using things like 'perf stat -M TopdownL2 true', involving JSON metrics - Update no event/metric expectations affected by using JSON metrics in 'perf stat -ddd' perf test - Avoid segv with 'perf stat --topdown' for metrics without a group - Do not assume which events may have a PMU name, allowing the logic to keep an AUX event group together. 
Makes this usecase work again: $ perf record --no-bpf-event -c 10 -e '{intel_pt//,tlb_flush.stlb_any/aux-sample-size=8192/pp}:u' -- sleep 0.1 [ perf record: Woken up 1 times to write data ] [ perf record: Captured and wrote 0.078 MB perf.data ] $ perf script -F-dso,+addr | grep -C5 tlb_flush.stlb_any | head -11 sleep 20444 [003] 7939.510243: 1 branches:uH: 7f5350cc82a2 dl_main+0x9a2 => 7f5350cb38f0 _dl_add_to_namespace_list+0x0 sleep 20444 [003] 7939.510243: 1 branches:uH: 7f5350cb3908 _dl_add_to_namespace_list+0x18 => 7f5350cbb080 rtld_mutex_dummy+0x0 sleep 20444 [003] 7939.510243: 1 branches:uH: 7f5350cc8350 dl_main+0xa50 => 0 [unknown] sleep 20444 [003] 7939.510244: 1 branches:uH: 7f5350cc83ca dl_main+0xaca => 7f5350caeb60 _dl_process_pt_gnu_property+0x0 sleep 20444 [003] 7939.510245: 1 branches:uH: 7f5350caeb60 _dl_process_pt_gnu_property+0x0 => 0 [unknown] sleep 20444 7939.510245: 10 tlb_flush.stlb_any/aux-sample-size=8192/pp: 0 7f5350caeb60 _dl_process_pt_gnu_property+0x0 sleep 20444 [003] 7939.510254: 1 branches:uH: 7f5350cc87fe dl_main+0xefe => 7f5350ccd240 strcmp+0x0 sleep 20444 [003] 7939.510254: 1 branches:uH: 7f5350cc8862 dl_main+0xf62 => 0 [unknown] - Add a check for the above use case in 'perf test test_intel_pt' - Fix build with refcount checking on arm64, it was still accessing fields that need to be wrapped so that the refcounted struct gets checked - Fix contextid validation in ARM's CS-ETM, so that older kernels without that field can still be supported - Skip unsupported aggregation for stat events found in perf.data files in 'perf script' - Add stat test for record and script to check the previous problem - Remove needless debuginfod queries from 'perf test java symbol', this was just making the test take a long time to complete - Address python SafeConfigParser() deprecation warning in 'perf test attr' - Fix __NR_execve undeclared on i386 'perf bench syscall' build error * tag 'perf-tools-fixes-for-v6.4-1-2023-05-20' of 
git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux: (33 commits) perf bench syscall: Fix __NR_execve undeclared build error perf test attr: Fix python SafeConfigParser() deprecation warning perf test attr: Update no event/metric expectations tools headers disabled-features: Sync with the kernel sources tools headers UAPI: Sync arch prctl headers with the kernel sources tools headers: Update the copy of x86's mem{cpy,set}_64.S used in 'perf bench' tools headers x86 cpufeatures: Sync with the kernel sources tools headers UAPI: Sync s390 syscall table file that wires up the memfd_secret syscall tools headers UAPI: Sync linux/prctl.h with the kernel sources perf metrics: Avoid segv with --topdown for metrics without a group perf lock contention: Add empty 'struct rq' to satisfy libbpf 'runqueue' type verification perf cs-etm: Fix contextid validation perf arm64: Fix build with refcount checking perf test: Add stat test for record and script perf script: Skip aggregation for stat events perf build: Add system include paths to BPF builds perf bpf skels: Make vmlinux.h use bpf.h and perf_event.h in source directory perf parse-events: Do not break up AUX event group perf test test_intel_pt.sh: Test sample mode with event with PMU name perf evsel: Modify group pmu name for software events ...
2 parents 4927cb9 + 4e111f0 commit c47d122

Some content in this commit is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

71 files changed

+865
-322
lines changed

tools/arch/arm64/include/uapi/asm/kvm.h

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -198,6 +198,15 @@ struct kvm_arm_copy_mte_tags {
198198
__u64 reserved[2];
199199
};
200200

201+
/*
202+
* Counter/Timer offset structure. Describe the virtual/physical offset.
203+
* To be used with KVM_ARM_SET_COUNTER_OFFSET.
204+
*/
205+
struct kvm_arm_counter_offset {
206+
__u64 counter_offset;
207+
__u64 reserved;
208+
};
209+
201210
#define KVM_ARM_TAGS_TO_GUEST 0
202211
#define KVM_ARM_TAGS_FROM_GUEST 1
203212

@@ -372,6 +381,10 @@ enum {
372381
#endif
373382
};
374383

384+
/* Device Control API on vm fd */
385+
#define KVM_ARM_VM_SMCCC_CTRL 0
386+
#define KVM_ARM_VM_SMCCC_FILTER 0
387+
375388
/* Device Control API: ARM VGIC */
376389
#define KVM_DEV_ARM_VGIC_GRP_ADDR 0
377390
#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
@@ -411,6 +424,8 @@ enum {
411424
#define KVM_ARM_VCPU_TIMER_CTRL 1
412425
#define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0
413426
#define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1
427+
#define KVM_ARM_VCPU_TIMER_IRQ_HVTIMER 2
428+
#define KVM_ARM_VCPU_TIMER_IRQ_HPTIMER 3
414429
#define KVM_ARM_VCPU_PVTIME_CTRL 2
415430
#define KVM_ARM_VCPU_PVTIME_IPA 0
416431

@@ -469,6 +484,27 @@ enum {
469484
/* run->fail_entry.hardware_entry_failure_reason codes. */
470485
#define KVM_EXIT_FAIL_ENTRY_CPU_UNSUPPORTED (1ULL << 0)
471486

487+
enum kvm_smccc_filter_action {
488+
KVM_SMCCC_FILTER_HANDLE = 0,
489+
KVM_SMCCC_FILTER_DENY,
490+
KVM_SMCCC_FILTER_FWD_TO_USER,
491+
492+
#ifdef __KERNEL__
493+
NR_SMCCC_FILTER_ACTIONS
494+
#endif
495+
};
496+
497+
struct kvm_smccc_filter {
498+
__u32 base;
499+
__u32 nr_functions;
500+
__u8 action;
501+
__u8 pad[15];
502+
};
503+
504+
/* arm64-specific KVM_EXIT_HYPERCALL flags */
505+
#define KVM_HYPERCALL_EXIT_SMC (1U << 0)
506+
#define KVM_HYPERCALL_EXIT_16BIT (1U << 1)
507+
472508
#endif
473509

474510
#endif /* __ARM_KVM_H__ */

tools/arch/x86/include/asm/cpufeatures.h

Lines changed: 21 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -97,7 +97,7 @@
9797
#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */
9898
#define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */
9999
#define X86_FEATURE_AMD_LBR_V2 ( 3*32+17) /* AMD Last Branch Record Extension Version 2 */
100-
#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" LFENCE synchronizes RDTSC */
100+
/* FREE, was #define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) "" LFENCE synchronizes RDTSC */
101101
#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */
102102
#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
103103
#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */
@@ -226,10 +226,9 @@
226226

227227
/* Virtualization flags: Linux defined, word 8 */
228228
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
229-
#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */
230-
#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
231-
#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */
232-
#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */
229+
#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 1) /* Intel FlexPriority */
230+
#define X86_FEATURE_EPT ( 8*32+ 2) /* Intel Extended Page Table */
231+
#define X86_FEATURE_VPID ( 8*32+ 3) /* Intel Virtual Processor ID */
233232

234233
#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer VMMCALL to VMCALL */
235234
#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */
@@ -307,14 +306,21 @@
307306
#define X86_FEATURE_SGX_EDECCSSA (11*32+18) /* "" SGX EDECCSSA user leaf function */
308307
#define X86_FEATURE_CALL_DEPTH (11*32+19) /* "" Call depth tracking for RSB stuffing */
309308
#define X86_FEATURE_MSR_TSX_CTRL (11*32+20) /* "" MSR IA32_TSX_CTRL (Intel) implemented */
309+
#define X86_FEATURE_SMBA (11*32+21) /* "" Slow Memory Bandwidth Allocation */
310+
#define X86_FEATURE_BMEC (11*32+22) /* "" Bandwidth Monitoring Event Configuration */
310311

311312
/* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
312313
#define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */
313314
#define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */
314315
#define X86_FEATURE_CMPCCXADD (12*32+ 7) /* "" CMPccXADD instructions */
316+
#define X86_FEATURE_ARCH_PERFMON_EXT (12*32+ 8) /* "" Intel Architectural PerfMon Extension */
317+
#define X86_FEATURE_FZRM (12*32+10) /* "" Fast zero-length REP MOVSB */
318+
#define X86_FEATURE_FSRS (12*32+11) /* "" Fast short REP STOSB */
319+
#define X86_FEATURE_FSRC (12*32+12) /* "" Fast short REP {CMPSB,SCASB} */
315320
#define X86_FEATURE_LKGS (12*32+18) /* "" Load "kernel" (userspace) GS */
316321
#define X86_FEATURE_AMX_FP16 (12*32+21) /* "" AMX fp16 Support */
317322
#define X86_FEATURE_AVX_IFMA (12*32+23) /* "" Support for VPMADD52[H,L]UQ */
323+
#define X86_FEATURE_LAM (12*32+26) /* Linear Address Masking */
318324

319325
/* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
320326
#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
@@ -331,6 +337,7 @@
331337
#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */
332338
#define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
333339
#define X86_FEATURE_CPPC (13*32+27) /* Collaborative Processor Performance Control */
340+
#define X86_FEATURE_AMD_PSFD (13*32+28) /* "" Predictive Store Forwarding Disable */
334341
#define X86_FEATURE_BTC_NO (13*32+29) /* "" Not vulnerable to Branch Type Confusion */
335342
#define X86_FEATURE_BRS (13*32+31) /* Branch Sampling available */
336343

@@ -363,6 +370,7 @@
363370
#define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */
364371
#define X86_FEATURE_X2AVIC (15*32+18) /* Virtual x2apic */
365372
#define X86_FEATURE_V_SPEC_CTRL (15*32+20) /* Virtual SPEC_CTRL */
373+
#define X86_FEATURE_VNMI (15*32+25) /* Virtual NMI */
366374
#define X86_FEATURE_SVME_ADDR_CHK (15*32+28) /* "" SVME addr check */
367375

368376
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ECX), word 16 */
@@ -427,6 +435,13 @@
427435
#define X86_FEATURE_V_TSC_AUX (19*32+ 9) /* "" Virtual TSC_AUX */
428436
#define X86_FEATURE_SME_COHERENT (19*32+10) /* "" AMD hardware-enforced cache coherency */
429437

438+
/* AMD-defined Extended Feature 2 EAX, CPUID level 0x80000021 (EAX), word 20 */
439+
#define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* "" No Nested Data Breakpoints */
440+
#define X86_FEATURE_LFENCE_RDTSC (20*32+ 2) /* "" LFENCE always serializing / synchronizes RDTSC */
441+
#define X86_FEATURE_NULL_SEL_CLR_BASE (20*32+ 6) /* "" Null Selector Clears Base */
442+
#define X86_FEATURE_AUTOIBRS (20*32+ 8) /* "" Automatic IBRS */
443+
#define X86_FEATURE_NO_SMM_CTL_MSR (20*32+ 9) /* "" SMM_CTL MSR is not present */
444+
430445
/*
431446
* BUG word(s)
432447
*/
@@ -467,5 +482,6 @@
467482
#define X86_BUG_MMIO_UNKNOWN X86_BUG(26) /* CPU is too old and its MMIO Stale Data status is unknown */
468483
#define X86_BUG_RETBLEED X86_BUG(27) /* CPU is affected by RETBleed */
469484
#define X86_BUG_EIBRS_PBRSB X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
485+
#define X86_BUG_SMT_RSB X86_BUG(29) /* CPU is vulnerable to Cross-Thread Return Address Predictions */
470486

471487
#endif /* _ASM_X86_CPUFEATURES_H */

tools/arch/x86/include/asm/disabled-features.h

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -75,6 +75,12 @@
7575
# define DISABLE_CALL_DEPTH_TRACKING (1 << (X86_FEATURE_CALL_DEPTH & 31))
7676
#endif
7777

78+
#ifdef CONFIG_ADDRESS_MASKING
79+
# define DISABLE_LAM 0
80+
#else
81+
# define DISABLE_LAM (1 << (X86_FEATURE_LAM & 31))
82+
#endif
83+
7884
#ifdef CONFIG_INTEL_IOMMU_SVM
7985
# define DISABLE_ENQCMD 0
8086
#else
@@ -115,7 +121,7 @@
115121
#define DISABLED_MASK10 0
116122
#define DISABLED_MASK11 (DISABLE_RETPOLINE|DISABLE_RETHUNK|DISABLE_UNRET| \
117123
DISABLE_CALL_DEPTH_TRACKING)
118-
#define DISABLED_MASK12 0
124+
#define DISABLED_MASK12 (DISABLE_LAM)
119125
#define DISABLED_MASK13 0
120126
#define DISABLED_MASK14 0
121127
#define DISABLED_MASK15 0

tools/arch/x86/include/asm/msr-index.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -206,6 +206,8 @@
206206

207207
/* Abbreviated from Intel SDM name IA32_INTEGRITY_CAPABILITIES */
208208
#define MSR_INTEGRITY_CAPS 0x000002d9
209+
#define MSR_INTEGRITY_CAPS_ARRAY_BIST_BIT 2
210+
#define MSR_INTEGRITY_CAPS_ARRAY_BIST BIT(MSR_INTEGRITY_CAPS_ARRAY_BIST_BIT)
209211
#define MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT 4
210212
#define MSR_INTEGRITY_CAPS_PERIODIC_BIST BIT(MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT)
211213

tools/arch/x86/include/uapi/asm/kvm.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -559,4 +559,7 @@ struct kvm_pmu_event_filter {
559559
#define KVM_VCPU_TSC_CTRL 0 /* control group for the timestamp counter (TSC) */
560560
#define KVM_VCPU_TSC_OFFSET 0 /* attribute for the TSC offset */
561561

562+
/* x86-specific KVM_EXIT_HYPERCALL flags. */
563+
#define KVM_EXIT_HYPERCALL_LONG_MODE BIT(0)
564+
562565
#endif /* _ASM_X86_KVM_H */

tools/arch/x86/include/uapi/asm/prctl.h

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -16,8 +16,16 @@
1616
#define ARCH_GET_XCOMP_GUEST_PERM 0x1024
1717
#define ARCH_REQ_XCOMP_GUEST_PERM 0x1025
1818

19+
#define ARCH_XCOMP_TILECFG 17
20+
#define ARCH_XCOMP_TILEDATA 18
21+
1922
#define ARCH_MAP_VDSO_X32 0x2001
2023
#define ARCH_MAP_VDSO_32 0x2002
2124
#define ARCH_MAP_VDSO_64 0x2003
2225

26+
#define ARCH_GET_UNTAG_MASK 0x4001
27+
#define ARCH_ENABLE_TAGGED_ADDR 0x4002
28+
#define ARCH_GET_MAX_TAG_BITS 0x4003
29+
#define ARCH_FORCE_TAGGED_SVA 0x4004
30+
2331
#endif /* _ASM_X86_PRCTL_H */

tools/arch/x86/include/uapi/asm/unistd_32.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,9 @@
22
#ifndef __NR_fork
33
#define __NR_fork 2
44
#endif
5+
#ifndef __NR_execve
6+
#define __NR_execve 11
7+
#endif
58
#ifndef __NR_getppid
69
#define __NR_getppid 64
710
#endif

tools/arch/x86/lib/memcpy_64.S

Lines changed: 10 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -9,13 +9,6 @@
99

1010
.section .noinstr.text, "ax"
1111

12-
/*
13-
* We build a jump to memcpy_orig by default which gets NOPped out on
14-
* the majority of x86 CPUs which set REP_GOOD. In addition, CPUs which
15-
* have the enhanced REP MOVSB/STOSB feature (ERMS), change those NOPs
16-
* to a jmp to memcpy_erms which does the REP; MOVSB mem copy.
17-
*/
18-
1912
/*
2013
* memcpy - Copy a memory block.
2114
*
@@ -26,17 +19,21 @@
2619
*
2720
* Output:
2821
* rax original destination
22+
*
23+
* The FSRM alternative should be done inline (avoiding the call and
24+
* the disgusting return handling), but that would require some help
25+
* from the compiler for better calling conventions.
26+
*
27+
* The 'rep movsb' itself is small enough to replace the call, but the
28+
* two register moves blow up the code. And one of them is "needed"
29+
* only for the return value that is the same as the source input,
30+
* which the compiler could/should do much better anyway.
2931
*/
3032
SYM_TYPED_FUNC_START(__memcpy)
31-
ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
32-
"jmp memcpy_erms", X86_FEATURE_ERMS
33+
ALTERNATIVE "jmp memcpy_orig", "", X86_FEATURE_FSRM
3334

3435
movq %rdi, %rax
3536
movq %rdx, %rcx
36-
shrq $3, %rcx
37-
andl $7, %edx
38-
rep movsq
39-
movl %edx, %ecx
4037
rep movsb
4138
RET
4239
SYM_FUNC_END(__memcpy)
@@ -45,17 +42,6 @@ EXPORT_SYMBOL(__memcpy)
4542
SYM_FUNC_ALIAS(memcpy, __memcpy)
4643
EXPORT_SYMBOL(memcpy)
4744

48-
/*
49-
* memcpy_erms() - enhanced fast string memcpy. This is faster and
50-
* simpler than memcpy. Use memcpy_erms when possible.
51-
*/
52-
SYM_FUNC_START_LOCAL(memcpy_erms)
53-
movq %rdi, %rax
54-
movq %rdx, %rcx
55-
rep movsb
56-
RET
57-
SYM_FUNC_END(memcpy_erms)
58-
5945
SYM_FUNC_START_LOCAL(memcpy_orig)
6046
movq %rdi, %rax
6147

tools/arch/x86/lib/memset_64.S

Lines changed: 11 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -18,27 +18,22 @@
1818
* rdx count (bytes)
1919
*
2020
* rax original destination
21+
*
22+
* The FSRS alternative should be done inline (avoiding the call and
23+
* the disgusting return handling), but that would require some help
24+
* from the compiler for better calling conventions.
25+
*
26+
* The 'rep stosb' itself is small enough to replace the call, but all
27+
* the register moves blow up the code. And two of them are "needed"
28+
* only for the return value that is the same as the source input,
29+
* which the compiler could/should do much better anyway.
2130
*/
2231
SYM_FUNC_START(__memset)
23-
/*
24-
* Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
25-
* to use it when possible. If not available, use fast string instructions.
26-
*
27-
* Otherwise, use original memset function.
28-
*/
29-
ALTERNATIVE_2 "jmp memset_orig", "", X86_FEATURE_REP_GOOD, \
30-
"jmp memset_erms", X86_FEATURE_ERMS
32+
ALTERNATIVE "jmp memset_orig", "", X86_FEATURE_FSRS
3133

3234
movq %rdi,%r9
35+
movb %sil,%al
3336
movq %rdx,%rcx
34-
andl $7,%edx
35-
shrq $3,%rcx
36-
/* expand byte value */
37-
movzbl %sil,%esi
38-
movabs $0x0101010101010101,%rax
39-
imulq %rsi,%rax
40-
rep stosq
41-
movl %edx,%ecx
4237
rep stosb
4338
movq %r9,%rax
4439
RET
@@ -48,26 +43,6 @@ EXPORT_SYMBOL(__memset)
4843
SYM_FUNC_ALIAS(memset, __memset)
4944
EXPORT_SYMBOL(memset)
5045

51-
/*
52-
* ISO C memset - set a memory block to a byte value. This function uses
53-
* enhanced rep stosb to override the fast string function.
54-
* The code is simpler and shorter than the fast string function as well.
55-
*
56-
* rdi destination
57-
* rsi value (char)
58-
* rdx count (bytes)
59-
*
60-
* rax original destination
61-
*/
62-
SYM_FUNC_START_LOCAL(memset_erms)
63-
movq %rdi,%r9
64-
movb %sil,%al
65-
movq %rdx,%rcx
66-
rep stosb
67-
movq %r9,%rax
68-
RET
69-
SYM_FUNC_END(memset_erms)
70-
7146
SYM_FUNC_START_LOCAL(memset_orig)
7247
movq %rdi,%r10
7348

tools/include/asm/alternative.h

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,6 @@
44

55
/* Just disable it so we can build arch/x86/lib/memcpy_64.S for perf bench: */
66

7-
#define altinstruction_entry #
8-
#define ALTERNATIVE_2 #
7+
#define ALTERNATIVE #
98

109
#endif

0 commit comments

Comments
 (0)