Skip to content

Commit 6c26fbe

Browse files
committed
Merge tag 'perf-core-2025-12-01' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull performance events updates from Ingo Molnar: "Callchain support: - Add support for deferred user-space stack unwinding for perf, enabled on x86. (Peter Zijlstra, Steven Rostedt) - unwind_user/x86: Enable frame pointer unwinding on x86 (Josh Poimboeuf) x86 PMU support and infrastructure: - x86/insn: Simplify for_each_insn_prefix() (Peter Zijlstra) - x86/insn,uprobes,alternative: Unify insn_is_nop() (Peter Zijlstra) Intel PMU driver: - Large series to prepare for and implement architectural PEBS support for Intel platforms such as Clearwater Forest (CWF) and Panther Lake (PTL). (Dapeng Mi, Kan Liang) - Check dynamic constraints (Kan Liang) - Optimize PEBS extended config (Peter Zijlstra) - cstates: - Remove PC3 support from LunarLake (Zhang Rui) - Add Pantherlake support (Zhang Rui) - Clearwater Forest support (Zide Chen) AMD PMU driver: - x86/amd: Check event before enable to avoid GPF (George Kennedy) Fixes and cleanups: - task_work: Fix NMI race condition (Peter Zijlstra) - perf/x86: Fix NULL event access and potential PEBS record loss (Dapeng Mi) - Misc other fixes and cleanups (Dapeng Mi, Ingo Molnar, Peter Zijlstra)" * tag 'perf-core-2025-12-01' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (38 commits) perf/x86/intel: Fix and clean up intel_pmu_drain_arch_pebs() type use perf/x86/intel: Optimize PEBS extended config perf/x86/intel: Check PEBS dyn_constraints perf/x86/intel: Add a check for dynamic constraints perf/x86/intel: Add counter group support for arch-PEBS perf/x86/intel: Setup PEBS data configuration and enable legacy groups perf/x86/intel: Update dyn_constraint base on PEBS event precise level perf/x86/intel: Allocate arch-PEBS buffer and initialize PEBS_BASE MSR perf/x86/intel: Process arch-PEBS records or record fragments perf/x86/intel/ds: Factor out PEBS group processing code to functions perf/x86/intel/ds: Factor out PEBS record processing code to functions perf/x86/intel: Initialize architectural PEBS perf/x86/intel: Correct large PEBS flag check perf/x86/intel: Replace x86_pmu.drain_pebs calling with static call perf/x86: Fix NULL event access and potential PEBS record loss perf/x86: Remove redundant is_x86_event() prototype entry,unwind/deferred: Fix unwind_reset_info() placement unwind_user/x86: Fix arch=um build perf: Support deferred user unwind unwind_user/x86: Teach FP unwind about start of function ...
2 parents 63e6995 + 9929dff commit 6c26fbe

34 files changed

Lines changed: 1627 additions & 338 deletions

arch/x86/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -298,6 +298,7 @@ config X86
298298
select HAVE_SYSCALL_TRACEPOINTS
299299
select HAVE_UACCESS_VALIDATION if HAVE_OBJTOOL
300300
select HAVE_UNSTABLE_SCHED_CLOCK
301+
select HAVE_UNWIND_USER_FP if X86_64
301302
select HAVE_USER_RETURN_NOTIFIER
302303
select HAVE_GENERIC_VDSO
303304
select VDSO_GETRANDOM if X86_64

arch/x86/boot/compressed/sev-handle-vc.c

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -29,11 +29,10 @@
2929
bool insn_has_rep_prefix(struct insn *insn)
3030
{
3131
insn_byte_t p;
32-
int i;
3332

3433
insn_get_prefixes(insn);
3534

36-
for_each_insn_prefix(insn, i, p) {
35+
for_each_insn_prefix(insn, p) {
3736
if (p == 0xf2 || p == 0xf3)
3837
return true;
3938
}

arch/x86/events/amd/core.c

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -763,7 +763,12 @@ static void amd_pmu_enable_all(int added)
763763
if (!test_bit(idx, cpuc->active_mask))
764764
continue;
765765

766-
amd_pmu_enable_event(cpuc->events[idx]);
766+
/*
767+
* FIXME: cpuc->events[idx] can become NULL in a subtle race
768+
* condition with NMI->throttle->x86_pmu_stop().
769+
*/
770+
if (cpuc->events[idx])
771+
amd_pmu_enable_event(cpuc->events[idx]);
767772
}
768773
}
769774

arch/x86/events/core.c

Lines changed: 19 additions & 47 deletions
Original file line numberDiff line numberDiff line change
@@ -554,28 +554,37 @@ static inline int precise_br_compat(struct perf_event *event)
554554
return m == b;
555555
}
556556

557-
int x86_pmu_max_precise(void)
557+
int x86_pmu_max_precise(struct pmu *pmu)
558558
{
559559
int precise = 0;
560560

561-
/* Support for constant skid */
562561
if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) {
563-
precise++;
562+
/* arch PEBS */
563+
if (x86_pmu.arch_pebs) {
564+
precise = 2;
565+
if (hybrid(pmu, arch_pebs_cap).pdists)
566+
precise++;
567+
568+
return precise;
569+
}
564570

571+
/* legacy PEBS - support for constant skid */
572+
precise++;
565573
/* Support for IP fixup */
566574
if (x86_pmu.lbr_nr || x86_pmu.intel_cap.pebs_format >= 2)
567575
precise++;
568576

569577
if (x86_pmu.pebs_prec_dist)
570578
precise++;
571579
}
580+
572581
return precise;
573582
}
574583

575584
int x86_pmu_hw_config(struct perf_event *event)
576585
{
577586
if (event->attr.precise_ip) {
578-
int precise = x86_pmu_max_precise();
587+
int precise = x86_pmu_max_precise(event->pmu);
579588

580589
if (event->attr.precise_ip > precise)
581590
return -EOPNOTSUPP;
@@ -1344,6 +1353,7 @@ static void x86_pmu_enable(struct pmu *pmu)
13441353
hwc->state |= PERF_HES_ARCH;
13451354

13461355
x86_pmu_stop(event, PERF_EF_UPDATE);
1356+
cpuc->events[hwc->idx] = NULL;
13471357
}
13481358

13491359
/*
@@ -1365,6 +1375,7 @@ static void x86_pmu_enable(struct pmu *pmu)
13651375
* if cpuc->enabled = 0, then no wrmsr as
13661376
* per x86_pmu_enable_event()
13671377
*/
1378+
cpuc->events[hwc->idx] = event;
13681379
x86_pmu_start(event, PERF_EF_RELOAD);
13691380
}
13701381
cpuc->n_added = 0;
@@ -1531,7 +1542,6 @@ static void x86_pmu_start(struct perf_event *event, int flags)
15311542

15321543
event->hw.state = 0;
15331544

1534-
cpuc->events[idx] = event;
15351545
__set_bit(idx, cpuc->active_mask);
15361546
static_call(x86_pmu_enable)(event);
15371547
perf_event_update_userpage(event);
@@ -1610,7 +1620,6 @@ void x86_pmu_stop(struct perf_event *event, int flags)
16101620
if (test_bit(hwc->idx, cpuc->active_mask)) {
16111621
static_call(x86_pmu_disable)(event);
16121622
__clear_bit(hwc->idx, cpuc->active_mask);
1613-
cpuc->events[hwc->idx] = NULL;
16141623
WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
16151624
hwc->state |= PERF_HES_STOPPED;
16161625
}
@@ -1648,6 +1657,7 @@ static void x86_pmu_del(struct perf_event *event, int flags)
16481657
* Not a TXN, therefore cleanup properly.
16491658
*/
16501659
x86_pmu_stop(event, PERF_EF_UPDATE);
1660+
cpuc->events[event->hw.idx] = NULL;
16511661

16521662
for (i = 0; i < cpuc->n_events; i++) {
16531663
if (event == cpuc->event_list[i])
@@ -2629,7 +2639,9 @@ static ssize_t max_precise_show(struct device *cdev,
26292639
struct device_attribute *attr,
26302640
char *buf)
26312641
{
2632-
return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu_max_precise());
2642+
struct pmu *pmu = dev_get_drvdata(cdev);
2643+
2644+
return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu_max_precise(pmu));
26332645
}
26342646

26352647
static DEVICE_ATTR_RO(max_precise);
@@ -2845,46 +2857,6 @@ static unsigned long get_segment_base(unsigned int segment)
28452857
return get_desc_base(desc);
28462858
}
28472859

2848-
#ifdef CONFIG_UPROBES
2849-
/*
2850-
* Heuristic-based check if uprobe is installed at the function entry.
2851-
*
2852-
* Under assumption of user code being compiled with frame pointers,
2853-
* `push %rbp/%ebp` is a good indicator that we indeed are.
2854-
*
2855-
* Similarly, `endbr64` (assuming 64-bit mode) is also a common pattern.
2856-
* If we get this wrong, captured stack trace might have one extra bogus
2857-
* entry, but the rest of stack trace will still be meaningful.
2858-
*/
2859-
static bool is_uprobe_at_func_entry(struct pt_regs *regs)
2860-
{
2861-
struct arch_uprobe *auprobe;
2862-
2863-
if (!current->utask)
2864-
return false;
2865-
2866-
auprobe = current->utask->auprobe;
2867-
if (!auprobe)
2868-
return false;
2869-
2870-
/* push %rbp/%ebp */
2871-
if (auprobe->insn[0] == 0x55)
2872-
return true;
2873-
2874-
/* endbr64 (64-bit only) */
2875-
if (user_64bit_mode(regs) && is_endbr((u32 *)auprobe->insn))
2876-
return true;
2877-
2878-
return false;
2879-
}
2880-
2881-
#else
2882-
static bool is_uprobe_at_func_entry(struct pt_regs *regs)
2883-
{
2884-
return false;
2885-
}
2886-
#endif /* CONFIG_UPROBES */
2887-
28882860
#ifdef CONFIG_IA32_EMULATION
28892861

28902862
#include <linux/compat.h>

0 commit comments

Comments
 (0)