 #include <sbi/sbi_pmu.h>
 #include <sbi/sbi_scratch.h>
 #include <sbi/sbi_string.h>
+#include <sbi/sbi_sse.h>
 
 /** Information about hardware counters */
 struct sbi_pmu_hw_event {
@@ -62,6 +63,8 @@ struct sbi_pmu_hart_state {
 	uint32_t active_events[SBI_PMU_HW_CTR_MAX + SBI_PMU_FW_CTR_MAX];
 	/* Bitmap of firmware counters started */
 	unsigned long fw_counters_started;
+	/* if true, SSE is enabled */
+	bool sse_enabled;
 	/*
 	 * Counter values for SBI firmware events and event codes
 	 * for platform firmware events. Both are mutually exclusive
@@ -300,6 +303,16 @@ int sbi_pmu_add_raw_event_counter_map(uint64_t select, uint64_t select_mask, u32
 				    SBI_PMU_EVENT_RAW_IDX, cmap, select, select_mask);
 }
 
+void sbi_pmu_ovf_irq()
+{
+	/*
+	 * We need to disable LCOFIP before returning to S-mode or we will loop
+	 * on LCOFIP being triggered
+	 */
+	csr_clear(CSR_MIE, MIP_LCOFIP);
+	sbi_sse_inject_event(SBI_SSE_EVENT_LOCAL_PMU);
+}
+
 static int pmu_ctr_enable_irq_hw(int ctr_idx)
 {
 	unsigned long mhpmevent_csr;
@@ -575,6 +588,10 @@ int sbi_pmu_ctr_stop(unsigned long cbase, unsigned long cmask,
 		}
 	}
 
+	/* Clear MIP_LCOFIP to avoid spurious interrupts */
+	if (phs->sse_enabled)
+		csr_clear(CSR_MIP, MIP_LCOFIP);
+
 	return ret;
 }
 
@@ -962,6 +979,7 @@ static void pmu_reset_event_map(struct sbi_pmu_hart_state *phs)
 	for (j = 0; j < SBI_PMU_FW_CTR_MAX; j++)
 		phs->fw_counters_data[j] = 0;
 	phs->fw_counters_started = 0;
+	phs->sse_enabled = 0;
 }
 
 const struct sbi_pmu_device *sbi_pmu_get_device(void)
@@ -993,6 +1011,37 @@ void sbi_pmu_exit(struct sbi_scratch *scratch)
 	pmu_reset_event_map(phs);
 }
 
+static void pmu_sse_enable(uint32_t event_id)
+{
+	struct sbi_pmu_hart_state *phs = pmu_thishart_state_ptr();
+
+	phs->sse_enabled = true;
+	csr_clear(CSR_MIDELEG, sbi_pmu_irq_bit());
+	csr_clear(CSR_MIP, MIP_LCOFIP);
+	csr_set(CSR_MIE, MIP_LCOFIP);
+}
+
+static void pmu_sse_disable(uint32_t event_id)
+{
+	struct sbi_pmu_hart_state *phs = pmu_thishart_state_ptr();
+
+	csr_clear(CSR_MIE, MIP_LCOFIP);
+	csr_clear(CSR_MIP, MIP_LCOFIP);
+	csr_set(CSR_MIDELEG, sbi_pmu_irq_bit());
+	phs->sse_enabled = false;
+}
+
+static void pmu_sse_complete(uint32_t event_id)
+{
+	csr_set(CSR_MIE, MIP_LCOFIP);
+}
+
+static const struct sbi_sse_cb_ops pmu_sse_cb_ops = {
+	.enable_cb = pmu_sse_enable,
+	.disable_cb = pmu_sse_disable,
+	.complete_cb = pmu_sse_complete,
+};
+
 int sbi_pmu_init(struct sbi_scratch *scratch, bool cold_boot)
 {
 	int hpm_count = sbi_fls(sbi_hart_mhpm_mask(scratch));
@@ -1032,6 +1081,8 @@ int sbi_pmu_init(struct sbi_scratch *scratch, bool cold_boot)
 		total_ctrs = num_hw_ctrs + SBI_PMU_FW_CTR_MAX;
 	}
 
+	sbi_sse_set_cb_ops(SBI_SSE_EVENT_LOCAL_PMU, &pmu_sse_cb_ops);
+
 	phs = pmu_get_hart_state_ptr(scratch);
 	if (!phs) {
 		phs = sbi_zalloc(sizeof(*phs));
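
Note on the hunks above: they register the SSE callbacks and inject SBI_SSE_EVENT_LOCAL_PMU from sbi_pmu_ovf_irq(), but the overflow interrupt itself still has to reach that function. Because pmu_sse_enable() clears the LCOF bit in mideleg, the local counter-overflow interrupt now traps to M-mode and must be routed there by the firmware's interrupt dispatch. A minimal sketch of such routing follows; the helper name is hypothetical and IRQ_PMU_OVF is assumed to be the LCOF interrupt number from OpenSBI's encoding header, so treat this as an illustration rather than part of the commit.

/*
 * Sketch only, not taken from this commit: dispatch the undelegated
 * counter-overflow interrupt (LCOF) to the new PMU SSE path. Assumes
 * IRQ_PMU_OVF from <sbi/riscv_encoding.h> plus the usual timer/IPI
 * processing helpers; the function name here is hypothetical.
 */
static void pmu_irq_dispatch_sketch(unsigned long irq)
{
	switch (irq) {
	case IRQ_M_TIMER:
		sbi_timer_process();
		break;
	case IRQ_M_SOFT:
		sbi_ipi_process();
		break;
	case IRQ_PMU_OVF:
		/* Kept in M-mode while SSE is enabled; inject the SSE event */
		sbi_pmu_ovf_irq();
		break;
	default:
		break;
	}
}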