Skip to content

Commit ed35954

Browse files
authored
Merge pull request torvalds#652 from ojeda/ppc-apply-fix
ppc: apply fix for hard lockup
2 parents 3fe71f8 + 8e5bf28 commit ed35954

File tree

4 files changed

+70
-29
lines changed

4 files changed

+70
-29
lines changed

arch/powerpc/include/asm/hw_irq.h

Lines changed: 47 additions & 10 deletions
Original file line number | Diff line number | Diff line change
@@ -345,17 +345,54 @@ static inline bool lazy_irq_pending_nocheck(void)
345345
bool power_pmu_wants_prompt_pmi(void);
346346

347347
/*
348-
* This is called by asynchronous interrupts to conditionally
349-
* re-enable hard interrupts after having cleared the source
350-
* of the interrupt. They are kept disabled if there is a different
351-
* soft-masked interrupt pending that requires hard masking.
348+
* This is called by asynchronous interrupts to check whether to
349+
* conditionally re-enable hard interrupts after having cleared
350+
* the source of the interrupt. They are kept disabled if there
351+
* is a different soft-masked interrupt pending that requires hard
352+
* masking.
352353
*/
353-
static inline void may_hard_irq_enable(void)
354+
static inline bool should_hard_irq_enable(void)
354355
{
355-
if (!(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)) {
356-
get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
357-
__hard_irq_enable();
358-
}
356+
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
357+
WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
358+
WARN_ON(mfmsr() & MSR_EE);
359+
#endif
360+
#ifdef CONFIG_PERF_EVENTS
361+
/*
362+
* If the PMU is not running, there is not much reason to enable
363+
* MSR[EE] in irq handlers because any interrupts would just be
364+
* soft-masked.
365+
*
366+
* TODO: Add test for 64e
367+
*/
368+
if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !power_pmu_wants_prompt_pmi())
369+
return false;
370+
371+
if (get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)
372+
return false;
373+
374+
return true;
375+
#else
376+
return false;
377+
#endif
378+
}
379+
380+
/*
381+
* Do the hard enabling, only call this if should_hard_irq_enable is true.
382+
*/
383+
static inline void do_hard_irq_enable(void)
384+
{
385+
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
386+
WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
387+
WARN_ON(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK);
388+
WARN_ON(mfmsr() & MSR_EE);
389+
#endif
390+
/*
391+
* This allows PMI interrupts (and watchdog soft-NMIs) through.
392+
* There is no other reason to enable this way.
393+
*/
394+
get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
395+
__hard_irq_enable();
359396
}
360397

361398
static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
@@ -436,7 +473,7 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
436473
return !(regs->msr & MSR_EE);
437474
}
438475

439-
static inline bool may_hard_irq_enable(void)
476+
static inline bool should_hard_irq_enable(void)
440477
{
441478
return false;
442479
}

arch/powerpc/kernel/dbell.c

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -27,7 +27,8 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(doorbell_exception)
2727

2828
ppc_msgsync();
2929

30-
may_hard_irq_enable();
30+
if (should_hard_irq_enable())
31+
do_hard_irq_enable();
3132

3233
kvmppc_clear_host_ipi(smp_processor_id());
3334
__this_cpu_inc(irq_stat.doorbell_irqs);

arch/powerpc/kernel/irq.c

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -745,7 +745,8 @@ void __do_irq(struct pt_regs *regs)
745745
irq = ppc_md.get_irq();
746746

747747
/* We can hard enable interrupts now to allow perf interrupts */
748-
may_hard_irq_enable();
748+
if (should_hard_irq_enable())
749+
do_hard_irq_enable();
749750

750751
/* And finally process it */
751752
if (unlikely(!irq))

arch/powerpc/kernel/time.c

Lines changed: 19 additions & 17 deletions
Original file line number | Diff line number | Diff line change
@@ -609,22 +609,23 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
609609
return;
610610
}
611611

612-
/* Ensure a positive value is written to the decrementer, or else
613-
* some CPUs will continue to take decrementer exceptions. When the
614-
* PPC_WATCHDOG (decrementer based) is configured, keep this at most
615-
* 31 bits, which is about 4 seconds on most systems, which gives
616-
* the watchdog a chance of catching timer interrupt hard lockups.
617-
*/
618-
if (IS_ENABLED(CONFIG_PPC_WATCHDOG))
619-
set_dec(0x7fffffff);
620-
else
621-
set_dec(decrementer_max);
622-
623-
/* Conditionally hard-enable interrupts now that the DEC has been
624-
* bumped to its maximum value
625-
*/
626-
may_hard_irq_enable();
612+
/* Conditionally hard-enable interrupts. */
613+
if (should_hard_irq_enable()) {
614+
/*
615+
* Ensure a positive value is written to the decrementer, or
616+
* else some CPUs will continue to take decrementer exceptions.
617+
* When the PPC_WATCHDOG (decrementer based) is configured,
618+
* keep this at most 31 bits, which is about 4 seconds on most
619+
* systems, which gives the watchdog a chance of catching timer
620+
* interrupt hard lockups.
621+
*/
622+
if (IS_ENABLED(CONFIG_PPC_WATCHDOG))
623+
set_dec(0x7fffffff);
624+
else
625+
set_dec(decrementer_max);
627626

627+
do_hard_irq_enable();
628+
}
628629

629630
#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
630631
if (atomic_read(&ppc_n_lost_interrupts) != 0)
@@ -648,8 +649,9 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
648649
__this_cpu_inc(irq_stat.timer_irqs_event);
649650
} else {
650651
now = *next_tb - now;
651-
if (now <= decrementer_max)
652-
set_dec_or_work(now);
652+
if (now > decrementer_max)
653+
now = decrementer_max;
654+
set_dec_or_work(now);
653655
__this_cpu_inc(irq_stat.timer_irqs_others);
654656
}
655657

0 commit comments

Comments
 (0)