x86/mce: Prevent duplicate error records

A legitimate use case of the MCA infrastructure is to have the firmware
log all uncorrectable errors and also, have the OS see all correctable
errors.

The uncorrectable (UCNA) errors are usually configured to be reported
through an SMI. CMCI, which is the correctable error reporting
interrupt, uses SMI too, and having both enabled leads to unnecessary
overhead.

So what ends up happening is, people disable CMCI in the wild and leave
on only the UCNA SMI.

When CMCI is disabled, the MCA infrastructure resorts to polling the MCA
banks. If an MCA MSR is shared between the logical threads, one error
ends up getting logged multiple times because the polling runs on every
logical thread.

Therefore, introduce locking on the Intel side of the polling routine to
prevent such duplicate error records from appearing.

Based on a patch by Aristeu Rozanski <aris@ruivo.org>.

Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Tested-by: Tony Luck <tony.luck@intel.com>
Acked-by: Aristeu Rozanski <aris@ruivo.org>
Link: https://lore.kernel.org/r/20230515143225.GC4090740@cathedrallabs.org
This commit is contained in:
Borislav Petkov (AMD) 2023-07-19 14:19:50 +02:00
parent fdf0eaf114
commit c3629dd7e6
3 changed files with 27 additions and 2 deletions

View File

@ -1608,6 +1608,13 @@ static void __start_timer(struct timer_list *t, unsigned long interval)
local_irq_restore(flags);
}
/*
 * Default MCA bank polling routine: scan this CPU's poll-bank bitmap
 * for new error records. Vendor code may override mc_poll_banks with a
 * serialized variant (see cmci_mc_poll_banks on the Intel side).
 */
static void mc_poll_banks_default(void)
{
machine_check_poll(0, this_cpu_ptr(&mce_poll_banks));
}
void (*mc_poll_banks)(void) = mc_poll_banks_default;
static void mce_timer_fn(struct timer_list *t)
{
struct timer_list *cpu_t = this_cpu_ptr(&mce_timer);
@ -1618,7 +1625,7 @@ static void mce_timer_fn(struct timer_list *t)
iv = __this_cpu_read(mce_next_interval);
if (mce_available(this_cpu_ptr(&cpu_info))) {
machine_check_poll(0, this_cpu_ptr(&mce_poll_banks));
mc_poll_banks();
if (mce_intel_cmci_poll()) {
iv = mce_adjust_timer(iv);

View File

@ -56,6 +56,13 @@ static DEFINE_PER_CPU(int, cmci_backoff_cnt);
*/
static DEFINE_RAW_SPINLOCK(cmci_discover_lock);
/*
* On systems that do support CMCI but it's disabled, polling for MCEs can
* cause the same event to be reported multiple times because IA32_MCi_STATUS
* is shared by the same package.
*/
static DEFINE_SPINLOCK(cmci_poll_lock);
#define CMCI_THRESHOLD 1
#define CMCI_POLL_INTERVAL (30 * HZ)
#define CMCI_STORM_INTERVAL (HZ)
@ -426,12 +433,22 @@ void cmci_disable_bank(int bank)
raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}
/*
 * Bank polling function used when CMCI is disabled.
 *
 * The poll is serialized with cmci_poll_lock because the MCi_STATUS
 * MSRs can be shared between logical CPUs in a package; unserialized
 * polling on each thread would log the same error multiple times.
 * NOTE(review): spin_lock (not irqsave) presumably suffices because this
 * runs from the timer-driven poll path only — confirm against callers.
 */
static void cmci_mc_poll_banks(void)
{
spin_lock(&cmci_poll_lock);
machine_check_poll(0, this_cpu_ptr(&mce_poll_banks));
spin_unlock(&cmci_poll_lock);
}
void intel_init_cmci(void)
{
int banks;
if (!cmci_supported(&banks))
if (!cmci_supported(&banks)) {
mc_poll_banks = cmci_mc_poll_banks;
return;
}
mce_threshold_vector = intel_threshold_interrupt;
cmci_discover(banks);

View File

@ -274,4 +274,5 @@ static __always_inline u32 mca_msr_reg(int bank, enum mca_msr reg)
return 0;
}
extern void (*mc_poll_banks)(void);
#endif /* __X86_MCE_INTERNAL_H__ */