Merge tag 'x86_microcode_for_v6.7_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 microcode loading updates from Borislav Petkov:
"Major microcode loader restructuring, cleanup and improvements by
Thomas Gleixner:
- Restructure the code needed for it and add a temporary initrd
mapping on 32-bit so that the loader can access the microcode
blobs. This in itself is a preparation for the next major
improvement:
- Do not load microcode on 32-bit before paging has been enabled.
Handling this has caused an endless stream of headaches, issues,
ugly code and unnecessary hacks in the past. And there really
wasn't any sensible reason to do that in the first place. So switch
the 32-bit loading to happen after paging has been enabled and turn
the loader code "real purrty" again
- Drop mixed microcode steppings loading on Intel - there, a single
patch loaded on the whole system is sufficient
- Rework late loading to track which CPUs have updated microcode
successfully and which haven't, act accordingly
- Move late microcode loading on Intel in NMI context in order to
guarantee concurrent loading on all threads
- Make the late loading CPU-hotplug-safe and have the offlined
threads be woken up for the purpose of the update
- Add support for a minimum revision which determines whether late
microcode loading is safe on a machine and the microcode does not
change software visible features which the machine cannot use
anyway since feature detection has happened already. Roughly, the
minimum revision is the smallest revision number which must be
loaded currently on the system so that late updates can be allowed
- Other nice cleanups, fixes, etc all over the place"
* tag 'x86_microcode_for_v6.7_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (40 commits)
x86/microcode/intel: Add a minimum required revision for late loading
x86/microcode: Prepare for minimal revision check
x86/microcode: Handle "offline" CPUs correctly
x86/apic: Provide apic_force_nmi_on_cpu()
x86/microcode: Protect against instrumentation
x86/microcode: Rendezvous and load in NMI
x86/microcode: Replace the all-in-one rendevous handler
x86/microcode: Provide new control functions
x86/microcode: Add per CPU control field
x86/microcode: Add per CPU result state
x86/microcode: Sanitize __wait_for_cpus()
x86/microcode: Clarify the late load logic
x86/microcode: Handle "nosmt" correctly
x86/microcode: Clean up mc_cpu_down_prep()
x86/microcode: Get rid of the schedule work indirection
x86/microcode: Mop up early loading leftovers
x86/microcode/amd: Use cached microcode for AP load
x86/microcode/amd: Cache builtin/initrd microcode early
x86/microcode/amd: Cache builtin microcode too
x86/microcode/amd: Use correct per CPU ucode_cpu_info
...
// SPDX-License-Identifier: GPL-2.0

#include <linux/cpumask.h>
#include <linux/acpi.h>

#include "local.h"

int x2apic_phys;

static struct apic apic_x2apic_phys;
u32 x2apic_max_apicid __ro_after_init = UINT_MAX;

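/*
 * Lower the limit on acceptable APIC IDs; if the installed driver honours
 * the x2APIC limit, update its ->max_apic_id as well.
 */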
void __init x2apic_set_max_apicid(u32 apicid)
{
	x2apic_max_apicid = apicid;
	if (apic->x2apic_set_max_apicid)
		apic->max_apic_id = apicid;
}

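/* Kernel command line parameter "x2apic_phys" forces physical destination mode. */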
static int __init set_x2apic_phys_mode(char *arg)
{
	x2apic_phys = 1;
	return 0;
}
early_param("x2apic_phys", set_x2apic_phys_mode);

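/*
 * The ACPI FADT can require physical destination mode via the
 * ACPI_FADT_APIC_PHYSICAL flag; honour it when the table revision is new enough.
 */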
static bool x2apic_fadt_phys(void)
{
#ifdef CONFIG_ACPI
	if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) &&
	    (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) {
		printk(KERN_DEBUG "System requires x2apic physical mode\n");
		return true;
	}
#endif
	return false;
}

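/*
 * MADT OEM check: pick this driver when x2APIC is enabled and physical mode
 * was requested on the command line or by the FADT.
 */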
static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	return x2apic_enabled() && (x2apic_phys || x2apic_fadt_phys());
}

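/* Send an IPI to one CPU: a single ICR write addressed by its physical APIC ID. */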
static void x2apic_send_IPI(int cpu, int vector)
{
	u32 dest = per_cpu(x86_cpu_to_apicid, cpu);

	/* x2apic MSRs are special and need a special fence: */
	weak_wrmsr_fence();
	__x2apic_send_IPI_dest(dest, vector, APIC_DEST_PHYSICAL);
}

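/*
 * Physical destination mode has no multicast: walk the mask and send one IPI
 * per target CPU, skipping the sender for APIC_DEST_ALLBUT.
 */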
static void
__x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
{
	unsigned long query_cpu;
	unsigned long this_cpu;
	unsigned long flags;

	/* x2apic MSRs are special and need a special fence: */
	weak_wrmsr_fence();

	local_irq_save(flags);

	this_cpu = smp_processor_id();
	for_each_cpu(query_cpu, mask) {
		if (apic_dest == APIC_DEST_ALLBUT && this_cpu == query_cpu)
			continue;
		__x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
				       vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLINC);
}

static void
x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLBUT);
}

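/* Broadcast using the ICR destination shorthand: all CPUs, or all but self. */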
static void __x2apic_send_IPI_shorthand(int vector, u32 which)
{
	unsigned long cfg = __prepare_ICR(which, vector, 0);

	/* x2apic MSRs are special and need a special fence: */
	weak_wrmsr_fence();
	native_x2apic_icr_write(cfg, 0);
}

void x2apic_send_IPI_allbutself(int vector)
{
	__x2apic_send_IPI_shorthand(vector, APIC_DEST_ALLBUT);
}

void x2apic_send_IPI_all(int vector)
{
	__x2apic_send_IPI_shorthand(vector, APIC_DEST_ALLINC);
}

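/* Self IPIs go through the dedicated x2APIC SELF_IPI register, not the ICR. */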
void x2apic_send_IPI_self(int vector)
{
	apic_write(APIC_SELF_IPI, vector);
}

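/* Assemble the ICR value and write it along with the destination APIC ID. */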
void __x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest)
{
	unsigned long cfg = __prepare_ICR(0, vector, dest);
	native_x2apic_icr_write(cfg, apicid);
}

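/*
 * Probe callback: select this driver when x2APIC mode is active and physical
 * destination mode was requested, or when it is already the installed apic.
 */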
static int x2apic_phys_probe(void)
{
	if (!x2apic_mode)
		return 0;

	if (x2apic_phys || x2apic_fadt_phys())
		return 1;

	return apic == &apic_x2apic_phys;
}

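/*
 * x2APIC IDs are used as-is, no encoding or decoding needed. The package ID
 * is the initial APIC ID shifted down by the topology's index_msb.
 */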
u32 x2apic_get_apic_id(u32 id)
{
	return id;
}

u32 x2apic_set_apic_id(u32 id)
{
	return id;
}

u32 x2apic_phys_pkg_id(u32 initial_apicid, int index_msb)
{
	return initial_apicid >> index_msb;
}

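/*
 * Driver method table for physical destination mode, registered below via
 * apic_driver(). Note .nmi_to_offline_cpu: offline CPUs can be woken with an
 * NMI, which the late microcode loading rework merged above uses to update
 * offlined threads.
 */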
static struct apic apic_x2apic_phys __ro_after_init = {

	.name				= "physical x2apic",
	.probe				= x2apic_phys_probe,
	.acpi_madt_oem_check		= x2apic_acpi_madt_oem_check,

	.delivery_mode			= APIC_DELIVERY_MODE_FIXED,
	.dest_mode_logical		= false,

	.disable_esr			= 0,

	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
	.phys_pkg_id			= x2apic_phys_pkg_id,

	.max_apic_id			= UINT_MAX,
	.x2apic_set_max_apicid		= true,
	.get_apic_id			= x2apic_get_apic_id,
	.set_apic_id			= x2apic_set_apic_id,

	.calc_dest_apicid		= apic_default_calc_apicid,

	.send_IPI			= x2apic_send_IPI,
	.send_IPI_mask			= x2apic_send_IPI_mask,
	.send_IPI_mask_allbutself	= x2apic_send_IPI_mask_allbutself,
	.send_IPI_allbutself		= x2apic_send_IPI_allbutself,
	.send_IPI_all			= x2apic_send_IPI_all,
	.send_IPI_self			= x2apic_send_IPI_self,
	.nmi_to_offline_cpu		= true,

	.read				= native_apic_msr_read,
	.write				= native_apic_msr_write,
	.eoi				= native_apic_msr_eoi,
	.icr_read			= native_x2apic_icr_read,
	.icr_write			= native_x2apic_icr_write,
};

apic_driver(apic_x2apic_phys);