Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson
arm64: Modify _midr_range() functions to read MIDR/REVIDR internally
These changes lay the groundwork for adding support for guest kernels,
allowing them to leverage target CPU implementations provided by the VMM.
No functional changes intended.

Suggested-by: Oliver Upton <oliver.upton@linux.dev>
Reviewed-by: Sebastian Ott <sebott@redhat.com>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/20250221140229.12588-2-shameerali.kolothum.thodi@huawei.com
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
commit e3121298c7
parent 0ad2507d5d
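At its core, the patch drops the explicit midr argument from the _midr_range() helpers so they read MIDR_EL1 themselves. A minimal before/after sketch of a call site (the list name is illustrative, not from the patch):

	/* Before: every caller read MIDR_EL1 and passed it in. */
	if (is_midr_in_range_list(read_cpuid_id(), some_erratum_list))
		do_workaround();

	/* After: the helper calls read_cpuid_id() internally. */
	if (is_midr_in_range_list(some_erratum_list))
		do_workaround();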
arch/arm64/include/asm/cputype.h
@@ -231,6 +231,16 @@
 
 #define read_cpuid(reg)		read_sysreg_s(SYS_ ## reg)
 
+/*
+ * The CPU ID never changes at run time, so we might as well tell the
+ * compiler that it's constant. Use this function to read the CPU ID
+ * rather than directly reading processor_id or read_cpuid() directly.
+ */
+static inline u32 __attribute_const__ read_cpuid_id(void)
+{
+	return read_cpuid(MIDR_EL1);
+}
+
 /*
  * Represent a range of MIDR values for a given CPU model and a
  * range of variant/revision values.
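Note that read_cpuid_id() is not new here: the next hunk removes its original definition further down in the header. The function is simply hoisted above the midr_range helpers so that is_midr_in_range() can call it.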
@@ -266,31 +276,21 @@ static inline bool midr_is_cpu_model_range(u32 midr, u32 model, u32 rv_min,
 	return _model == model && rv >= rv_min && rv <= rv_max;
 }
 
-static inline bool is_midr_in_range(u32 midr, struct midr_range const *range)
+static inline bool is_midr_in_range(struct midr_range const *range)
 {
-	return midr_is_cpu_model_range(midr, range->model,
+	return midr_is_cpu_model_range(read_cpuid_id(), range->model,
 				       range->rv_min, range->rv_max);
 }
 
 static inline bool
-is_midr_in_range_list(u32 midr, struct midr_range const *ranges)
+is_midr_in_range_list(struct midr_range const *ranges)
 {
 	while (ranges->model)
-		if (is_midr_in_range(midr, ranges++))
+		if (is_midr_in_range(ranges++))
 			return true;
 	return false;
 }
 
-/*
- * The CPU ID never changes at run time, so we might as well tell the
- * compiler that it's constant. Use this function to read the CPU ID
- * rather than directly reading processor_id or read_cpuid() directly.
- */
-static inline u32 __attribute_const__ read_cpuid_id(void)
-{
-	return read_cpuid(MIDR_EL1);
-}
-
 static inline u64 __attribute_const__ read_cpuid_mpidr(void)
 {
 	return read_cpuid(MPIDR_EL1);
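With the new signatures, a range table is checked against the running CPU without plumbing a midr value through every caller. A minimal usage sketch, assuming a hypothetical erratum table (the table name and CPU choice are illustrative, not from the patch):

	static const struct midr_range example_erratum_cpus[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
		{},	/* model == 0 terminates the walk in is_midr_in_range_list() */
	};

	static bool example_cpu_affected(void)
	{
		/* Reads MIDR_EL1 internally; no midr argument needed. */
		return is_midr_in_range_list(example_erratum_cpus);
	}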
arch/arm64/include/asm/mmu.h
@@ -101,8 +101,7 @@ static inline bool kaslr_requires_kpti(void)
 	if (IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
 		extern const struct midr_range cavium_erratum_27456_cpus[];
 
-		if (is_midr_in_range_list(read_cpuid_id(),
-					  cavium_erratum_27456_cpus))
+		if (is_midr_in_range_list(cavium_erratum_27456_cpus))
 			return false;
 	}
 
arch/arm64/kernel/cpu_errata.c
@@ -15,30 +15,34 @@
 #include <asm/smp_plat.h>
 
 static bool __maybe_unused
-is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
+__is_affected_midr_range(const struct arm64_cpu_capabilities *entry,
+			 u32 midr, u32 revidr)
 {
 	const struct arm64_midr_revidr *fix;
-	u32 midr = read_cpuid_id(), revidr;
-
-	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-	if (!is_midr_in_range(midr, &entry->midr_range))
+	if (!is_midr_in_range(&entry->midr_range))
 		return false;
 
 	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
-	revidr = read_cpuid(REVIDR_EL1);
 	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
 		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
 			return false;
 
 	return true;
 }
 
+static bool __maybe_unused
+is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
+{
+	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+	return __is_affected_midr_range(entry, read_cpuid_id(),
+					read_cpuid(REVIDR_EL1));
+}
+
 static bool __maybe_unused
 is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
 			    int scope)
 {
 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-	return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
+	return is_midr_in_range_list(entry->midr_range_list);
 }
 
 static bool __maybe_unused
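The split above concentrates the hardware reads (read_cpuid_id(), read_cpuid(REVIDR_EL1)) and the SCOPE_LOCAL_CPU assertion in a thin wrapper, while __is_affected_midr_range() matches plain u32 values. Per the commit message, this is groundwork for checking errata against MIDR/REVIDR values that do not come from the local registers. A hypothetical future caller (not part of this patch; the variable names are illustrative) might look like:

	/* 'vm_midr' and 'vm_revidr' stand in for values a VMM could
	 * provide for a guest's target CPU implementation.
	 */
	bool affected = __is_affected_midr_range(entry, vm_midr, vm_revidr);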
@@ -186,12 +190,11 @@ static bool __maybe_unused
 has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
 				int scope)
 {
-	u32 midr = read_cpuid_id();
 	bool has_dic = read_cpuid_cachetype() & BIT(CTR_EL0_DIC_SHIFT);
 	const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);
 
 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-	return is_midr_in_range(midr, &range) && has_dic;
+	return is_midr_in_range(&range) && has_dic;
 }
 
 #ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
arch/arm64/kernel/cpufeature.c
@@ -1792,7 +1792,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
 	char const *str = "kpti command line option";
 	bool meltdown_safe;
 
-	meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list);
+	meltdown_safe = is_midr_in_range_list(kpti_safe_list);
 
 	/* Defer to CPU feature registers */
 	if (has_cpuid_feature(entry, scope))
@@ -1862,7 +1862,7 @@ static bool has_nv1(const struct arm64_cpu_capabilities *entry, int scope)
 
 	return (__system_matches_cap(ARM64_HAS_NESTED_VIRT) &&
 		!(has_cpuid_feature(entry, scope) ||
-		  is_midr_in_range_list(read_cpuid_id(), nv1_ni_list)));
+		  is_midr_in_range_list(nv1_ni_list)));
 }
 
 #if defined(ID_AA64MMFR0_EL1_TGRAN_LPA2) && defined(ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_LPA2)
@@ -2045,7 +2045,7 @@ static bool cpu_has_broken_dbm(void)
 	{},
 	};
 
-	return is_midr_in_range_list(read_cpuid_id(), cpus);
+	return is_midr_in_range_list(cpus);
 }
 
 static bool cpu_can_use_dbm(const struct arm64_cpu_capabilities *cap)
arch/arm64/kernel/proton-pack.c
@@ -172,7 +172,7 @@ static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
 		return SPECTRE_UNAFFECTED;
 
 	/* Alternatively, we have a list of unaffected CPUs */
-	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
+	if (is_midr_in_range_list(spectre_v2_safe_list))
 		return SPECTRE_UNAFFECTED;
 
 	return SPECTRE_VULNERABLE;
@@ -331,7 +331,7 @@ bool has_spectre_v3a(const struct arm64_cpu_capabilities *entry, int scope)
 	};
 
 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-	return is_midr_in_range_list(read_cpuid_id(), spectre_v3a_unsafe_list);
+	return is_midr_in_range_list(spectre_v3a_unsafe_list);
 }
 
 void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
@@ -475,7 +475,7 @@ static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void)
 	{ /* sentinel */ },
 	};
 
-	if (is_midr_in_range_list(read_cpuid_id(), spectre_v4_safe_list))
+	if (is_midr_in_range_list(spectre_v4_safe_list))
 		return SPECTRE_UNAFFECTED;
 
 	/* CPU features are detected first */
@@ -878,13 +878,13 @@ u8 spectre_bhb_loop_affected(int scope)
 	{},
 	};
 
-	if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
+	if (is_midr_in_range_list(spectre_bhb_k32_list))
 		k = 32;
-	else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
+	else if (is_midr_in_range_list(spectre_bhb_k24_list))
 		k = 24;
-	else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list))
+	else if (is_midr_in_range_list(spectre_bhb_k11_list))
 		k = 11;
-	else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
+	else if (is_midr_in_range_list(spectre_bhb_k8_list))
 		k = 8;
 
 	max_bhb_k = max(max_bhb_k, k);
@@ -926,8 +926,7 @@ static bool is_spectre_bhb_fw_affected(int scope)
 	MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
 	{},
 	};
-	bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
-					spectre_bhb_firmware_mitigated_list);
+	bool cpu_in_list = is_midr_in_range_list(spectre_bhb_firmware_mitigated_list);
 
 	if (scope != SCOPE_LOCAL_CPU)
 		return system_affected;
arch/arm64/kvm/vgic/vgic-v3.c
@@ -633,7 +633,7 @@ static const struct midr_range broken_seis[] = {
 static bool vgic_v3_broken_seis(void)
 {
 	return ((kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_SEIS_MASK) &&
-		is_midr_in_range_list(read_cpuid_id(), broken_seis));
+		is_midr_in_range_list(broken_seis));
 }
 
 /**
drivers/clocksource/arm_arch_timer.c
@@ -842,7 +842,7 @@ static u64 __arch_timer_check_delta(void)
 	{},
 	};
 
-	if (is_midr_in_range_list(read_cpuid_id(), broken_cval_midrs)) {
+	if (is_midr_in_range_list(broken_cval_midrs)) {
 		pr_warn_once("Broken CNTx_CVAL_EL1, using 31 bit TVAL instead.\n");
 		return CLOCKSOURCE_MASK(31);
 	}
drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -1216,7 +1216,7 @@ static void etm4_fixup_wrong_ccitmin(struct etmv4_drvdata *drvdata)
 	 * recorded value for 'drvdata->ccitmin' to workaround
 	 * this problem.
 	 */
-	if (is_midr_in_range_list(read_cpuid_id(), etm_wrong_ccitmin_cpus)) {
+	if (is_midr_in_range_list(etm_wrong_ccitmin_cpus)) {
 		if (drvdata->ccitmin == 256)
 			drvdata->ccitmin = 4;
 	}