arm64: errata: Work around AmpereOne's erratum AC04_CPU_23
On AmpereOne AC04, updates to HCR_EL2 can rarely corrupt simultaneous
translations for data addresses initiated by load/store instructions.
Only instruction initiated translations are vulnerable, not translations
from prefetches for example. A DSB before the store to HCR_EL2 is
sufficient to prevent older instructions from hitting the window for
corruption, and an ISB after is sufficient to prevent younger
instructions from hitting the window for corruption.

Signed-off-by: D Scott Phillips <scott@os.amperecomputing.com>
Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/20250513184514.2678288-1-scott@os.amperecomputing.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
parent 92c749e4aa
commit fed55f49fa
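The shape of the fix, as a minimal standalone C sketch (the wrapper and the
flag are hypothetical names for illustration; the actual patch below keys
the fast/slow path off the ARM64_WORKAROUND_AMPERE_AC04_CPU_23 cpucap and
runtime-patched alternatives, not a plain bool):

	#include <stdbool.h>

	/* Hypothetical illustration only, not the kernel's API. */
	static bool ac04_cpu_23_affected;	/* would be set from MIDR probing */

	static inline void hcr_el2_write(unsigned long val)
	{
		if (ac04_cpu_23_affected)
			/*
			 * DSB: older loads/stores finish translating under the
			 * old HCR_EL2 value; ISB: younger instructions are
			 * refetched and translate under the new value.
			 */
			asm volatile("dsb nsh; msr hcr_el2, %x0; isb" : : "rZ" (val));
		else
			asm volatile("msr hcr_el2, %x0" : : "rZ" (val));
	}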
--- a/Documentation/arch/arm64/silicon-errata.rst
+++ b/Documentation/arch/arm64/silicon-errata.rst
@@ -57,6 +57,8 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | Ampere         | AmpereOne AC04  | AC04_CPU_10     | AMPERE_ERRATUM_AC03_CPU_38  |
 +----------------+-----------------+-----------------+-----------------------------+
+| Ampere         | AmpereOne AC04  | AC04_CPU_23     | AMPERE_ERRATUM_AC04_CPU_23  |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A510     | #2457168        | ARM64_ERRATUM_2457168       |
 +----------------+-----------------+-----------------+-----------------------------+
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -464,6 +464,23 @@ config AMPERE_ERRATUM_AC03_CPU_38
 
 	  If unsure, say Y.
 
+config AMPERE_ERRATUM_AC04_CPU_23
+	bool "AmpereOne: AC04_CPU_23: Failure to synchronize writes to HCR_EL2 may corrupt address translations."
+	default y
+	help
+	  This option adds an alternative code sequence to work around Ampere
+	  errata AC04_CPU_23 on AmpereOne.
+
+	  Updates to HCR_EL2 can rarely corrupt simultaneous translations for
+	  data addresses initiated by load/store instructions. Only
+	  instruction initiated translations are vulnerable, not translations
+	  from prefetches for example. A DSB before the store to HCR_EL2 is
+	  sufficient to prevent older instructions from hitting the window
+	  for corruption, and an ISB after is sufficient to prevent younger
+	  instructions from hitting the window for corruption.
+
+	  If unsure, say Y.
+
 config ARM64_WORKAROUND_CLEAN_CACHE
 	bool
 
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -38,7 +38,7 @@
 
 	orr	x0, x0, #HCR_E2H
 .LnVHE_\@:
-	msr	hcr_el2, x0
+	msr_hcr_el2	x0
 	isb
 .endm
 
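Assembly callers cannot use a C helper, so the patch pairs the C-side
changes with a new msr_hcr_el2 assembler macro (defined in the sysreg.h
hunk below) and converts each raw `msr hcr_el2, <reg>` site to it; the
remaining .S hunks in this commit follow the same pattern.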
--- a/arch/arm64/include/asm/hardirq.h
+++ b/arch/arm64/include/asm/hardirq.h
@@ -41,7 +41,7 @@ do {							\
 								\
 	___hcr = read_sysreg(hcr_el2);				\
 	if (!(___hcr & HCR_TGE)) {				\
-		write_sysreg(___hcr | HCR_TGE, hcr_el2);	\
+		write_sysreg_hcr(___hcr | HCR_TGE);		\
 		isb();						\
 	}							\
 	/*							\
@@ -82,7 +82,7 @@ do {							\
 	 */							\
 	barrier();						\
 	if (!___ctx->cnt && !(___hcr & HCR_TGE))		\
-		write_sysreg(___hcr, hcr_el2);			\
+		write_sysreg_hcr(___hcr);			\
 } while (0)
 
 static inline void ack_bad_irq(unsigned int irq)
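Judging by the macro bodies, these two hunks are the NMI entry/exit paths:
entry sets HCR_EL2.TGE when the CPU was running with TGE clear and exit
restores the saved value, so both HCR_EL2 writes need the erratum barriers
as well.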
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -1091,6 +1091,15 @@
 		__emit_inst(0xd5000000|(\sreg)|(.L__gpr_num_\rt))
 	.endm
 
+	.macro	msr_hcr_el2, reg
+#if IS_ENABLED(CONFIG_AMPERE_ERRATUM_AC04_CPU_23)
+	dsb	nsh
+	msr	hcr_el2, \reg
+	isb
+#else
+	msr	hcr_el2, \reg
+#endif
+	.endm
 #else
 
 #include <linux/bitfield.h>
@@ -1178,6 +1187,13 @@
 		write_sysreg(__scs_new, sysreg);		\
 } while (0)
 
+#define sysreg_clear_set_hcr(clear, set) do {			\
+	u64 __scs_val = read_sysreg(hcr_el2);			\
+	u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set);	\
+	if (__scs_new != __scs_val)				\
+		write_sysreg_hcr(__scs_new);			\
+} while (0)
+
 #define sysreg_clear_set_s(sysreg, clear, set) do {		\
 	u64 __scs_val = read_sysreg_s(sysreg);			\
 	u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set);	\
@@ -1185,6 +1201,17 @@
 		write_sysreg_s(__scs_new, sysreg);		\
 } while (0)
 
+#define write_sysreg_hcr(__val) do {				\
+	if (IS_ENABLED(CONFIG_AMPERE_ERRATUM_AC04_CPU_23) &&	\
+	    (!system_capabilities_finalized() ||		\
+	     alternative_has_cap_unlikely(ARM64_WORKAROUND_AMPERE_AC04_CPU_23))) \
+		asm volatile("dsb nsh; msr hcr_el2, %x0; isb"	\
+			     : : "rZ" (__val));			\
+	else							\
+		asm volatile("msr hcr_el2, %x0"			\
+			     : : "rZ" (__val));			\
+} while (0)
+
 #define read_sysreg_par() ({					\
 	u64 par;						\
 	asm(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412)); \
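Two details worth noting in write_sysreg_hcr(): the "rZ" constraint lets a
constant zero be passed as xzr, and the !system_capabilities_finalized()
test keeps the barrier-bracketed sequence in use during early boot, before
the capability system has decided whether this CPU is affected (the extra
barriers are merely redundant on unaffected parts). Callers then migrate
mechanically, as in the hunks below:

	/* before */
	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
	sysreg_clear_set(hcr_el2, 0, HCR_AMO | HCR_FMO | HCR_IMO);

	/* after: same semantics, plus the erratum barriers where required */
	write_sysreg_hcr(HCR_HOST_VHE_FLAGS);
	sysreg_clear_set_hcr(0, HCR_AMO | HCR_FMO | HCR_IMO);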
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -557,6 +557,13 @@ static const struct midr_range erratum_ac03_cpu_38_list[] = {
 };
 #endif
 
+#ifdef CONFIG_AMPERE_ERRATUM_AC04_CPU_23
+static const struct midr_range erratum_ac04_cpu_23_list[] = {
+	MIDR_ALL_VERSIONS(MIDR_AMPERE1A),
+	{},
+};
+#endif
+
 const struct arm64_cpu_capabilities arm64_errata[] = {
 #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
 	{
@@ -875,6 +882,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		.capability = ARM64_WORKAROUND_AMPERE_AC03_CPU_38,
 		ERRATA_MIDR_RANGE_LIST(erratum_ac03_cpu_38_list),
 	},
+#endif
+#ifdef CONFIG_AMPERE_ERRATUM_AC04_CPU_23
+	{
+		.desc = "AmpereOne erratum AC04_CPU_23",
+		.capability = ARM64_WORKAROUND_AMPERE_AC04_CPU_23,
+		ERRATA_MIDR_RANGE_LIST(erratum_ac04_cpu_23_list),
+	},
 #endif
 	{
 		.desc = "Broken CNTVOFF_EL2",
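ERRATA_MIDR_RANGE_LIST() wires the MIDR list into the capability entry, and
MIDR_ALL_VERSIONS(MIDR_AMPERE1A) matches every revision of the AmpereOne
AC04 part listed in the silicon-errata table above; once detected, the
capability feeds the alternative_has_cap_unlikely() check in
write_sysreg_hcr().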
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -97,7 +97,7 @@ SYM_CODE_START_LOCAL(__finalise_el2)
 2:
 	// Engage the VHE magic!
 	mov_q	x0, HCR_HOST_VHE_FLAGS
-	msr	hcr_el2, x0
+	msr_hcr_el2	x0
 	isb
 
 	// Use the EL1 allocated stack, per-cpu offset
--- a/arch/arm64/kvm/at.c
+++ b/arch/arm64/kvm/at.c
@@ -516,7 +516,7 @@ static void __mmu_config_save(struct mmu_config *config)
 
 static void __mmu_config_restore(struct mmu_config *config)
 {
-	write_sysreg(config->hcr, hcr_el2);
+	write_sysreg_hcr(config->hcr);
 
 	/*
 	 * ARM errata 1165522 and 1530923 require TGE to be 1 before
@@ -1267,7 +1267,7 @@ static u64 __kvm_at_s1e01_fast(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
 
 skip_mmu_switch:
 	/* Clear TGE, enable S2 translation, we're rolling */
-	write_sysreg((config.hcr & ~HCR_TGE) | HCR_VM, hcr_el2);
+	write_sysreg_hcr((config.hcr & ~HCR_TGE) | HCR_VM);
 	isb();
 
 	switch (op) {
@@ -1350,7 +1350,7 @@ void __kvm_at_s1e2(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
 		if (!vcpu_el2_e2h_is_set(vcpu))
 			val |= HCR_NV | HCR_NV1;
 
-		write_sysreg(val, hcr_el2);
+		write_sysreg_hcr(val);
 		isb();
 
 		par = SYS_PAR_EL1_F;
@@ -1375,7 +1375,7 @@ void __kvm_at_s1e2(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
 		if (!fail)
 			par = read_sysreg_par();
 
-		write_sysreg(hcr, hcr_el2);
+		write_sysreg_hcr(hcr);
 		isb();
 	}
 
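These four hunks sit in KVM's AT (address translation) emulation paths,
which temporarily rewrite HCR_EL2 around AT instructions; since the erratum
corrupts translations initiated by instructions, each of these toggles is
routed through the new helper.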
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -301,7 +301,7 @@ static inline void ___activate_traps(struct kvm_vcpu *vcpu, u64 hcr)
 	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
 		hcr |= HCR_TVM;
 
-	write_sysreg(hcr, hcr_el2);
+	write_sysreg_hcr(hcr);
 
 	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
 		write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
--- a/arch/arm64/kvm/hyp/nvhe/host.S
+++ b/arch/arm64/kvm/hyp/nvhe/host.S
@@ -124,7 +124,7 @@ SYM_FUNC_START(__hyp_do_panic)
 	/* Ensure host stage-2 is disabled */
 	mrs	x0, hcr_el2
 	bic	x0, x0, #HCR_VM
-	msr	hcr_el2, x0
+	msr_hcr_el2	x0
 	isb
 	tlbi	vmalls12e1
 	dsb	nsh
--- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
@@ -100,7 +100,7 @@ SYM_CODE_START_LOCAL(___kvm_hyp_init)
 	msr	mair_el2, x1
 
 	ldr	x1, [x0, #NVHE_INIT_HCR_EL2]
-	msr	hcr_el2, x1
+	msr_hcr_el2	x1
 
 	mov	x2, #HCR_E2H
 	and	x2, x1, x2
@@ -262,7 +262,7 @@ reset:
 
 alternative_if ARM64_KVM_PROTECTED_MODE
 	mov_q	x5, HCR_HOST_NVHE_FLAGS
-	msr	hcr_el2, x5
+	msr_hcr_el2	x5
 alternative_else_nop_endif
 
 	/* Install stub vectors */
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -309,7 +309,7 @@ int __pkvm_prot_finalize(void)
 	 */
 	kvm_flush_dcache_to_poc(params, sizeof(*params));
 
-	write_sysreg(params->hcr_el2, hcr_el2);
+	write_sysreg_hcr(params->hcr_el2);
 	__load_stage2(&host_mmu.arch.mmu, &host_mmu.arch);
 
 	/*
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -142,7 +142,7 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
 
 	__deactivate_traps_common(vcpu);
 
-	write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);
+	write_sysreg_hcr(this_cpu_ptr(&kvm_init_params)->hcr_el2);
 
 	__deactivate_cptr_traps(vcpu);
 	write_sysreg(__kvm_hyp_host_vector, vbar_el2);
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -446,7 +446,7 @@ u64 __vgic_v3_get_gic_config(void)
 	if (has_vhe()) {
 		flags = local_daif_save();
 	} else {
-		sysreg_clear_set(hcr_el2, 0, HCR_AMO | HCR_FMO | HCR_IMO);
+		sysreg_clear_set_hcr(0, HCR_AMO | HCR_FMO | HCR_IMO);
 		isb();
 	}
 
@@ -461,7 +461,7 @@ u64 __vgic_v3_get_gic_config(void)
 	if (has_vhe()) {
 		local_daif_restore(flags);
 	} else {
-		sysreg_clear_set(hcr_el2, HCR_AMO | HCR_FMO | HCR_IMO, 0);
+		sysreg_clear_set_hcr(HCR_AMO | HCR_FMO | HCR_IMO, 0);
 		isb();
 	}
 
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -184,7 +184,7 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
 
 	___deactivate_traps(vcpu);
 
-	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
+	write_sysreg_hcr(HCR_HOST_VHE_FLAGS);
 
 	if (has_cntpoff()) {
 		struct timer_map map;
--- a/arch/arm64/kvm/hyp/vhe/tlb.c
+++ b/arch/arm64/kvm/hyp/vhe/tlb.c
@@ -63,7 +63,7 @@ static void enter_vmid_context(struct kvm_s2_mmu *mmu,
 	__load_stage2(mmu, mmu->arch);
 	val = read_sysreg(hcr_el2);
 	val &= ~HCR_TGE;
-	write_sysreg(val, hcr_el2);
+	write_sysreg_hcr(val);
 	isb();
 }
 
@@ -73,7 +73,7 @@ static void exit_vmid_context(struct tlb_inv_context *cxt)
 	 * We're done with the TLB operation, let's restore the host's
 	 * view of HCR_EL2.
 	 */
-	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
+	write_sysreg_hcr(HCR_HOST_VHE_FLAGS);
 	isb();
 
 	/* ... and the stage-2 MMU context that we switched away from */
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -94,6 +94,7 @@ WORKAROUND_2457168
 WORKAROUND_2645198
 WORKAROUND_2658417
 WORKAROUND_AMPERE_AC03_CPU_38
+WORKAROUND_AMPERE_AC04_CPU_23
 WORKAROUND_TRBE_OVERWRITE_FILL_MODE
 WORKAROUND_TSB_FLUSH_FAILURE
 WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
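The cpucaps file is the source from which the
ARM64_WORKAROUND_AMPERE_AC04_CPU_23 constant is generated, tying together
the Kconfig option, the cpu_errata.c detection entry, and the
alternative_has_cap_unlikely() check in write_sysreg_hcr().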