Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson, synced 2025-09-04 02:25:58 +00:00
iommu/arm-smmu-v3: Build the whole CD in arm_smmu_make_s1_cd()
Half the code was living in arm_smmu_domain_finalise_s1(), just move it
here and take the values directly from the pgtbl_ops instead of storing
copies.

Tested-by: Nicolin Chen <nicolinc@nvidia.com>
Tested-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Reviewed-by: Michael Shavit <mshavit@google.com>
Reviewed-by: Mostafa Saleh <smostafa@google.com>
Reviewed-by: Nicolin Chen <nicolinc@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/8-v9-5040dc602008+177d7-smmuv3_newapi_p2_jgg@nvidia.com
Signed-off-by: Will Deacon <will@kernel.org>
commit 04905c17f6
parent 7b87c93c8b
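For readers less familiar with the io-pgtable layer, here is a minimal sketch (not part of the patch) of the pattern the change relies on: the stage-1 TTBR, MAIR and TCR fields are reachable from the domain's pgtbl_ops through io_pgtable_ops_to_pgtable() (a container_of() helper in <linux/io-pgtable.h>), so arm_smmu_ctx_desc no longer needs to cache its own ttbr/tcr/mair copies. The function name illustrative_read_s1_cfg() is made up purely for illustration; only the field accesses mirror what the patch does.

#include <linux/io-pgtable.h>
#include <linux/printk.h>

/*
 * Illustrative sketch only, not kernel code from this commit: shows how
 * the stage-1 translation parameters can be read straight out of the
 * io_pgtable_cfg that backs a set of io_pgtable_ops.
 */
static void illustrative_read_s1_cfg(struct io_pgtable_ops *pgtbl_ops)
{
	/* container_of() from the ops pointer back to the enclosing io_pgtable */
	const struct io_pgtable_cfg *pgtbl_cfg =
		&io_pgtable_ops_to_pgtable(pgtbl_ops)->cfg;

	/* TTBR and MAIR are plain u64 values in the LPAE stage-1 config */
	u64 ttbr = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
	u64 mair = pgtbl_cfg->arm_lpae_s1_cfg.mair;

	/* The TCR is a small struct of bit-fields (tsz, tg, irgn, orgn, sh, ips) */
	unsigned int t0sz = pgtbl_cfg->arm_lpae_s1_cfg.tcr.tsz;

	pr_debug("ttbr=%llx mair=%llx t0sz=%u\n", ttbr, mair, t0sz);
}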
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c

@@ -1309,15 +1309,25 @@ void arm_smmu_make_s1_cd(struct arm_smmu_cd *target,
 			 struct arm_smmu_domain *smmu_domain)
 {
 	struct arm_smmu_ctx_desc *cd = &smmu_domain->cd;
+	const struct io_pgtable_cfg *pgtbl_cfg =
+		&io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops)->cfg;
+	typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr =
+		&pgtbl_cfg->arm_lpae_s1_cfg.tcr;
 
 	memset(target, 0, sizeof(*target));
 
 	target->data[0] = cpu_to_le64(
-		cd->tcr |
+		FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, tcr->tsz) |
+		FIELD_PREP(CTXDESC_CD_0_TCR_TG0, tcr->tg) |
+		FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, tcr->irgn) |
+		FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, tcr->orgn) |
+		FIELD_PREP(CTXDESC_CD_0_TCR_SH0, tcr->sh) |
 #ifdef __BIG_ENDIAN
 		CTXDESC_CD_0_ENDI |
 #endif
+		CTXDESC_CD_0_TCR_EPD1 |
 		CTXDESC_CD_0_V |
+		FIELD_PREP(CTXDESC_CD_0_TCR_IPS, tcr->ips) |
 		CTXDESC_CD_0_AA64 |
 		(master->stall_enabled ? CTXDESC_CD_0_S : 0) |
 		CTXDESC_CD_0_R |
@@ -1325,9 +1335,9 @@ void arm_smmu_make_s1_cd(struct arm_smmu_cd *target,
 		CTXDESC_CD_0_ASET |
 		FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid)
 		);
-
-	target->data[1] = cpu_to_le64(cd->ttbr & CTXDESC_CD_1_TTB0_MASK);
-	target->data[3] = cpu_to_le64(cd->mair);
+	target->data[1] = cpu_to_le64(pgtbl_cfg->arm_lpae_s1_cfg.ttbr &
+				      CTXDESC_CD_1_TTB0_MASK);
+	target->data[3] = cpu_to_le64(pgtbl_cfg->arm_lpae_s1_cfg.mair);
 }
 
 void arm_smmu_clear_cd(struct arm_smmu_master *master, ioasid_t ssid)
@@ -2284,13 +2294,11 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
 }
 
 static int arm_smmu_domain_finalise_s1(struct arm_smmu_device *smmu,
-				       struct arm_smmu_domain *smmu_domain,
-				       struct io_pgtable_cfg *pgtbl_cfg)
+				       struct arm_smmu_domain *smmu_domain)
 {
 	int ret;
 	u32 asid;
 	struct arm_smmu_ctx_desc *cd = &smmu_domain->cd;
-	typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr = &pgtbl_cfg->arm_lpae_s1_cfg.tcr;
 
 	refcount_set(&cd->refs, 1);
 
@@ -2298,31 +2306,13 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_device *smmu,
 	mutex_lock(&arm_smmu_asid_lock);
 	ret = xa_alloc(&arm_smmu_asid_xa, &asid, cd,
 		       XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
-	if (ret)
-		goto out_unlock;
-
 	cd->asid = (u16)asid;
-	cd->ttbr = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
-	cd->tcr = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, tcr->tsz) |
-		  FIELD_PREP(CTXDESC_CD_0_TCR_TG0, tcr->tg) |
-		  FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, tcr->irgn) |
-		  FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, tcr->orgn) |
-		  FIELD_PREP(CTXDESC_CD_0_TCR_SH0, tcr->sh) |
-		  FIELD_PREP(CTXDESC_CD_0_TCR_IPS, tcr->ips) |
-		  CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64;
-	cd->mair = pgtbl_cfg->arm_lpae_s1_cfg.mair;
-
-	mutex_unlock(&arm_smmu_asid_lock);
-	return 0;
-
-out_unlock:
 	mutex_unlock(&arm_smmu_asid_lock);
 	return ret;
 }
 
 static int arm_smmu_domain_finalise_s2(struct arm_smmu_device *smmu,
-				       struct arm_smmu_domain *smmu_domain,
-				       struct io_pgtable_cfg *pgtbl_cfg)
+				       struct arm_smmu_domain *smmu_domain)
 {
 	int vmid;
 	struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
@@ -2346,8 +2336,7 @@ static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain,
 	struct io_pgtable_cfg pgtbl_cfg;
 	struct io_pgtable_ops *pgtbl_ops;
 	int (*finalise_stage_fn)(struct arm_smmu_device *smmu,
-				 struct arm_smmu_domain *smmu_domain,
-				 struct io_pgtable_cfg *pgtbl_cfg);
+				 struct arm_smmu_domain *smmu_domain);
 
 	/* Restrict the stage to what we can actually support */
 	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
@@ -2390,7 +2379,7 @@ static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain,
 	smmu_domain->domain.geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1;
 	smmu_domain->domain.geometry.force_aperture = true;
 
-	ret = finalise_stage_fn(smmu, smmu_domain, &pgtbl_cfg);
+	ret = finalise_stage_fn(smmu, smmu_domain);
 	if (ret < 0) {
 		free_io_pgtable_ops(pgtbl_ops);
 		return ret;

drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h

@@ -587,9 +587,6 @@ struct arm_smmu_strtab_l1_desc {
 
 struct arm_smmu_ctx_desc {
 	u16				asid;
-	u64				ttbr;
-	u64				tcr;
-	u64				mair;
 
 	refcount_t			refs;
 	struct mm_struct		*mm;