iommu/amd: Reduce domain lock scope in attach device path
Currently the attach device path takes the protection domain lock followed by the dev_data lock. Most of the operations in this path are specific to the device data, except for pdom_attach_iommu(), which updates the protection domain structure. Hence reduce the scope of the protection domain lock.

Note that this changes the locking order: the device lock is now taken before the domain lock (group->mutex -> dev_data->lock -> pdom->lock). Since dev_data->lock is used only in the device attachment path, changing the order is fine and will not create any issue.

Finally, move the NUMA node assignment into pdom_attach_iommu().

Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/20241030063556.6104-8-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 07bbd660db
commit d6b47dec36
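As context for the lock-order change described in the message above, here is a minimal before/after sketch. This is an illustrative toy with simplified, hypothetical types and function names (toy_pdom, toy_dev_data, toy_attach_*), not the driver's real code: before the patch, attach_device() held the domain lock around the whole attach path with the device lock nested inside; after it, the device lock is the outer lock and the domain lock is taken only around the domain-wide updates, giving the order group->mutex -> dev_data->lock -> pdom->lock.

/* Illustrative sketch only: toy types, not the driver's real structures. */
#include <linux/spinlock.h>

struct toy_pdom {
	spinlock_t lock;		/* protects domain-wide state */
	int nid;			/* e.g. the NUMA node assignment */
};

struct toy_dev_data {
	spinlock_t lock;		/* protects per-device attach state */
	struct toy_pdom *domain;
};

/* Before: domain lock held across the whole attach, device lock nested. */
static int toy_attach_before(struct toy_dev_data *dd, struct toy_pdom *pdom)
{
	unsigned long flags;

	spin_lock_irqsave(&pdom->lock, flags);	/* outer */
	spin_lock(&dd->lock);			/* inner */
	dd->domain = pdom;			/* device-specific work */
	spin_unlock(&dd->lock);
	spin_unlock_irqrestore(&pdom->lock, flags);
	return 0;
}

/* After: device lock is outer; domain lock only guards domain updates. */
static int toy_attach_after(struct toy_dev_data *dd, struct toy_pdom *pdom)
{
	unsigned long flags;

	spin_lock(&dd->lock);			/* outer (device state) */
	dd->domain = pdom;			/* device-specific work */

	spin_lock_irqsave(&pdom->lock, flags);	/* inner, narrow scope */
	/* domain-wide work: refcount in pdom->iommu_array, NUMA node, ... */
	spin_unlock_irqrestore(&pdom->lock, flags);

	spin_unlock(&dd->lock);
	return 0;
}

The narrower domain-lock critical section reduces contention on pdom->lock, and because dev_data->lock is taken only in the attach/detach paths, the inverted nesting introduces no new deadlock ordering against existing users.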
drivers/iommu/amd/iommu.c

@@ -2020,16 +2020,23 @@ static int pdom_attach_iommu(struct amd_iommu *iommu,
 			     struct protection_domain *pdom)
 {
 	struct pdom_iommu_info *pdom_iommu_info, *curr;
+	struct io_pgtable_cfg *cfg = &pdom->iop.pgtbl.cfg;
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&pdom->lock, flags);
 
 	pdom_iommu_info = xa_load(&pdom->iommu_array, iommu->index);
 	if (pdom_iommu_info) {
 		pdom_iommu_info->refcnt++;
-		return 0;
+		goto out_unlock;
 	}
 
 	pdom_iommu_info = kzalloc(sizeof(*pdom_iommu_info), GFP_ATOMIC);
-	if (!pdom_iommu_info)
-		return -ENOMEM;
+	if (!pdom_iommu_info) {
+		ret = -ENOMEM;
+		goto out_unlock;
+	}
 
 	pdom_iommu_info->iommu = iommu;
 	pdom_iommu_info->refcnt = 1;
@@ -2038,43 +2045,52 @@ static int pdom_attach_iommu(struct amd_iommu *iommu,
 			  NULL, pdom_iommu_info, GFP_ATOMIC);
 	if (curr) {
 		kfree(pdom_iommu_info);
-		return -ENOSPC;
+		ret = -ENOSPC;
+		goto out_unlock;
 	}
 
-	return 0;
+	/* Update NUMA Node ID */
+	if (cfg->amd.nid == NUMA_NO_NODE)
+		cfg->amd.nid = dev_to_node(&iommu->dev->dev);
+
+out_unlock:
+	spin_unlock_irqrestore(&pdom->lock, flags);
+	return ret;
 }
 
 static void pdom_detach_iommu(struct amd_iommu *iommu,
 			      struct protection_domain *pdom)
 {
 	struct pdom_iommu_info *pdom_iommu_info;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdom->lock, flags);
 
 	pdom_iommu_info = xa_load(&pdom->iommu_array, iommu->index);
-	if (!pdom_iommu_info)
+	if (!pdom_iommu_info) {
+		spin_unlock_irqrestore(&pdom->lock, flags);
 		return;
+	}
 
 	pdom_iommu_info->refcnt--;
 	if (pdom_iommu_info->refcnt == 0) {
 		xa_erase(&pdom->iommu_array, iommu->index);
 		kfree(pdom_iommu_info);
 	}
+
+	spin_unlock_irqrestore(&pdom->lock, flags);
 }
 
 static int do_attach(struct iommu_dev_data *dev_data,
 		     struct protection_domain *domain)
 {
 	struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
-	struct io_pgtable_cfg *cfg = &domain->iop.pgtbl.cfg;
 	int ret = 0;
 
 	/* Update data structures */
 	dev_data->domain = domain;
 	list_add(&dev_data->list, &domain->dev_list);
 
-	/* Update NUMA Node ID */
-	if (cfg->amd.nid == NUMA_NO_NODE)
-		cfg->amd.nid = dev_to_node(dev_data->dev);
-
 	/* Do reference counting */
 	ret = pdom_attach_iommu(iommu, domain);
 	if (ret)
@@ -2096,12 +2112,15 @@ static void do_detach(struct iommu_dev_data *dev_data)
 {
 	struct protection_domain *domain = dev_data->domain;
 	struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
+	unsigned long flags;
 
 	/* Clear DTE and flush the entry */
 	dev_update_dte(dev_data, false);
 
 	/* Flush IOTLB and wait for the flushes to finish */
+	spin_lock_irqsave(&domain->lock, flags);
 	amd_iommu_domain_flush_all(domain);
+	spin_unlock_irqrestore(&domain->lock, flags);
 
 	/* Clear GCR3 table */
 	if (pdom_is_sva_capable(domain))
@@ -2123,11 +2142,8 @@ static int attach_device(struct device *dev,
 			 struct protection_domain *domain)
 {
 	struct iommu_dev_data *dev_data;
-	unsigned long flags;
 	int ret = 0;
 
-	spin_lock_irqsave(&domain->lock, flags);
-
 	dev_data = dev_iommu_priv_get(dev);
 
 	spin_lock(&dev_data->lock);
@@ -2142,8 +2158,6 @@ static int attach_device(struct device *dev,
 out:
 	spin_unlock(&dev_data->lock);
 
-	spin_unlock_irqrestore(&domain->lock, flags);
-
 	return ret;
 }
 
@@ -2153,13 +2167,9 @@ static int attach_device(struct device *dev,
 static void detach_device(struct device *dev)
 {
 	struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
-	struct protection_domain *domain = dev_data->domain;
 	struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
-	unsigned long flags;
 	bool ppr = dev_data->ppr;
 
-	spin_lock_irqsave(&domain->lock, flags);
-
 	spin_lock(&dev_data->lock);
 
 	/*
@@ -2183,8 +2193,6 @@ static void detach_device(struct device *dev)
 out:
 	spin_unlock(&dev_data->lock);
 
-	spin_unlock_irqrestore(&domain->lock, flags);
-
 	/* Remove IOPF handler */
 	if (ppr)
 		amd_iommu_iopf_remove_device(iommu, dev_data);