Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson (synced 2025-09-01 15:14:52 +00:00)
iommu/amd: Lock DTE before updating the entry with WRITE_ONCE()
When updating only within a 64-bit tuple of a DTE, just lock the DTE and use WRITE_ONCE() because it is writing to memory read back by HW.

Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Link: https://lore.kernel.org/r/20241118054937.5203-9-suravee.suthikulpanit@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 66ea3f96ae
commit 457da57646
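As a quick orientation before the hunks: the pattern this commit applies is "take the per-device DTE lock, read one 64-bit word of the entry, modify it, write it back with a single non-torn store". The sketch below is a hypothetical userspace model of that pattern, not kernel code: a pthread spinlock and C11 relaxed atomics stand in for the kernel's spinlock_t, READ_ONCE() and WRITE_ONCE(), and the DTE_FLAG_HAD bit position is a placeholder rather than the real DTE layout.

/*
 * Minimal userspace sketch of the locking pattern in this commit.
 * Stand-ins (assumptions, not kernel definitions): pthread_spinlock_t
 * for spinlock_t, C11 atomic_load/store_explicit for READ_ONCE() and
 * WRITE_ONCE(), and a placeholder DTE_FLAG_HAD bit.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define DTE_FLAG_HAD (1ULL << 7)        /* placeholder bit position */

struct dev_table_entry {                /* a DTE is four 64-bit words */
	_Atomic uint64_t data[4];
};

struct iommu_dev_data {
	pthread_spinlock_t dte_lock;    /* serializes writers of one DTE */
	struct dev_table_entry *dte;
};

static void set_dirty_tracking(struct iommu_dev_data *dev_data, int enable)
{
	uint64_t new;

	pthread_spin_lock(&dev_data->dte_lock);
	/* READ_ONCE() stand-in: one non-torn load of the 64-bit word. */
	new = atomic_load_explicit(&dev_data->dte->data[0],
				   memory_order_relaxed);
	new = enable ? (new | DTE_FLAG_HAD) : (new & ~DTE_FLAG_HAD);
	/* WRITE_ONCE() stand-in: the word is published in one store. */
	atomic_store_explicit(&dev_data->dte->data[0], new,
			      memory_order_relaxed);
	pthread_spin_unlock(&dev_data->dte_lock);
}

int main(void)
{
	struct dev_table_entry dte = {0};
	struct iommu_dev_data dev_data = { .dte = &dte };

	pthread_spin_init(&dev_data.dte_lock, PTHREAD_PROCESS_PRIVATE);
	set_dirty_tracking(&dev_data, 1);
	printf("data[0] = %#llx\n",
	       (unsigned long long)atomic_load(&dte.data[0]));
	pthread_spin_destroy(&dev_data.dte_lock);
	return 0;
}

The lock only serializes software writers against each other; what protects the IOMMU hardware, which reads the device table concurrently without ever taking the lock, is that each 64-bit word is updated with a single store, so it can never observe a half-written word.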
drivers/iommu/amd/amd_iommu.h
@@ -186,3 +186,4 @@ struct dev_table_entry *get_dev_table(struct amd_iommu *iommu);
 #endif
 
 struct dev_table_entry *amd_iommu_get_ivhd_dte_flags(u16 segid, u16 devid);
+struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid);
drivers/iommu/amd/iommu.c
@@ -347,7 +347,7 @@ static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid)
 	return dev_data;
 }
 
-static struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid)
+struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid)
 {
 	struct iommu_dev_data *dev_data;
 	struct llist_node *node;
@@ -2845,12 +2845,12 @@ static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
 					  bool enable)
 {
 	struct protection_domain *pdomain = to_pdomain(domain);
-	struct dev_table_entry *dev_table;
+	struct dev_table_entry *dte;
 	struct iommu_dev_data *dev_data;
 	bool domain_flush = false;
 	struct amd_iommu *iommu;
 	unsigned long flags;
-	u64 pte_root;
+	u64 new;
 
 	spin_lock_irqsave(&pdomain->lock, flags);
 	if (!(pdomain->dirty_tracking ^ enable)) {
@@ -2859,16 +2859,15 @@ static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
 	}
 
 	list_for_each_entry(dev_data, &pdomain->dev_list, list) {
+		spin_lock(&dev_data->dte_lock);
 		iommu = get_amd_iommu_from_dev_data(dev_data);
-
-		dev_table = get_dev_table(iommu);
-		pte_root = dev_table[dev_data->devid].data[0];
-
-		pte_root = (enable ? pte_root | DTE_FLAG_HAD :
-				     pte_root & ~DTE_FLAG_HAD);
+		dte = &get_dev_table(iommu)[dev_data->devid];
+		new = dte->data[0];
+		new = (enable ? new | DTE_FLAG_HAD : new & ~DTE_FLAG_HAD);
+		dte->data[0] = new;
+		spin_unlock(&dev_data->dte_lock);
 
 		/* Flush device DTE */
-		dev_table[dev_data->devid].data[0] = pte_root;
 		device_flush_dte(dev_data);
 		domain_flush = true;
 	}
@@ -3135,17 +3134,23 @@ static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid)
 static void set_dte_irq_entry(struct amd_iommu *iommu, u16 devid,
 			      struct irq_remap_table *table)
 {
-	u64 dte;
-	struct dev_table_entry *dev_table = get_dev_table(iommu);
+	u64 new;
+	struct dev_table_entry *dte = &get_dev_table(iommu)[devid];
+	struct iommu_dev_data *dev_data = search_dev_data(iommu, devid);
 
-	dte = dev_table[devid].data[2];
-	dte &= ~DTE_IRQ_PHYS_ADDR_MASK;
-	dte |= iommu_virt_to_phys(table->table);
-	dte |= DTE_IRQ_REMAP_INTCTL;
-	dte |= DTE_INTTABLEN;
-	dte |= DTE_IRQ_REMAP_ENABLE;
+	if (dev_data)
+		spin_lock(&dev_data->dte_lock);
 
-	dev_table[devid].data[2] = dte;
+	new = READ_ONCE(dte->data[2]);
+	new &= ~DTE_IRQ_PHYS_ADDR_MASK;
+	new |= iommu_virt_to_phys(table->table);
+	new |= DTE_IRQ_REMAP_INTCTL;
+	new |= DTE_INTTABLEN;
+	new |= DTE_IRQ_REMAP_ENABLE;
+	WRITE_ONCE(dte->data[2], new);
+
+	if (dev_data)
+		spin_unlock(&dev_data->dte_lock);
 }
 
 static struct irq_remap_table *get_irq_table(struct amd_iommu *iommu, u16 devid)