iommu: Remove struct iommu_ops *iommu from arch_setup_dma_ops()

This is not being used to pass ops, it is just a way to tell if an
iommu driver was probed. These days this can be detected directly via
device_iommu_mapped(). Call device_iommu_mapped() in the two places that
need to check it and remove the iommu parameter everywhere.

Reviewed-by: Jerry Snitselaar <jsnitsel@redhat.com>
Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Moritz Fischer <mdf@kernel.org>
Acked-by: Christoph Hellwig <hch@lst.de>
Acked-by: Rob Herring <robh@kernel.org>
Tested-by: Hector Martin <marcan@marcan.st>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/1-v2-16e4def25ebb+820-iommu_fwspec_p1_jgg@nvidia.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
This commit is contained in:
Jason Gunthorpe 2023-12-07 14:03:08 -04:00 committed by Joerg Roedel
parent 7be423336e
commit 4720287c7b
10 changed files with 16 additions and 17 deletions

View File

@ -91,7 +91,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
* Plug in direct dma map ops. * Plug in direct dma map ops.
*/ */
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent) bool coherent)
{ {
/* /*
* IOC hardware snoops all DMA traffic keeping the caches consistent * IOC hardware snoops all DMA traffic keeping the caches consistent

View File

@ -34,7 +34,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
} }
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent) bool coherent)
{ {
if (IS_ENABLED(CONFIG_CPU_V7M)) { if (IS_ENABLED(CONFIG_CPU_V7M)) {
/* /*

View File

@ -1713,7 +1713,7 @@ void arm_iommu_detach_device(struct device *dev)
EXPORT_SYMBOL_GPL(arm_iommu_detach_device); EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size, static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent) bool coherent)
{ {
struct dma_iommu_mapping *mapping; struct dma_iommu_mapping *mapping;
@ -1748,7 +1748,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev)
#else #else
static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size, static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent) bool coherent)
{ {
} }
@ -1757,7 +1757,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev) { }
#endif /* CONFIG_ARM_DMA_USE_IOMMU */ #endif /* CONFIG_ARM_DMA_USE_IOMMU */
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent) bool coherent)
{ {
/* /*
* Due to legacy code that sets the ->dma_coherent flag from a bus * Due to legacy code that sets the ->dma_coherent flag from a bus
@ -1776,8 +1776,8 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
if (dev->dma_ops) if (dev->dma_ops)
return; return;
if (iommu) if (device_iommu_mapped(dev))
arm_setup_iommu_dma_ops(dev, dma_base, size, iommu, coherent); arm_setup_iommu_dma_ops(dev, dma_base, size, coherent);
xen_setup_dma_ops(dev); xen_setup_dma_ops(dev);
dev->archdata.dma_ops_setup = true; dev->archdata.dma_ops_setup = true;

View File

@ -47,7 +47,7 @@ void arch_teardown_dma_ops(struct device *dev)
#endif #endif
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent) bool coherent)
{ {
int cls = cache_line_size_of_cpu(); int cls = cache_line_size_of_cpu();
@ -58,7 +58,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
ARCH_DMA_MINALIGN, cls); ARCH_DMA_MINALIGN, cls);
dev->dma_coherent = coherent; dev->dma_coherent = coherent;
if (iommu) if (device_iommu_mapped(dev))
iommu_setup_dma_ops(dev, dma_base, dma_base + size - 1); iommu_setup_dma_ops(dev, dma_base, dma_base + size - 1);
xen_setup_dma_ops(dev); xen_setup_dma_ops(dev);

View File

@ -138,7 +138,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS #ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent) bool coherent)
{ {
dev->dma_coherent = coherent; dev->dma_coherent = coherent;
} }

View File

@ -129,7 +129,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
} }
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent) bool coherent)
{ {
WARN_TAINT(!coherent && riscv_cbom_block_size > ARCH_DMA_MINALIGN, WARN_TAINT(!coherent && riscv_cbom_block_size > ARCH_DMA_MINALIGN,
TAINT_CPU_OUT_OF_SPEC, TAINT_CPU_OUT_OF_SPEC,

View File

@ -1641,8 +1641,7 @@ int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr,
if (PTR_ERR(iommu) == -EPROBE_DEFER) if (PTR_ERR(iommu) == -EPROBE_DEFER)
return -EPROBE_DEFER; return -EPROBE_DEFER;
arch_setup_dma_ops(dev, 0, U64_MAX, arch_setup_dma_ops(dev, 0, U64_MAX, attr == DEV_DMA_COHERENT);
iommu, attr == DEV_DMA_COHERENT);
return 0; return 0;
} }

View File

@ -488,7 +488,7 @@ void hv_setup_dma_ops(struct device *dev, bool coherent)
* Hyper-V does not offer a vIOMMU in the guest * Hyper-V does not offer a vIOMMU in the guest
* VM, so pass 0/NULL for the IOMMU settings * VM, so pass 0 for the DMA base/size settings
*/ */
arch_setup_dma_ops(dev, 0, 0, NULL, coherent); arch_setup_dma_ops(dev, 0, 0, coherent);
} }
EXPORT_SYMBOL_GPL(hv_setup_dma_ops); EXPORT_SYMBOL_GPL(hv_setup_dma_ops);

View File

@ -193,7 +193,7 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
dev_dbg(dev, "device is%sbehind an iommu\n", dev_dbg(dev, "device is%sbehind an iommu\n",
iommu ? " " : " not "); iommu ? " " : " not ");
arch_setup_dma_ops(dev, dma_start, size, iommu, coherent); arch_setup_dma_ops(dev, dma_start, size, coherent);
if (!iommu) if (!iommu)
of_dma_set_restricted_buffer(dev, np); of_dma_set_restricted_buffer(dev, np);

View File

@ -427,10 +427,10 @@ bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS #ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent); bool coherent);
#else #else
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
u64 size, const struct iommu_ops *iommu, bool coherent) u64 size, bool coherent)
{ {
} }
#endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */ #endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */