mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson
synced 2025-08-28 00:19:36 +00:00
iommu: Introduce iommu_paging_domain_alloc_flags()
Currently drivers call iommu_paging_domain_alloc(dev) to get an UNMANAGED domain. This is not sufficient to support PASID with an UNMANAGED domain, as some HW like AMD requires a certain page table type to support PASIDs. Also, the domain_alloc_paging op only passes the device as a param for domain allocation. This is not sufficient for the AMD driver to decide the right page table. Instead of extending ops->domain_alloc_paging() it was decided to enhance ops->domain_alloc_user() so that callers can pass various additional flags. Hence add the iommu_paging_domain_alloc_flags() API which takes flags as a parameter. Callers can pass additional parameters to indicate the type of domain required, etc. iommu_paging_domain_alloc_flags() internally calls the appropriate callback function to allocate a domain. Signed-off-by: Jason Gunthorpe <jgg@ziepe.ca> [Added description - Vasant] Signed-off-by: Vasant Hegde <vasant.hegde@amd.com> Reviewed-by: Jason Gunthorpe <jgg@nvidia.com> Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com> Reviewed-by: Yi Liu <yi.l.liu@intel.com> Reviewed-by: Kevin Tian <kevin.tian@intel.com> Link: https://lore.kernel.org/r/20241028093810.5901-3-vasant.hegde@amd.com Signed-off-by: Joerg Roedel <jroedel@suse.de>
This commit is contained in:
parent
541b967f5a
commit
20858d4ebb
@ -1962,20 +1962,42 @@ __iommu_group_domain_alloc(struct iommu_group *group, unsigned int type)
|
||||
}
|
||||
|
||||
/**
|
||||
* iommu_paging_domain_alloc() - Allocate a paging domain
|
||||
* iommu_paging_domain_alloc_flags() - Allocate a paging domain
|
||||
* @dev: device for which the domain is allocated
|
||||
* @flags: Enum of iommufd_hwpt_alloc_flags
|
||||
*
|
||||
* Allocate a paging domain which will be managed by a kernel driver. Return
|
||||
* allocated domain if successful, or a ERR pointer for failure.
|
||||
* allocated domain if successful, or an ERR pointer for failure.
|
||||
*/
|
||||
struct iommu_domain *iommu_paging_domain_alloc(struct device *dev)
|
||||
struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev,
|
||||
unsigned int flags)
|
||||
{
|
||||
const struct iommu_ops *ops;
|
||||
struct iommu_domain *domain;
|
||||
|
||||
if (!dev_has_iommu(dev))
|
||||
return ERR_PTR(-ENODEV);
|
||||
|
||||
return __iommu_domain_alloc(dev_iommu_ops(dev), dev, IOMMU_DOMAIN_UNMANAGED);
|
||||
ops = dev_iommu_ops(dev);
|
||||
|
||||
if (ops->domain_alloc_paging && !flags)
|
||||
domain = ops->domain_alloc_paging(dev);
|
||||
else if (ops->domain_alloc_user)
|
||||
domain = ops->domain_alloc_user(dev, flags, NULL, NULL);
|
||||
else if (ops->domain_alloc && !flags)
|
||||
domain = ops->domain_alloc(IOMMU_DOMAIN_UNMANAGED);
|
||||
else
|
||||
return ERR_PTR(-EOPNOTSUPP);
|
||||
|
||||
if (IS_ERR(domain))
|
||||
return domain;
|
||||
if (!domain)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
iommu_domain_init(domain, IOMMU_DOMAIN_UNMANAGED, ops);
|
||||
return domain;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(iommu_paging_domain_alloc);
|
||||
EXPORT_SYMBOL_GPL(iommu_paging_domain_alloc_flags);
|
||||
|
||||
void iommu_domain_free(struct iommu_domain *domain)
|
||||
{
|
||||
|
@ -511,8 +511,6 @@ static inline int __iommu_copy_struct_from_user_array(
|
||||
* the caller iommu_domain_alloc() returns.
|
||||
* @domain_alloc_user: Allocate an iommu domain corresponding to the input
|
||||
* parameters as defined in include/uapi/linux/iommufd.h.
|
||||
* Unlike @domain_alloc, it is called only by IOMMUFD and
|
||||
* must fully initialize the new domain before return.
|
||||
* Upon success, if the @user_data is valid and the @parent
|
||||
* points to a kernel-managed domain, the new domain must be
|
||||
* IOMMU_DOMAIN_NESTED type; otherwise, the @parent must be
|
||||
@ -787,7 +785,11 @@ static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
|
||||
extern int bus_iommu_probe(const struct bus_type *bus);
|
||||
extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
|
||||
extern bool iommu_group_has_isolated_msi(struct iommu_group *group);
|
||||
/* Allocate a kernel-managed paging domain; @flags is an enum of iommufd_hwpt_alloc_flags. */
struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev, unsigned int flags);

/* Flag-less convenience wrapper around iommu_paging_domain_alloc_flags(). */
static inline struct iommu_domain *iommu_paging_domain_alloc(struct device *dev)
{
	return iommu_paging_domain_alloc_flags(dev, 0);
}
|
||||
extern void iommu_domain_free(struct iommu_domain *domain);
|
||||
extern int iommu_attach_device(struct iommu_domain *domain,
|
||||
struct device *dev);
|
||||
@ -1078,6 +1080,12 @@ static inline bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
|
||||
return false;
|
||||
}
|
||||
|
||||
struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev,
|
||||
unsigned int flags)
|
||||
{
|
||||
return ERR_PTR(-ENODEV);
|
||||
}
|
||||
|
||||
static inline struct iommu_domain *iommu_paging_domain_alloc(struct device *dev)
|
||||
{
|
||||
return ERR_PTR(-ENODEV);
|
||||
|
Loading…
Reference in New Issue
Block a user