vfio/iommu_type1: Maintain a counter for non_pinned_groups
With this counter, we never need to traverse all groups to update
pinned_scope of vfio_iommu.

Suggested-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Keqian Zhu <zhukeqian1@huawei.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
commit 010321565a
parent 4a19f37a3d
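
The patch replaces the group-list walk in update_pinned_page_dirty_scope() with a per-iommu counter that the attach, detach, and pin paths keep in sync; dirty-bitmap reporting then only checks whether the counter is non-zero. Below is a rough standalone sketch of that bookkeeping, not the kernel code: the structs and helper names (attach_group, group_pins_pages, detach_group) are illustrative, and only the num_non_pinned_groups and pinned_page_dirty_scope names mirror the patch.

/*
 * Simplified userspace sketch of the counter bookkeeping introduced by this
 * patch.  The struct layouts are illustrative; only the field and flag names
 * (num_non_pinned_groups, pinned_page_dirty_scope) mirror the kernel code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct vfio_group_sketch {
	bool pinned_page_dirty_scope;	/* group reports dirty pages by pinning */
};

struct vfio_iommu_sketch {
	uint64_t num_non_pinned_groups;	/* groups that may dirty any page */
};

/* Attaching a group: assume it can dirty any page until it starts pinning. */
static void attach_group(struct vfio_iommu_sketch *iommu,
			 struct vfio_group_sketch *group)
{
	group->pinned_page_dirty_scope = false;
	iommu->num_non_pinned_groups++;
}

/* First pin from a group limits its dirty scope to pinned pages. */
static void group_pins_pages(struct vfio_iommu_sketch *iommu,
			     struct vfio_group_sketch *group)
{
	if (!group->pinned_page_dirty_scope) {
		group->pinned_page_dirty_scope = true;
		iommu->num_non_pinned_groups--;
	}
}

/* Detaching a group that never pinned also drops the counter. */
static void detach_group(struct vfio_iommu_sketch *iommu,
			 struct vfio_group_sketch *group)
{
	if (!group->pinned_page_dirty_scope)
		iommu->num_non_pinned_groups--;
}

int main(void)
{
	struct vfio_iommu_sketch iommu = { 0 };
	struct vfio_group_sketch group = { false };

	attach_group(&iommu, &group);
	/* The dirty-scope test is now O(1): no walk over every group. */
	printf("report all pages dirty: %s\n",
	       iommu.num_non_pinned_groups ? "yes" : "no");	/* yes */

	group_pins_pages(&iommu, &group);
	printf("report all pages dirty: %s\n",
	       iommu.num_non_pinned_groups ? "yes" : "no");	/* no */

	return 0;
}

Compared with the removed update_pinned_page_dirty_scope() traversal, the counter makes each promotion or demotion O(1), at the cost of keeping the increments and decrements balanced across the attach, pin, and detach paths, as the hunks below do.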
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -70,10 +70,10 @@ struct vfio_iommu {
 	struct blocking_notifier_head notifier;
 	unsigned int dma_avail;
 	uint64_t pgsize_bitmap;
+	uint64_t num_non_pinned_groups;
 	bool v2;
 	bool nesting;
 	bool dirty_page_tracking;
-	bool pinned_page_dirty_scope;
 };
 
 struct vfio_domain {
@@ -148,7 +148,6 @@ static int put_pfn(unsigned long pfn, int prot);
 static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
 					       struct iommu_group *iommu_group);
 
-static void update_pinned_page_dirty_scope(struct vfio_iommu *iommu);
 /*
  * This code handles mapping and unmapping of user data buffers
  * into DMA'ble space using the IOMMU
@@ -726,7 +725,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
 	group = vfio_iommu_find_iommu_group(iommu, iommu_group);
 	if (!group->pinned_page_dirty_scope) {
 		group->pinned_page_dirty_scope = true;
-		update_pinned_page_dirty_scope(iommu);
+		iommu->num_non_pinned_groups--;
 	}
 
 	goto pin_done;
@@ -1004,7 +1003,7 @@ static int update_user_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
 	 * mark all pages dirty if any IOMMU capable device is not able
 	 * to report dirty pages and all pages are pinned and mapped.
 	 */
-	if (!iommu->pinned_page_dirty_scope && dma->iommu_mapped)
+	if (iommu->num_non_pinned_groups && dma->iommu_mapped)
 		bitmap_set(dma->bitmap, 0, nbits);
 
 	if (shift) {
@@ -1635,33 +1634,6 @@ static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
 	return group;
 }
 
-static void update_pinned_page_dirty_scope(struct vfio_iommu *iommu)
-{
-	struct vfio_domain *domain;
-	struct vfio_group *group;
-
-	list_for_each_entry(domain, &iommu->domain_list, next) {
-		list_for_each_entry(group, &domain->group_list, next) {
-			if (!group->pinned_page_dirty_scope) {
-				iommu->pinned_page_dirty_scope = false;
-				return;
-			}
-		}
-	}
-
-	if (iommu->external_domain) {
-		domain = iommu->external_domain;
-		list_for_each_entry(group, &domain->group_list, next) {
-			if (!group->pinned_page_dirty_scope) {
-				iommu->pinned_page_dirty_scope = false;
-				return;
-			}
-		}
-	}
-
-	iommu->pinned_page_dirty_scope = true;
-}
-
 static bool vfio_iommu_has_sw_msi(struct list_head *group_resv_regions,
 				  phys_addr_t *base)
 {
@@ -2070,8 +2042,6 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 		 * addition of a dirty tracking group.
 		 */
 		group->pinned_page_dirty_scope = true;
-		if (!iommu->pinned_page_dirty_scope)
-			update_pinned_page_dirty_scope(iommu);
 		mutex_unlock(&iommu->lock);
 
 		return 0;
@@ -2201,7 +2171,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 	 * demotes the iommu scope until it declares itself dirty tracking
 	 * capable via the page pinning interface.
 	 */
-	iommu->pinned_page_dirty_scope = false;
+	iommu->num_non_pinned_groups++;
 	mutex_unlock(&iommu->lock);
 	vfio_iommu_resv_free(&group_resv_regions);
 
@@ -2414,7 +2384,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
 	 * to be promoted.
 	 */
 	if (update_dirty_scope) {
-		update_pinned_page_dirty_scope(iommu);
+		iommu->num_non_pinned_groups--;
 		if (iommu->dirty_page_tracking)
 			vfio_iommu_populate_bitmap_full(iommu);
 	}