mirror of https://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson
drm/shmem-helper: Use refcount_t for vmap_use_count
Use the refcount_t helpers for vmap_use_count to make its refcounting consistent with pages_use_count and pages_pin_count, which already use refcount_t. This also lets vmapping benefit from refcount_t's overflow checks.

Acked-by: Maxime Ripard <mripard@kernel.org>
Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
Suggested-by: Boris Brezillon <boris.brezillon@collabora.com>
Acked-by: Thomas Zimmermann <tzimmermann@suse.de>
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20250322212608.40511-11-dmitry.osipenko@collabora.com
parent 0271cc484f
commit e1fc39a923
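For readers unfamiliar with the refcount_t helpers the patch switches to, below is a minimal user-space sketch (plain C11 atomics, not the kernel API) of the pattern drm_gem_shmem_vmap_locked()/drm_gem_shmem_vunmap_locked() now follow: take a reference with an inc-not-zero operation when a mapping already exists, set the count to 1 when the first mapping is created, and tear the mapping down only when dec-and-test reports that the last reference is gone. The struct and function names in the sketch (vmap_ref, ref_inc_not_zero, ref_dec_and_test) are illustrative, not kernel symbols.

/*
 * Illustrative user-space analogue of the refcounting pattern adopted by
 * this patch, built on C11 atomics.  ref_inc_not_zero() mirrors the
 * semantics of refcount_inc_not_zero(): it only takes a reference when the
 * count is already non-zero (i.e. a vmap already exists).
 * ref_dec_and_test() mirrors refcount_dec_and_test(): it returns true when
 * the caller dropped the last reference and should vunmap.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct vmap_ref {
	atomic_uint use_count;		/* stand-in for refcount_t vmap_use_count */
};

static bool ref_inc_not_zero(struct vmap_ref *r)
{
	unsigned int old = atomic_load(&r->use_count);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&r->use_count, &old, old + 1))
			return true;	/* mapping reused, reference taken */
	}
	return false;			/* count was zero, caller must create the mapping */
}

static bool ref_dec_and_test(struct vmap_ref *r)
{
	return atomic_fetch_sub(&r->use_count, 1) == 1;	/* true on the last put */
}

int main(void)
{
	struct vmap_ref r = { .use_count = 0 };

	/* First "vmap": inc-not-zero fails, so create the mapping and set the count to 1. */
	if (!ref_inc_not_zero(&r))
		atomic_store(&r.use_count, 1);	/* like refcount_set(&vmap_use_count, 1) */

	/* Second "vmap": the mapping exists, just take another reference. */
	if (ref_inc_not_zero(&r))
		printf("use_count=%u\n", atomic_load(&r.use_count));	/* prints 2 */

	/* Two "vunmaps": only the last one tears the mapping down. */
	if (!ref_dec_and_test(&r))
		printf("mapping still in use\n");
	if (ref_dec_and_test(&r))
		printf("last reference dropped, vunmap here\n");

	return 0;
}

Built with any C11 compiler (e.g. gcc -std=c11), the program prints the count after the second map and then announces the final unmap. The kernel's refcount_t additionally saturates on overflow, which the plain atomics above do not model; that saturation is the overflow check the commit message refers to.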
@@ -165,7 +165,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
 	} else {
 		dma_resv_lock(shmem->base.resv, NULL);
 
-		drm_WARN_ON(obj->dev, shmem->vmap_use_count);
+		drm_WARN_ON(obj->dev, refcount_read(&shmem->vmap_use_count));
 
 		if (shmem->sgt) {
 			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
@@ -355,23 +355,25 @@ int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
 
 		dma_resv_assert_held(shmem->base.resv);
 
-		if (shmem->vmap_use_count++ > 0) {
+		if (refcount_inc_not_zero(&shmem->vmap_use_count)) {
 			iosys_map_set_vaddr(map, shmem->vaddr);
 			return 0;
 		}
 
 		ret = drm_gem_shmem_pin_locked(shmem);
 		if (ret)
-			goto err_zero_use;
+			return ret;
 
 		if (shmem->map_wc)
 			prot = pgprot_writecombine(prot);
 		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
 				    VM_MAP, prot);
-		if (!shmem->vaddr)
+		if (!shmem->vaddr) {
 			ret = -ENOMEM;
-		else
+		} else {
 			iosys_map_set_vaddr(map, shmem->vaddr);
+			refcount_set(&shmem->vmap_use_count, 1);
+		}
 	}
 
 	if (ret) {
@@ -384,8 +386,6 @@ int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
 err_put_pages:
 	if (!drm_gem_is_imported(obj))
 		drm_gem_shmem_unpin_locked(shmem);
-err_zero_use:
-	shmem->vmap_use_count = 0;
 
 	return ret;
 }
@@ -413,14 +413,10 @@ void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
 	} else {
 		dma_resv_assert_held(shmem->base.resv);
 
-		if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
-			return;
-
-		if (--shmem->vmap_use_count > 0)
-			return;
-
-		vunmap(shmem->vaddr);
-		drm_gem_shmem_unpin_locked(shmem);
+		if (refcount_dec_and_test(&shmem->vmap_use_count)) {
+			vunmap(shmem->vaddr);
+			drm_gem_shmem_unpin_locked(shmem);
+		}
 	}
 
 	shmem->vaddr = NULL;
@@ -672,7 +668,7 @@ void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
 
 	drm_printf_indent(p, indent, "pages_pin_count=%u\n", refcount_read(&shmem->pages_pin_count));
 	drm_printf_indent(p, indent, "pages_use_count=%u\n", refcount_read(&shmem->pages_use_count));
-	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
+	drm_printf_indent(p, indent, "vmap_use_count=%u\n", refcount_read(&shmem->vmap_use_count));
 	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
 }
 EXPORT_SYMBOL_GPL(drm_gem_shmem_print_info);
@@ -168,7 +168,7 @@ static void drm_gem_shmem_test_vmap(struct kunit *test)
 	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
 	KUNIT_EXPECT_NULL(test, shmem->vaddr);
-	KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0);
+	KUNIT_EXPECT_EQ(test, refcount_read(&shmem->vmap_use_count), 0);
 
 	ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
 	KUNIT_ASSERT_EQ(test, ret, 0);
@@ -177,7 +177,7 @@ static void drm_gem_shmem_test_vmap(struct kunit *test)
 	KUNIT_ASSERT_EQ(test, ret, 0);
 	KUNIT_ASSERT_NOT_NULL(test, shmem->vaddr);
 	KUNIT_ASSERT_FALSE(test, iosys_map_is_null(&map));
-	KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 1);
+	KUNIT_EXPECT_EQ(test, refcount_read(&shmem->vmap_use_count), 1);
 
 	iosys_map_memset(&map, 0, TEST_BYTE, TEST_SIZE);
 	for (i = 0; i < TEST_SIZE; i++)
@@ -185,7 +185,7 @@ static void drm_gem_shmem_test_vmap(struct kunit *test)
 
 	drm_gem_shmem_vunmap_locked(shmem, &map);
 	KUNIT_EXPECT_NULL(test, shmem->vaddr);
-	KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0);
+	KUNIT_EXPECT_EQ(test, refcount_read(&shmem->vmap_use_count), 0);
 }
 
 /*
@@ -82,7 +82,7 @@ struct drm_gem_shmem_object {
 	 * Reference count on the virtual address.
 	 * The address are un-mapped when the count reaches zero.
 	 */
-	unsigned int vmap_use_count;
+	refcount_t vmap_use_count;
 
 	/**
 	 * @pages_mark_dirty_on_put: