drm/ttm: Add ttm_bo_kmap_try_from_panic()

If the ttm bo is backed by pages, then it's possible to safely kmap
one page at a time, using kmap_try_from_panic().
Unfortunately there is no way to do the same with ioremap, so it
only supports the kmap case.
This is needed for proper drm_panic support with xe driver.

Signed-off-by: Jocelyn Falempe <jfalempe@redhat.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Link: https://lore.kernel.org/r/20250624091501.257661-6-jfalempe@redhat.com
Signed-off-by: Maarten Lankhorst <dev@lankhorst.se>
This commit is contained in:
Jocelyn Falempe 2025-06-24 11:01:14 +02:00 committed by Maarten Lankhorst
parent 796f437d7b
commit 718370ff28
2 changed files with 28 additions and 0 deletions

View File

@ -381,6 +381,33 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
	return (!map->virtual) ? -ENOMEM : 0;
}
/**
 * ttm_bo_kmap_try_from_panic
 *
 * @bo: The buffer object
 * @page: The page to map
 *
 * Sets up a kernel virtual mapping using kmap_local_page_try_from_panic().
 * This should only be called from the panic handler, if you make sure the bo
 * is the one being displayed, so is properly allocated, and protected.
 *
 * Returns the vaddr, that you can use to write to the bo, and that you should
 * pass to kunmap_local() when you're done with this page, or NULL if the bo
 * is in iomem.
 */
void *ttm_bo_kmap_try_from_panic(struct ttm_buffer_object *bo, unsigned long page)
{
	/* Use >= instead of "page + 1 >", which would wrap to 0 and pass
	 * the bounds check if page == ULONG_MAX.
	 */
	if (page >= PFN_UP(bo->resource->size))
		return NULL;

	/* There is no way to ioremap safely from the panic context, so only
	 * page-backed (non-iomem) bos are supported. Also check bo->ttm for
	 * NULL before dereferencing it, to avoid a crash inside the panic
	 * handler on a bo without a populated ttm.
	 */
	if (!bo->resource->bus.is_iomem && bo->ttm && bo->ttm->pages &&
	    bo->ttm->pages[page])
		return kmap_local_page_try_from_panic(bo->ttm->pages[page]);

	return NULL;
}
EXPORT_SYMBOL(ttm_bo_kmap_try_from_panic);
/**
 * ttm_bo_kmap
 *

View File

@ -401,6 +401,7 @@ int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
		unsigned long num_pages, struct ttm_bo_kmap_obj *map);
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
void *ttm_bo_kmap_try_from_panic(struct ttm_buffer_object *bo, unsigned long page);
int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map);
void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map);
int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo);