drm/gpuvm: Add locking helpers
For UNMAP/REMAP steps we may need to lock objects that are not explicitly
listed in the VM_BIND ioctl in order to tear down unmapped VAs. These
helpers handle locking/preparing the needed objects.

Note that these functions do not strictly require the VM changes to be
applied before the next drm_gpuvm_sm_map_exec_lock()/_unmap_exec_lock()
call. In the case that VM changes from an earlier drm_gpuvm_sm_map()/
_unmap() call result in a differing sequence of steps when the VM changes
are actually applied, it will be the same set of GEM objects involved, so
the locking is still correct.

v2: Rename to drm_gpuvm_sm_*_exec_locked() [Danilo]
v3: Expand comments to show expected usage, and explain how the usage
    is safe in the case of overlapping driver VM_BIND ops.

Signed-off-by: Rob Clark <robin.clark@oss.qualcomm.com>
Tested-by: Antonino Maniscalco <antomani103@gmail.com>
Reviewed-by: Antonino Maniscalco <antomani103@gmail.com>
Acked-by: Danilo Krummrich <dakr@kernel.org>
Patchwork: https://patchwork.freedesktop.org/patch/661458/
commit 471920ce25
parent 9d712c50c3
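To make the two-phase flow described above concrete, the following is a
minimal driver-side sketch: every GEM object is locked up front with the
new helpers, and only then are the VM changes applied. The
driver_vm_bind_op struct, the DRIVER_OP_* values, and the choice of one
reserved fence slot are hypothetical placeholders, not part of this patch:

        /* Hypothetical driver-side sketch (not part of this patch). */
        struct driver_vm_bind_op {
                u32 op;                      /* DRIVER_OP_MAP or DRIVER_OP_UNMAP */
                u64 addr, range;
                struct drm_gem_object *obj;  /* only for DRIVER_OP_MAP */
                u64 obj_offset;
        };

        static int driver_vm_bind(struct drm_gpuvm *gpuvm,
                                  struct driver_vm_bind_op *ops, u32 nr_ops)
        {
                struct drm_exec exec;
                int ret = 0;
                u32 i;

                drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES |
                                     DRM_EXEC_INTERRUPTIBLE_WAIT, 0);

                /* Phase 1: lock (and prepare) every object the ops will touch. */
                drm_exec_until_all_locked(&exec) {
                        for (i = 0; i < nr_ops; i++) {
                                if (ops[i].op == DRIVER_OP_UNMAP)
                                        ret = drm_gpuvm_sm_unmap_exec_lock(gpuvm, &exec,
                                                                           ops[i].addr, ops[i].range);
                                else
                                        ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, 1,
                                                                         ops[i].addr, ops[i].range,
                                                                         ops[i].obj, ops[i].obj_offset);
                                drm_exec_retry_on_contention(&exec);
                                if (ret)
                                        goto out;
                        }
                }

                /* Phase 2: apply the VM changes with all locks held. */
                for (i = 0; i < nr_ops; i++) {
                        if (ops[i].op == DRIVER_OP_UNMAP)
                                ret = drm_gpuvm_sm_unmap(gpuvm, &ops[i],
                                                         ops[i].addr, ops[i].range);
                        else
                                ret = drm_gpuvm_sm_map(gpuvm, &ops[i],
                                                       ops[i].addr, ops[i].range,
                                                       ops[i].obj, ops[i].obj_offset);
                        if (ret)
                                break;
                }
        out:
                drm_exec_fini(&exec);
                return ret;
        }

Because all locking happens in phase 1, a contention retry can restart the
lock loop without having to undo any VM state.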
@@ -2391,6 +2391,132 @@ drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
}
EXPORT_SYMBOL_GPL(drm_gpuvm_sm_unmap);

static int
drm_gpuva_sm_step_lock(struct drm_gpuva_op *op, void *priv)
{
        struct drm_exec *exec = priv;

        switch (op->op) {
        case DRM_GPUVA_OP_REMAP:
                if (op->remap.unmap->va->gem.obj)
                        return drm_exec_lock_obj(exec, op->remap.unmap->va->gem.obj);
                return 0;
        case DRM_GPUVA_OP_UNMAP:
                if (op->unmap.va->gem.obj)
                        return drm_exec_lock_obj(exec, op->unmap.va->gem.obj);
                return 0;
        default:
                return 0;
        }
}

static const struct drm_gpuvm_ops lock_ops = {
        .sm_step_map = drm_gpuva_sm_step_lock,
        .sm_step_remap = drm_gpuva_sm_step_lock,
        .sm_step_unmap = drm_gpuva_sm_step_lock,
};

/**
 * drm_gpuvm_sm_map_exec_lock() - locks the objects touched by a drm_gpuvm_sm_map()
 * @gpuvm: the &drm_gpuvm representing the GPU VA space
 * @exec: the &drm_exec locking context
 * @num_fences: for newly mapped objects, the # of fences to reserve
 * @req_addr: the start address of the new mapping
 * @req_range: the range of the new mapping
 * @req_obj: the &drm_gem_object to map
 * @req_offset: the offset within the &drm_gem_object
 *
 * This function locks (drm_exec_lock_obj()) objects that will be unmapped/
 * remapped, and locks+prepares (drm_exec_prepare_obj()) objects that
 * will be newly mapped.
 *
 * The expected usage is:
 *
 *    vm_bind {
 *        struct drm_exec exec;
 *
 *        // IGNORE_DUPLICATES is required, INTERRUPTIBLE_WAIT is recommended:
 *        drm_exec_init(&exec, IGNORE_DUPLICATES | INTERRUPTIBLE_WAIT, 0);
 *
 *        drm_exec_until_all_locked (&exec) {
 *            for_each_vm_bind_operation {
 *                switch (op->op) {
 *                case DRIVER_OP_UNMAP:
 *                    ret = drm_gpuvm_sm_unmap_exec_lock(gpuvm, &exec, op->addr, op->range);
 *                    break;
 *                case DRIVER_OP_MAP:
 *                    ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences,
 *                                                     op->addr, op->range,
 *                                                     obj, op->obj_offset);
 *                    break;
 *                }
 *
 *                drm_exec_retry_on_contention(&exec);
 *                if (ret)
 *                    return ret;
 *            }
 *        }
 *    }
 *
 * This enables all locking to be performed before the driver begins modifying
 * the VM. This is safe to do in the case of overlapping DRIVER_VM_BIND_OPs,
 * where an earlier op can alter the sequence of steps generated for a later
 * op, because the later altered step will involve the same GEM object(s)
 * already seen in the earlier locking step. For example:
 *
 * 1) An earlier driver DRIVER_OP_UNMAP op removes the need for a
 *    DRM_GPUVA_OP_REMAP/UNMAP step. This is safe because we've already
 *    locked the GEM object in the earlier DRIVER_OP_UNMAP op.
 *
 * 2) An earlier DRIVER_OP_MAP op overlaps with a later DRIVER_OP_MAP/UNMAP
 *    op, introducing a DRM_GPUVA_OP_REMAP/UNMAP that wouldn't have been
 *    required without the earlier DRIVER_OP_MAP. This is safe because we've
 *    already locked the GEM object in the earlier DRIVER_OP_MAP step.
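 *
 * As a concrete (hypothetical) instance of 2): an earlier DRIVER_OP_MAP
 * maps OBJ1 at 0x1000-0x3000 and a later DRIVER_OP_MAP maps OBJ2 at
 * 0x2000-0x4000. When the later op is eventually applied it generates a
 * DRM_GPUVA_OP_REMAP of OBJ1's mapping, but OBJ1 was already locked and
 * prepared by the earlier op's drm_gpuvm_sm_map_exec_lock() call.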
 *
 * Returns: 0 on success or a negative error code
 */
int
drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
                           struct drm_exec *exec, unsigned int num_fences,
                           u64 req_addr, u64 req_range,
                           struct drm_gem_object *req_obj, u64 req_offset)
{
        if (req_obj) {
                int ret = drm_exec_prepare_obj(exec, req_obj, num_fences);
                if (ret)
                        return ret;
        }

        return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec,
                                  req_addr, req_range,
                                  req_obj, req_offset);
}
EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_exec_lock);

/**
 * drm_gpuvm_sm_unmap_exec_lock() - locks the objects touched by drm_gpuvm_sm_unmap()
 * @gpuvm: the &drm_gpuvm representing the GPU VA space
 * @exec: the &drm_exec locking context
 * @req_addr: the start address of the range to unmap
 * @req_range: the range of the mappings to unmap
 *
 * This function locks (drm_exec_lock_obj()) objects that will be unmapped/
 * remapped by drm_gpuvm_sm_unmap().
 *
 * See drm_gpuvm_sm_map_exec_lock() for expected usage.
 *
 * Returns: 0 on success or a negative error code
 */
int
drm_gpuvm_sm_unmap_exec_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
                             u64 req_addr, u64 req_range)
{
        return __drm_gpuvm_sm_unmap(gpuvm, &lock_ops, exec,
                                    req_addr, req_range);
}
EXPORT_SYMBOL_GPL(drm_gpuvm_sm_unmap_exec_lock);

static struct drm_gpuva_op *
gpuva_op_alloc(struct drm_gpuvm *gpuvm)
{
@@ -1211,6 +1211,14 @@ int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
int drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
                       u64 addr, u64 range);

int drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
                               struct drm_exec *exec, unsigned int num_fences,
                               u64 req_addr, u64 req_range,
                               struct drm_gem_object *obj, u64 offset);

int drm_gpuvm_sm_unmap_exec_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
                                 u64 req_addr, u64 req_range);

void drm_gpuva_map(struct drm_gpuvm *gpuvm,
                   struct drm_gpuva *va,
                   struct drm_gpuva_op_map *op);
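For contrast with the internal lock_ops above, the apply phase drives the
&drm_gpuvm_ops that the driver registered with drm_gpuvm_init() when it
calls drm_gpuvm_sm_map()/_unmap(). A minimal sketch, assuming hypothetical
driver_gpuva_* callbacks that would perform the actual page-table updates
(these names are illustrative, not part of this patch):

        /* Hypothetical apply-phase callbacks (not part of this patch). */
        static int driver_gpuva_map(struct drm_gpuva_op *op, void *priv)
        {
                /* Program page tables for op->map.va.addr/range, then
                 * insert the new VA with drm_gpuva_map().
                 */
                return 0;
        }

        static int driver_gpuva_remap(struct drm_gpuva_op *op, void *priv)
        {
                /* Tear down the overlapped part of the old mapping and
                 * re-create the surviving prev/next pieces.
                 */
                return 0;
        }

        static int driver_gpuva_unmap(struct drm_gpuva_op *op, void *priv)
        {
                /* Unmap op->unmap.va and remove it from the VA tree. */
                return 0;
        }

        static const struct drm_gpuvm_ops driver_apply_ops = {
                .sm_step_map = driver_gpuva_map,
                .sm_step_remap = driver_gpuva_remap,
                .sm_step_unmap = driver_gpuva_unmap,
        };

Since lock_ops and the driver's apply-phase ops walk the same split/merge
steps, every object touched in the apply phase has already been locked.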