Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2025-09-04 01:30:16 +00:00)
drm/xe: Rename engine to exec_queue

"Engine" was inappropriately used to refer to execution queues, and it also
created some confusion with hardware engines. Where it applies, the exec_queue
variable name is changed to "q" and comments are updated accordingly.

Link: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/162
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
commit 9b9529ce37
parent c22a4ed0c3
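The rename is mechanical and uniform across the driver: every struct xe_engine
handle "e" becomes a struct xe_exec_queue handle "q", and the xe_engine_* and
ENGINE_FLAG_* identifiers gain an exec_queue/EXEC_QUEUE spelling. A condensed
before/after sketch of the pattern, pieced together from the ioctl lookup path
changed below (an illustration, not a literal hunk from the patch):

    /* Before: submission state was called an "engine", easily confused
     * with hardware engines. */
    struct xe_engine *e = xe_engine_lookup(xef, args->engine_id);
    if (XE_IOCTL_DBG(xe, !e))
            return -ENOENT;
    /* ... use the submission object ... */
    xe_engine_put(e);

    /* After: the same object is an exec_queue, conventionally named "q". */
    struct xe_exec_queue *q = xe_exec_queue_lookup(xef, args->exec_queue_id);
    if (XE_IOCTL_DBG(xe, !q))
            return -ENOENT;
    /* ... use the submission object ... */
    xe_exec_queue_put(q);

The hunks below (diff-viewer markup removed, changed lines marked -/+; original
indentation was lost in extraction) apply this pattern file by file.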
@@ -38,7 +38,7 @@ static int run_sanity_job(struct xe_migrate *m, struct xe_device *xe,
 struct kunit *test)
 {
 u64 batch_base = xe_migrate_batch_base(m, xe->info.supports_usm);
-struct xe_sched_job *job = xe_bb_create_migration_job(m->eng, bb,
+struct xe_sched_job *job = xe_bb_create_migration_job(m->q, bb,
 batch_base,
 second_idx);
 struct dma_fence *fence;
@@ -215,7 +215,7 @@ static void test_pt_update(struct xe_migrate *m, struct xe_bo *pt,
 xe_map_memset(xe, &pt->vmap, 0, (u8)expected, pt->size);

 then = ktime_get();
-fence = xe_migrate_update_pgtables(m, NULL, NULL, m->eng, &update, 1,
+fence = xe_migrate_update_pgtables(m, NULL, NULL, m->q, &update, 1,
 NULL, 0, &pt_update);
 now = ktime_get();
 if (sanity_fence_failed(xe, fence, "Migration pagetable update", test))
@@ -257,7 +257,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
 return;
 }

-big = xe_bo_create_pin_map(xe, tile, m->eng->vm, SZ_4M,
+big = xe_bo_create_pin_map(xe, tile, m->q->vm, SZ_4M,
 ttm_bo_type_kernel,
 XE_BO_CREATE_VRAM_IF_DGFX(tile) |
 XE_BO_CREATE_PINNED_BIT);
@@ -266,7 +266,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
 goto vunmap;
 }

-pt = xe_bo_create_pin_map(xe, tile, m->eng->vm, XE_PAGE_SIZE,
+pt = xe_bo_create_pin_map(xe, tile, m->q->vm, XE_PAGE_SIZE,
 ttm_bo_type_kernel,
 XE_BO_CREATE_VRAM_IF_DGFX(tile) |
 XE_BO_CREATE_PINNED_BIT);
@@ -276,7 +276,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
 goto free_big;
 }

-tiny = xe_bo_create_pin_map(xe, tile, m->eng->vm,
+tiny = xe_bo_create_pin_map(xe, tile, m->q->vm,
 2 * SZ_4K,
 ttm_bo_type_kernel,
 XE_BO_CREATE_VRAM_IF_DGFX(tile) |
@@ -295,14 +295,14 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
 }

 kunit_info(test, "Starting tests, top level PT addr: %lx, special pagetable base addr: %lx\n",
-(unsigned long)xe_bo_main_addr(m->eng->vm->pt_root[id]->bo, XE_PAGE_SIZE),
+(unsigned long)xe_bo_main_addr(m->q->vm->pt_root[id]->bo, XE_PAGE_SIZE),
 (unsigned long)xe_bo_main_addr(m->pt_bo, XE_PAGE_SIZE));

 /* First part of the test, are we updating our pagetable bo with a new entry? */
 xe_map_wr(xe, &bo->vmap, XE_PAGE_SIZE * (NUM_KERNEL_PDE - 1), u64,
 0xdeaddeadbeefbeef);
 expected = xe_pte_encode(pt, 0, XE_CACHE_WB, 0);
-if (m->eng->vm->flags & XE_VM_FLAG_64K)
+if (m->q->vm->flags & XE_VM_FLAG_64K)
 expected |= XE_PTE_PS64;
 if (xe_bo_is_vram(pt))
 xe_res_first(pt->ttm.resource, 0, pt->size, &src_it);
@@ -399,11 +399,11 @@ static int migrate_test_run_device(struct xe_device *xe)
 struct ww_acquire_ctx ww;

 kunit_info(test, "Testing tile id %d.\n", id);
-xe_vm_lock(m->eng->vm, &ww, 0, true);
+xe_vm_lock(m->q->vm, &ww, 0, true);
 xe_device_mem_access_get(xe);
 xe_migrate_sanity_test(m, test);
 xe_device_mem_access_put(xe);
-xe_vm_unlock(m->eng->vm, &ww);
+xe_vm_unlock(m->q->vm, &ww);
 }

 return 0;

@@ -7,7 +7,7 @@

 #include "regs/xe_gpu_commands.h"
 #include "xe_device.h"
-#include "xe_engine_types.h"
+#include "xe_exec_queue_types.h"
 #include "xe_gt.h"
 #include "xe_hw_fence.h"
 #include "xe_sa.h"
@@ -60,30 +60,30 @@ struct xe_bb *xe_bb_new(struct xe_gt *gt, u32 dwords, bool usm)
 }

 static struct xe_sched_job *
-__xe_bb_create_job(struct xe_engine *kernel_eng, struct xe_bb *bb, u64 *addr)
+__xe_bb_create_job(struct xe_exec_queue *q, struct xe_bb *bb, u64 *addr)
 {
 u32 size = drm_suballoc_size(bb->bo);

 bb->cs[bb->len++] = MI_BATCH_BUFFER_END;

-WARN_ON(bb->len * 4 + bb_prefetch(kernel_eng->gt) > size);
+WARN_ON(bb->len * 4 + bb_prefetch(q->gt) > size);

 xe_sa_bo_flush_write(bb->bo);

-return xe_sched_job_create(kernel_eng, addr);
+return xe_sched_job_create(q, addr);
 }

-struct xe_sched_job *xe_bb_create_wa_job(struct xe_engine *wa_eng,
+struct xe_sched_job *xe_bb_create_wa_job(struct xe_exec_queue *q,
 struct xe_bb *bb, u64 batch_base_ofs)
 {
 u64 addr = batch_base_ofs + drm_suballoc_soffset(bb->bo);

-XE_WARN_ON(!(wa_eng->vm->flags & XE_VM_FLAG_MIGRATION));
+XE_WARN_ON(!(q->vm->flags & XE_VM_FLAG_MIGRATION));

-return __xe_bb_create_job(wa_eng, bb, &addr);
+return __xe_bb_create_job(q, bb, &addr);
 }

-struct xe_sched_job *xe_bb_create_migration_job(struct xe_engine *kernel_eng,
+struct xe_sched_job *xe_bb_create_migration_job(struct xe_exec_queue *q,
 struct xe_bb *bb,
 u64 batch_base_ofs,
 u32 second_idx)
@@ -95,18 +95,18 @@ struct xe_sched_job *xe_bb_create_migration_job(struct xe_engine *kernel_eng,
 };

 XE_WARN_ON(second_idx > bb->len);
-XE_WARN_ON(!(kernel_eng->vm->flags & XE_VM_FLAG_MIGRATION));
+XE_WARN_ON(!(q->vm->flags & XE_VM_FLAG_MIGRATION));

-return __xe_bb_create_job(kernel_eng, bb, addr);
+return __xe_bb_create_job(q, bb, addr);
 }

-struct xe_sched_job *xe_bb_create_job(struct xe_engine *kernel_eng,
+struct xe_sched_job *xe_bb_create_job(struct xe_exec_queue *q,
 struct xe_bb *bb)
 {
 u64 addr = xe_sa_bo_gpu_addr(bb->bo);

-XE_WARN_ON(kernel_eng->vm && kernel_eng->vm->flags & XE_VM_FLAG_MIGRATION);
-return __xe_bb_create_job(kernel_eng, bb, &addr);
+XE_WARN_ON(q->vm && q->vm->flags & XE_VM_FLAG_MIGRATION);
+return __xe_bb_create_job(q, bb, &addr);
 }

 void xe_bb_free(struct xe_bb *bb, struct dma_fence *fence)

@@ -11,16 +11,16 @@
 struct dma_fence;

 struct xe_gt;
-struct xe_engine;
+struct xe_exec_queue;
 struct xe_sched_job;

 struct xe_bb *xe_bb_new(struct xe_gt *gt, u32 size, bool usm);
-struct xe_sched_job *xe_bb_create_job(struct xe_engine *kernel_eng,
+struct xe_sched_job *xe_bb_create_job(struct xe_exec_queue *q,
 struct xe_bb *bb);
-struct xe_sched_job *xe_bb_create_migration_job(struct xe_engine *kernel_eng,
+struct xe_sched_job *xe_bb_create_migration_job(struct xe_exec_queue *q,
 struct xe_bb *bb, u64 batch_ofs,
 u32 second_idx);
-struct xe_sched_job *xe_bb_create_wa_job(struct xe_engine *wa_eng,
+struct xe_sched_job *xe_bb_create_wa_job(struct xe_exec_queue *q,
 struct xe_bb *bb, u64 batch_ofs);
 void xe_bb_free(struct xe_bb *bb, struct dma_fence *fence);

@@ -53,9 +53,9 @@ static struct xe_device *coredump_to_xe(const struct xe_devcoredump *coredump)
 return container_of(coredump, struct xe_device, devcoredump);
 }

-static struct xe_guc *engine_to_guc(struct xe_engine *e)
+static struct xe_guc *exec_queue_to_guc(struct xe_exec_queue *q)
 {
-return &e->gt->uc.guc;
+return &q->gt->uc.guc;
 }

 static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
@@ -91,7 +91,7 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,

 drm_printf(&p, "\n**** GuC CT ****\n");
 xe_guc_ct_snapshot_print(coredump->snapshot.ct, &p);
-xe_guc_engine_snapshot_print(coredump->snapshot.ge, &p);
+xe_guc_exec_queue_snapshot_print(coredump->snapshot.ge, &p);

 drm_printf(&p, "\n**** HW Engines ****\n");
 for (i = 0; i < XE_NUM_HW_ENGINES; i++)
@@ -112,7 +112,7 @@ static void xe_devcoredump_free(void *data)
 return;

 xe_guc_ct_snapshot_free(coredump->snapshot.ct);
-xe_guc_engine_snapshot_free(coredump->snapshot.ge);
+xe_guc_exec_queue_snapshot_free(coredump->snapshot.ge);
 for (i = 0; i < XE_NUM_HW_ENGINES; i++)
 if (coredump->snapshot.hwe[i])
 xe_hw_engine_snapshot_free(coredump->snapshot.hwe[i]);
@@ -123,14 +123,14 @@ static void xe_devcoredump_free(void *data)
 }

 static void devcoredump_snapshot(struct xe_devcoredump *coredump,
-struct xe_engine *e)
+struct xe_exec_queue *q)
 {
 struct xe_devcoredump_snapshot *ss = &coredump->snapshot;
-struct xe_guc *guc = engine_to_guc(e);
+struct xe_guc *guc = exec_queue_to_guc(q);
 struct xe_hw_engine *hwe;
 enum xe_hw_engine_id id;
-u32 adj_logical_mask = e->logical_mask;
-u32 width_mask = (0x1 << e->width) - 1;
+u32 adj_logical_mask = q->logical_mask;
+u32 width_mask = (0x1 << q->width) - 1;
 int i;
 bool cookie;

@@ -138,22 +138,22 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
 ss->boot_time = ktime_get_boottime();

 cookie = dma_fence_begin_signalling();
-for (i = 0; e->width > 1 && i < XE_HW_ENGINE_MAX_INSTANCE;) {
+for (i = 0; q->width > 1 && i < XE_HW_ENGINE_MAX_INSTANCE;) {
 if (adj_logical_mask & BIT(i)) {
 adj_logical_mask |= width_mask << i;
-i += e->width;
+i += q->width;
 } else {
 ++i;
 }
 }

-xe_force_wake_get(gt_to_fw(e->gt), XE_FORCEWAKE_ALL);
+xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);

 coredump->snapshot.ct = xe_guc_ct_snapshot_capture(&guc->ct, true);
-coredump->snapshot.ge = xe_guc_engine_snapshot_capture(e);
+coredump->snapshot.ge = xe_guc_exec_queue_snapshot_capture(q);

-for_each_hw_engine(hwe, e->gt, id) {
-if (hwe->class != e->hwe->class ||
+for_each_hw_engine(hwe, q->gt, id) {
+if (hwe->class != q->hwe->class ||
 !(BIT(hwe->logical_instance) & adj_logical_mask)) {
 coredump->snapshot.hwe[id] = NULL;
 continue;
@@ -161,21 +161,21 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
 coredump->snapshot.hwe[id] = xe_hw_engine_snapshot_capture(hwe);
 }

-xe_force_wake_put(gt_to_fw(e->gt), XE_FORCEWAKE_ALL);
+xe_force_wake_put(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
 dma_fence_end_signalling(cookie);
 }

 /**
 * xe_devcoredump - Take the required snapshots and initialize coredump device.
-* @e: The faulty xe_engine, where the issue was detected.
+* @q: The faulty xe_exec_queue, where the issue was detected.
 *
 * This function should be called at the crash time within the serialized
 * gt_reset. It is skipped if we still have the core dump device available
 * with the information of the 'first' snapshot.
 */
-void xe_devcoredump(struct xe_engine *e)
+void xe_devcoredump(struct xe_exec_queue *q)
 {
-struct xe_device *xe = gt_to_xe(e->gt);
+struct xe_device *xe = gt_to_xe(q->gt);
 struct xe_devcoredump *coredump = &xe->devcoredump;

 if (coredump->captured) {
@@ -184,7 +184,7 @@ void xe_devcoredump(struct xe_engine *e)
 }

 coredump->captured = true;
-devcoredump_snapshot(coredump, e);
+devcoredump_snapshot(coredump, q);

 drm_info(&xe->drm, "Xe device coredump has been created\n");
 drm_info(&xe->drm, "Check your /sys/class/drm/card%d/device/devcoredump/data\n",

@@ -7,12 +7,12 @@
 #define _XE_DEVCOREDUMP_H_

 struct xe_device;
-struct xe_engine;
+struct xe_exec_queue;

 #ifdef CONFIG_DEV_COREDUMP
-void xe_devcoredump(struct xe_engine *e);
+void xe_devcoredump(struct xe_exec_queue *q);
 #else
-static inline void xe_devcoredump(struct xe_engine *e)
+static inline void xe_devcoredump(struct xe_exec_queue *q)
 {
 }
 #endif

@@ -30,7 +30,7 @@ struct xe_devcoredump_snapshot {
 /** @ct: GuC CT snapshot */
 struct xe_guc_ct_snapshot *ct;
 /** @ge: Guc Engine snapshot */
-struct xe_guc_submit_engine_snapshot *ge;
+struct xe_guc_submit_exec_queue_snapshot *ge;
 /** @hwe: HW Engine snapshot array */
 struct xe_hw_engine_snapshot *hwe[XE_NUM_HW_ENGINES];
 };

@@ -53,33 +53,33 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file)
 mutex_init(&xef->vm.lock);
 xa_init_flags(&xef->vm.xa, XA_FLAGS_ALLOC1);

-mutex_init(&xef->engine.lock);
-xa_init_flags(&xef->engine.xa, XA_FLAGS_ALLOC1);
+mutex_init(&xef->exec_queue.lock);
+xa_init_flags(&xef->exec_queue.xa, XA_FLAGS_ALLOC1);

 file->driver_priv = xef;
 return 0;
 }

-static void device_kill_persistent_engines(struct xe_device *xe,
-struct xe_file *xef);
+static void device_kill_persistent_exec_queues(struct xe_device *xe,
+struct xe_file *xef);

 static void xe_file_close(struct drm_device *dev, struct drm_file *file)
 {
 struct xe_device *xe = to_xe_device(dev);
 struct xe_file *xef = file->driver_priv;
 struct xe_vm *vm;
-struct xe_engine *e;
+struct xe_exec_queue *q;
 unsigned long idx;

-mutex_lock(&xef->engine.lock);
-xa_for_each(&xef->engine.xa, idx, e) {
-xe_engine_kill(e);
-xe_engine_put(e);
+mutex_lock(&xef->exec_queue.lock);
+xa_for_each(&xef->exec_queue.xa, idx, q) {
+xe_exec_queue_kill(q);
+xe_exec_queue_put(q);
 }
-mutex_unlock(&xef->engine.lock);
-xa_destroy(&xef->engine.xa);
-mutex_destroy(&xef->engine.lock);
-device_kill_persistent_engines(xe, xef);
+mutex_unlock(&xef->exec_queue.lock);
+xa_destroy(&xef->exec_queue.xa);
+mutex_destroy(&xef->exec_queue.lock);
+device_kill_persistent_exec_queues(xe, xef);

 mutex_lock(&xef->vm.lock);
 xa_for_each(&xef->vm.xa, idx, vm)
@@ -99,15 +99,15 @@ static const struct drm_ioctl_desc xe_ioctls[] = {
 DRM_IOCTL_DEF_DRV(XE_VM_CREATE, xe_vm_create_ioctl, DRM_RENDER_ALLOW),
 DRM_IOCTL_DEF_DRV(XE_VM_DESTROY, xe_vm_destroy_ioctl, DRM_RENDER_ALLOW),
 DRM_IOCTL_DEF_DRV(XE_VM_BIND, xe_vm_bind_ioctl, DRM_RENDER_ALLOW),
-DRM_IOCTL_DEF_DRV(XE_ENGINE_CREATE, xe_engine_create_ioctl,
+DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_CREATE, xe_exec_queue_create_ioctl,
 DRM_RENDER_ALLOW),
-DRM_IOCTL_DEF_DRV(XE_ENGINE_GET_PROPERTY, xe_engine_get_property_ioctl,
+DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_GET_PROPERTY, xe_exec_queue_get_property_ioctl,
 DRM_RENDER_ALLOW),
-DRM_IOCTL_DEF_DRV(XE_ENGINE_DESTROY, xe_engine_destroy_ioctl,
+DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_DESTROY, xe_exec_queue_destroy_ioctl,
 DRM_RENDER_ALLOW),
 DRM_IOCTL_DEF_DRV(XE_EXEC, xe_exec_ioctl, DRM_RENDER_ALLOW),
 DRM_IOCTL_DEF_DRV(XE_MMIO, xe_mmio_ioctl, DRM_RENDER_ALLOW),
-DRM_IOCTL_DEF_DRV(XE_ENGINE_SET_PROPERTY, xe_engine_set_property_ioctl,
+DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_SET_PROPERTY, xe_exec_queue_set_property_ioctl,
 DRM_RENDER_ALLOW),
 DRM_IOCTL_DEF_DRV(XE_WAIT_USER_FENCE, xe_wait_user_fence_ioctl,
 DRM_RENDER_ALLOW),
@@ -324,33 +324,33 @@ void xe_device_shutdown(struct xe_device *xe)
 {
 }

-void xe_device_add_persistent_engines(struct xe_device *xe, struct xe_engine *e)
+void xe_device_add_persistent_exec_queues(struct xe_device *xe, struct xe_exec_queue *q)
 {
 mutex_lock(&xe->persistent_engines.lock);
-list_add_tail(&e->persistent.link, &xe->persistent_engines.list);
+list_add_tail(&q->persistent.link, &xe->persistent_engines.list);
 mutex_unlock(&xe->persistent_engines.lock);
 }

-void xe_device_remove_persistent_engines(struct xe_device *xe,
-struct xe_engine *e)
+void xe_device_remove_persistent_exec_queues(struct xe_device *xe,
+struct xe_exec_queue *q)
 {
 mutex_lock(&xe->persistent_engines.lock);
-if (!list_empty(&e->persistent.link))
-list_del(&e->persistent.link);
+if (!list_empty(&q->persistent.link))
+list_del(&q->persistent.link);
 mutex_unlock(&xe->persistent_engines.lock);
 }

-static void device_kill_persistent_engines(struct xe_device *xe,
-struct xe_file *xef)
+static void device_kill_persistent_exec_queues(struct xe_device *xe,
+struct xe_file *xef)
 {
-struct xe_engine *e, *next;
+struct xe_exec_queue *q, *next;

 mutex_lock(&xe->persistent_engines.lock);
-list_for_each_entry_safe(e, next, &xe->persistent_engines.list,
+list_for_each_entry_safe(q, next, &xe->persistent_engines.list,
 persistent.link)
-if (e->persistent.xef == xef) {
-xe_engine_kill(e);
-list_del_init(&e->persistent.link);
+if (q->persistent.xef == xef) {
+xe_exec_queue_kill(q);
+list_del_init(&q->persistent.link);
 }
 mutex_unlock(&xe->persistent_engines.lock);
 }

@@ -6,7 +6,7 @@
 #ifndef _XE_DEVICE_H_
 #define _XE_DEVICE_H_

-struct xe_engine;
+struct xe_exec_queue;
 struct xe_file;

 #include <drm/drm_util.h>
@@ -41,9 +41,9 @@ int xe_device_probe(struct xe_device *xe);
 void xe_device_remove(struct xe_device *xe);
 void xe_device_shutdown(struct xe_device *xe);

-void xe_device_add_persistent_engines(struct xe_device *xe, struct xe_engine *e);
-void xe_device_remove_persistent_engines(struct xe_device *xe,
-struct xe_engine *e);
+void xe_device_add_persistent_exec_queues(struct xe_device *xe, struct xe_exec_queue *q);
+void xe_device_remove_persistent_exec_queues(struct xe_device *xe,
+struct xe_exec_queue *q);

 void xe_device_wmb(struct xe_device *xe);

@@ -377,13 +377,13 @@ struct xe_file {
 struct mutex lock;
 } vm;

-/** @engine: Submission engine state for file */
+/** @exec_queue: Submission exec queue state for file */
 struct {
 /** @xe: xarray to store engines */
 struct xarray xa;
 /** @lock: protects file engine state */
 struct mutex lock;
-} engine;
+} exec_queue;
 };

 #endif

@ -1,209 +0,0 @@
|
||||
/* SPDX-License-Identifier: MIT */
|
||||
/*
|
||||
* Copyright © 2022 Intel Corporation
|
||||
*/
|
||||
|
||||
#ifndef _XE_ENGINE_TYPES_H_
|
||||
#define _XE_ENGINE_TYPES_H_
|
||||
|
||||
#include <linux/kref.h>
|
||||
|
||||
#include <drm/gpu_scheduler.h>
|
||||
|
||||
#include "xe_gpu_scheduler_types.h"
|
||||
#include "xe_hw_engine_types.h"
|
||||
#include "xe_hw_fence_types.h"
|
||||
#include "xe_lrc_types.h"
|
||||
|
||||
struct xe_execlist_engine;
|
||||
struct xe_gt;
|
||||
struct xe_guc_engine;
|
||||
struct xe_hw_engine;
|
||||
struct xe_vm;
|
||||
|
||||
enum xe_engine_priority {
|
||||
XE_ENGINE_PRIORITY_UNSET = -2, /* For execlist usage only */
|
||||
XE_ENGINE_PRIORITY_LOW = 0,
|
||||
XE_ENGINE_PRIORITY_NORMAL,
|
||||
XE_ENGINE_PRIORITY_HIGH,
|
||||
XE_ENGINE_PRIORITY_KERNEL,
|
||||
|
||||
XE_ENGINE_PRIORITY_COUNT
|
||||
};
|
||||
|
||||
/**
|
||||
* struct xe_engine - Submission engine
|
||||
*
|
||||
* Contains all state necessary for submissions. Can either be a user object or
|
||||
* a kernel object.
|
||||
*/
|
||||
struct xe_engine {
|
||||
/** @gt: graphics tile this engine can submit to */
|
||||
struct xe_gt *gt;
|
||||
/**
|
||||
* @hwe: A hardware of the same class. May (physical engine) or may not
|
||||
* (virtual engine) be where jobs actual engine up running. Should never
|
||||
* really be used for submissions.
|
||||
*/
|
||||
struct xe_hw_engine *hwe;
|
||||
/** @refcount: ref count of this engine */
|
||||
struct kref refcount;
|
||||
/** @vm: VM (address space) for this engine */
|
||||
struct xe_vm *vm;
|
||||
/** @class: class of this engine */
|
||||
enum xe_engine_class class;
|
||||
/** @priority: priority of this exec queue */
|
||||
enum xe_engine_priority priority;
|
||||
/**
|
||||
* @logical_mask: logical mask of where job submitted to engine can run
|
||||
*/
|
||||
u32 logical_mask;
|
||||
/** @name: name of this engine */
|
||||
char name[MAX_FENCE_NAME_LEN];
|
||||
/** @width: width (number BB submitted per exec) of this engine */
|
||||
u16 width;
|
||||
/** @fence_irq: fence IRQ used to signal job completion */
|
||||
struct xe_hw_fence_irq *fence_irq;
|
||||
|
||||
#define ENGINE_FLAG_BANNED BIT(0)
|
||||
#define ENGINE_FLAG_KERNEL BIT(1)
|
||||
#define ENGINE_FLAG_PERSISTENT BIT(2)
|
||||
#define ENGINE_FLAG_COMPUTE_MODE BIT(3)
|
||||
/* Caller needs to hold rpm ref when creating engine with ENGINE_FLAG_VM */
|
||||
#define ENGINE_FLAG_VM BIT(4)
|
||||
#define ENGINE_FLAG_BIND_ENGINE_CHILD BIT(5)
|
||||
#define ENGINE_FLAG_WA BIT(6)
|
||||
|
||||
/**
|
||||
* @flags: flags for this engine, should statically setup aside from ban
|
||||
* bit
|
||||
*/
|
||||
unsigned long flags;
|
||||
|
||||
union {
|
||||
/** @multi_gt_list: list head for VM bind engines if multi-GT */
|
||||
struct list_head multi_gt_list;
|
||||
/** @multi_gt_link: link for VM bind engines if multi-GT */
|
||||
struct list_head multi_gt_link;
|
||||
};
|
||||
|
||||
union {
|
||||
/** @execlist: execlist backend specific state for engine */
|
||||
struct xe_execlist_engine *execlist;
|
||||
/** @guc: GuC backend specific state for engine */
|
||||
struct xe_guc_engine *guc;
|
||||
};
|
||||
|
||||
/**
|
||||
* @persistent: persistent engine state
|
||||
*/
|
||||
struct {
|
||||
/** @xef: file which this engine belongs to */
|
||||
struct xe_file *xef;
|
||||
/** @link: link in list of persistent engines */
|
||||
struct list_head link;
|
||||
} persistent;
|
||||
|
||||
union {
|
||||
/**
|
||||
* @parallel: parallel submission state
|
||||
*/
|
||||
struct {
|
||||
/** @composite_fence_ctx: context composite fence */
|
||||
u64 composite_fence_ctx;
|
||||
/** @composite_fence_seqno: seqno for composite fence */
|
||||
u32 composite_fence_seqno;
|
||||
} parallel;
|
||||
/**
|
||||
* @bind: bind submission state
|
||||
*/
|
||||
struct {
|
||||
/** @fence_ctx: context bind fence */
|
||||
u64 fence_ctx;
|
||||
/** @fence_seqno: seqno for bind fence */
|
||||
u32 fence_seqno;
|
||||
} bind;
|
||||
};
|
||||
|
||||
/** @sched_props: scheduling properties */
|
||||
struct {
|
||||
/** @timeslice_us: timeslice period in micro-seconds */
|
||||
u32 timeslice_us;
|
||||
/** @preempt_timeout_us: preemption timeout in micro-seconds */
|
||||
u32 preempt_timeout_us;
|
||||
} sched_props;
|
||||
|
||||
/** @compute: compute engine state */
|
||||
struct {
|
||||
/** @pfence: preemption fence */
|
||||
struct dma_fence *pfence;
|
||||
/** @context: preemption fence context */
|
||||
u64 context;
|
||||
/** @seqno: preemption fence seqno */
|
||||
u32 seqno;
|
||||
/** @link: link into VM's list of engines */
|
||||
struct list_head link;
|
||||
/** @lock: preemption fences lock */
|
||||
spinlock_t lock;
|
||||
} compute;
|
||||
|
||||
/** @usm: unified shared memory state */
|
||||
struct {
|
||||
/** @acc_trigger: access counter trigger */
|
||||
u32 acc_trigger;
|
||||
/** @acc_notify: access counter notify */
|
||||
u32 acc_notify;
|
||||
/** @acc_granularity: access counter granularity */
|
||||
u32 acc_granularity;
|
||||
} usm;
|
||||
|
||||
/** @ops: submission backend engine operations */
|
||||
const struct xe_engine_ops *ops;
|
||||
|
||||
/** @ring_ops: ring operations for this engine */
|
||||
const struct xe_ring_ops *ring_ops;
|
||||
/** @entity: DRM sched entity for this engine (1 to 1 relationship) */
|
||||
struct drm_sched_entity *entity;
|
||||
/** @lrc: logical ring context for this engine */
|
||||
struct xe_lrc lrc[];
|
||||
};
|
||||
|
||||
/**
|
||||
* struct xe_engine_ops - Submission backend engine operations
|
||||
*/
|
||||
struct xe_engine_ops {
|
||||
/** @init: Initialize engine for submission backend */
|
||||
int (*init)(struct xe_engine *e);
|
||||
/** @kill: Kill inflight submissions for backend */
|
||||
void (*kill)(struct xe_engine *e);
|
||||
/** @fini: Fini engine for submission backend */
|
||||
void (*fini)(struct xe_engine *e);
|
||||
/** @set_priority: Set priority for engine */
|
||||
int (*set_priority)(struct xe_engine *e,
|
||||
enum xe_engine_priority priority);
|
||||
/** @set_timeslice: Set timeslice for engine */
|
||||
int (*set_timeslice)(struct xe_engine *e, u32 timeslice_us);
|
||||
/** @set_preempt_timeout: Set preemption timeout for engine */
|
||||
int (*set_preempt_timeout)(struct xe_engine *e, u32 preempt_timeout_us);
|
||||
/** @set_job_timeout: Set job timeout for engine */
|
||||
int (*set_job_timeout)(struct xe_engine *e, u32 job_timeout_ms);
|
||||
/**
|
||||
* @suspend: Suspend engine from executing, allowed to be called
|
||||
* multiple times in a row before resume with the caveat that
|
||||
* suspend_wait returns before calling suspend again.
|
||||
*/
|
||||
int (*suspend)(struct xe_engine *e);
|
||||
/**
|
||||
* @suspend_wait: Wait for an engine to suspend executing, should be
|
||||
* call after suspend.
|
||||
*/
|
||||
void (*suspend_wait)(struct xe_engine *e);
|
||||
/**
|
||||
* @resume: Resume engine execution, engine must be in a suspended
|
||||
* state and dma fence returned from most recent suspend call must be
|
||||
* signalled when this function is called.
|
||||
*/
|
||||
void (*resume)(struct xe_engine *e);
|
||||
};
|
||||
|
||||
#endif
|
@ -95,19 +95,19 @@
|
||||
|
||||
#define XE_EXEC_BIND_RETRY_TIMEOUT_MS 1000
|
||||
|
||||
static int xe_exec_begin(struct xe_engine *e, struct ww_acquire_ctx *ww,
|
||||
static int xe_exec_begin(struct xe_exec_queue *q, struct ww_acquire_ctx *ww,
|
||||
struct ttm_validate_buffer tv_onstack[],
|
||||
struct ttm_validate_buffer **tv,
|
||||
struct list_head *objs)
|
||||
{
|
||||
struct xe_vm *vm = e->vm;
|
||||
struct xe_vm *vm = q->vm;
|
||||
struct xe_vma *vma;
|
||||
LIST_HEAD(dups);
|
||||
ktime_t end = 0;
|
||||
int err = 0;
|
||||
|
||||
*tv = NULL;
|
||||
if (xe_vm_no_dma_fences(e->vm))
|
||||
if (xe_vm_no_dma_fences(q->vm))
|
||||
return 0;
|
||||
|
||||
retry:
|
||||
@ -153,14 +153,14 @@ static int xe_exec_begin(struct xe_engine *e, struct ww_acquire_ctx *ww,
|
||||
return err;
|
||||
}
|
||||
|
||||
static void xe_exec_end(struct xe_engine *e,
|
||||
static void xe_exec_end(struct xe_exec_queue *q,
|
||||
struct ttm_validate_buffer *tv_onstack,
|
||||
struct ttm_validate_buffer *tv,
|
||||
struct ww_acquire_ctx *ww,
|
||||
struct list_head *objs)
|
||||
{
|
||||
if (!xe_vm_no_dma_fences(e->vm))
|
||||
xe_vm_unlock_dma_resv(e->vm, tv_onstack, tv, ww, objs);
|
||||
if (!xe_vm_no_dma_fences(q->vm))
|
||||
xe_vm_unlock_dma_resv(q->vm, tv_onstack, tv, ww, objs);
|
||||
}
|
||||
|
||||
int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
|
||||
@ -170,7 +170,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
|
||||
struct drm_xe_exec *args = data;
|
||||
struct drm_xe_sync __user *syncs_user = u64_to_user_ptr(args->syncs);
|
||||
u64 __user *addresses_user = u64_to_user_ptr(args->address);
|
||||
struct xe_engine *engine;
|
||||
struct xe_exec_queue *q;
|
||||
struct xe_sync_entry *syncs = NULL;
|
||||
u64 addresses[XE_HW_ENGINE_MAX_INSTANCE];
|
||||
struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
|
||||
@ -189,30 +189,30 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
|
||||
XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
|
||||
return -EINVAL;
|
||||
|
||||
engine = xe_engine_lookup(xef, args->engine_id);
|
||||
if (XE_IOCTL_DBG(xe, !engine))
|
||||
q = xe_exec_queue_lookup(xef, args->exec_queue_id);
|
||||
if (XE_IOCTL_DBG(xe, !q))
|
||||
return -ENOENT;
|
||||
|
||||
if (XE_IOCTL_DBG(xe, engine->flags & ENGINE_FLAG_VM))
|
||||
if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_VM))
|
||||
return -EINVAL;
|
||||
|
||||
if (XE_IOCTL_DBG(xe, engine->width != args->num_batch_buffer))
|
||||
if (XE_IOCTL_DBG(xe, q->width != args->num_batch_buffer))
|
||||
return -EINVAL;
|
||||
|
||||
if (XE_IOCTL_DBG(xe, engine->flags & ENGINE_FLAG_BANNED)) {
|
||||
if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_BANNED)) {
|
||||
err = -ECANCELED;
|
||||
goto err_engine;
|
||||
goto err_exec_queue;
|
||||
}
|
||||
|
||||
if (args->num_syncs) {
|
||||
syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
|
||||
if (!syncs) {
|
||||
err = -ENOMEM;
|
||||
goto err_engine;
|
||||
goto err_exec_queue;
|
||||
}
|
||||
}
|
||||
|
||||
vm = engine->vm;
|
||||
vm = q->vm;
|
||||
|
||||
for (i = 0; i < args->num_syncs; i++) {
|
||||
err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs++],
|
||||
@ -222,9 +222,9 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
|
||||
goto err_syncs;
|
||||
}
|
||||
|
||||
if (xe_engine_is_parallel(engine)) {
|
||||
if (xe_exec_queue_is_parallel(q)) {
|
||||
err = __copy_from_user(addresses, addresses_user, sizeof(u64) *
|
||||
engine->width);
|
||||
q->width);
|
||||
if (err) {
|
||||
err = -EFAULT;
|
||||
goto err_syncs;
|
||||
@ -294,26 +294,26 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
|
||||
goto err_unlock_list;
|
||||
}
|
||||
|
||||
err = xe_exec_begin(engine, &ww, tv_onstack, &tv, &objs);
|
||||
err = xe_exec_begin(q, &ww, tv_onstack, &tv, &objs);
|
||||
if (err)
|
||||
goto err_unlock_list;
|
||||
|
||||
if (xe_vm_is_closed_or_banned(engine->vm)) {
|
||||
if (xe_vm_is_closed_or_banned(q->vm)) {
|
||||
drm_warn(&xe->drm, "Trying to schedule after vm is closed or banned\n");
|
||||
err = -ECANCELED;
|
||||
goto err_engine_end;
|
||||
goto err_exec_queue_end;
|
||||
}
|
||||
|
||||
if (xe_engine_is_lr(engine) && xe_engine_ring_full(engine)) {
|
||||
if (xe_exec_queue_is_lr(q) && xe_exec_queue_ring_full(q)) {
|
||||
err = -EWOULDBLOCK;
|
||||
goto err_engine_end;
|
||||
goto err_exec_queue_end;
|
||||
}
|
||||
|
||||
job = xe_sched_job_create(engine, xe_engine_is_parallel(engine) ?
|
||||
job = xe_sched_job_create(q, xe_exec_queue_is_parallel(q) ?
|
||||
addresses : &args->address);
|
||||
if (IS_ERR(job)) {
|
||||
err = PTR_ERR(job);
|
||||
goto err_engine_end;
|
||||
goto err_exec_queue_end;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -395,8 +395,8 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
|
||||
xe_sync_entry_signal(&syncs[i], job,
|
||||
&job->drm.s_fence->finished);
|
||||
|
||||
if (xe_engine_is_lr(engine))
|
||||
engine->ring_ops->emit_job(job);
|
||||
if (xe_exec_queue_is_lr(q))
|
||||
q->ring_ops->emit_job(job);
|
||||
xe_sched_job_push(job);
|
||||
xe_vm_reactivate_rebind(vm);
|
||||
|
||||
@ -412,8 +412,8 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
|
||||
err_put_job:
|
||||
if (err)
|
||||
xe_sched_job_put(job);
|
||||
err_engine_end:
|
||||
xe_exec_end(engine, tv_onstack, tv, &ww, &objs);
|
||||
err_exec_queue_end:
|
||||
xe_exec_end(q, tv_onstack, tv, &ww, &objs);
|
||||
err_unlock_list:
|
||||
if (write_locked)
|
||||
up_write(&vm->lock);
|
||||
@ -425,8 +425,8 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
|
||||
for (i = 0; i < num_syncs; i++)
|
||||
xe_sync_entry_cleanup(&syncs[i]);
|
||||
kfree(syncs);
|
||||
err_engine:
|
||||
xe_engine_put(engine);
|
||||
err_exec_queue:
|
||||
xe_exec_queue_put(q);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
@ -22,57 +22,57 @@
|
||||
#include "xe_trace.h"
|
||||
#include "xe_vm.h"
|
||||
|
||||
static struct xe_engine *__xe_engine_create(struct xe_device *xe,
|
||||
struct xe_vm *vm,
|
||||
u32 logical_mask,
|
||||
u16 width, struct xe_hw_engine *hwe,
|
||||
u32 flags)
|
||||
static struct xe_exec_queue *__xe_exec_queue_create(struct xe_device *xe,
|
||||
struct xe_vm *vm,
|
||||
u32 logical_mask,
|
||||
u16 width, struct xe_hw_engine *hwe,
|
||||
u32 flags)
|
||||
{
|
||||
struct xe_engine *e;
|
||||
struct xe_exec_queue *q;
|
||||
struct xe_gt *gt = hwe->gt;
|
||||
int err;
|
||||
int i;
|
||||
|
||||
e = kzalloc(sizeof(*e) + sizeof(struct xe_lrc) * width, GFP_KERNEL);
|
||||
if (!e)
|
||||
q = kzalloc(sizeof(*q) + sizeof(struct xe_lrc) * width, GFP_KERNEL);
|
||||
if (!q)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
kref_init(&e->refcount);
|
||||
e->flags = flags;
|
||||
e->hwe = hwe;
|
||||
e->gt = gt;
|
||||
kref_init(&q->refcount);
|
||||
q->flags = flags;
|
||||
q->hwe = hwe;
|
||||
q->gt = gt;
|
||||
if (vm)
|
||||
e->vm = xe_vm_get(vm);
|
||||
e->class = hwe->class;
|
||||
e->width = width;
|
||||
e->logical_mask = logical_mask;
|
||||
e->fence_irq = >->fence_irq[hwe->class];
|
||||
e->ring_ops = gt->ring_ops[hwe->class];
|
||||
e->ops = gt->engine_ops;
|
||||
INIT_LIST_HEAD(&e->persistent.link);
|
||||
INIT_LIST_HEAD(&e->compute.link);
|
||||
INIT_LIST_HEAD(&e->multi_gt_link);
|
||||
q->vm = xe_vm_get(vm);
|
||||
q->class = hwe->class;
|
||||
q->width = width;
|
||||
q->logical_mask = logical_mask;
|
||||
q->fence_irq = >->fence_irq[hwe->class];
|
||||
q->ring_ops = gt->ring_ops[hwe->class];
|
||||
q->ops = gt->exec_queue_ops;
|
||||
INIT_LIST_HEAD(&q->persistent.link);
|
||||
INIT_LIST_HEAD(&q->compute.link);
|
||||
INIT_LIST_HEAD(&q->multi_gt_link);
|
||||
|
||||
/* FIXME: Wire up to configurable default value */
|
||||
e->sched_props.timeslice_us = 1 * 1000;
|
||||
e->sched_props.preempt_timeout_us = 640 * 1000;
|
||||
q->sched_props.timeslice_us = 1 * 1000;
|
||||
q->sched_props.preempt_timeout_us = 640 * 1000;
|
||||
|
||||
if (xe_engine_is_parallel(e)) {
|
||||
e->parallel.composite_fence_ctx = dma_fence_context_alloc(1);
|
||||
e->parallel.composite_fence_seqno = XE_FENCE_INITIAL_SEQNO;
|
||||
if (xe_exec_queue_is_parallel(q)) {
|
||||
q->parallel.composite_fence_ctx = dma_fence_context_alloc(1);
|
||||
q->parallel.composite_fence_seqno = XE_FENCE_INITIAL_SEQNO;
|
||||
}
|
||||
if (e->flags & ENGINE_FLAG_VM) {
|
||||
e->bind.fence_ctx = dma_fence_context_alloc(1);
|
||||
e->bind.fence_seqno = XE_FENCE_INITIAL_SEQNO;
|
||||
if (q->flags & EXEC_QUEUE_FLAG_VM) {
|
||||
q->bind.fence_ctx = dma_fence_context_alloc(1);
|
||||
q->bind.fence_seqno = XE_FENCE_INITIAL_SEQNO;
|
||||
}
|
||||
|
||||
for (i = 0; i < width; ++i) {
|
||||
err = xe_lrc_init(e->lrc + i, hwe, e, vm, SZ_16K);
|
||||
err = xe_lrc_init(q->lrc + i, hwe, q, vm, SZ_16K);
|
||||
if (err)
|
||||
goto err_lrc;
|
||||
}
|
||||
|
||||
err = e->ops->init(e);
|
||||
err = q->ops->init(q);
|
||||
if (err)
|
||||
goto err_lrc;
|
||||
|
||||
@ -84,24 +84,24 @@ static struct xe_engine *__xe_engine_create(struct xe_device *xe,
|
||||
* can perform GuC CT actions when needed. Caller is expected to
|
||||
* have already grabbed the rpm ref outside any sensitive locks.
|
||||
*/
|
||||
if (e->flags & ENGINE_FLAG_VM)
|
||||
if (q->flags & EXEC_QUEUE_FLAG_VM)
|
||||
drm_WARN_ON(&xe->drm, !xe_device_mem_access_get_if_ongoing(xe));
|
||||
|
||||
return e;
|
||||
return q;
|
||||
|
||||
err_lrc:
|
||||
for (i = i - 1; i >= 0; --i)
|
||||
xe_lrc_finish(e->lrc + i);
|
||||
kfree(e);
|
||||
xe_lrc_finish(q->lrc + i);
|
||||
kfree(q);
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
struct xe_engine *xe_engine_create(struct xe_device *xe, struct xe_vm *vm,
|
||||
u32 logical_mask, u16 width,
|
||||
struct xe_hw_engine *hwe, u32 flags)
|
||||
struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
|
||||
u32 logical_mask, u16 width,
|
||||
struct xe_hw_engine *hwe, u32 flags)
|
||||
{
|
||||
struct ww_acquire_ctx ww;
|
||||
struct xe_engine *e;
|
||||
struct xe_exec_queue *q;
|
||||
int err;
|
||||
|
||||
if (vm) {
|
||||
@ -109,16 +109,16 @@ struct xe_engine *xe_engine_create(struct xe_device *xe, struct xe_vm *vm,
|
||||
if (err)
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
e = __xe_engine_create(xe, vm, logical_mask, width, hwe, flags);
|
||||
q = __xe_exec_queue_create(xe, vm, logical_mask, width, hwe, flags);
|
||||
if (vm)
|
||||
xe_vm_unlock(vm, &ww);
|
||||
|
||||
return e;
|
||||
return q;
|
||||
}
|
||||
|
||||
struct xe_engine *xe_engine_create_class(struct xe_device *xe, struct xe_gt *gt,
|
||||
struct xe_vm *vm,
|
||||
enum xe_engine_class class, u32 flags)
|
||||
struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
|
||||
struct xe_vm *vm,
|
||||
enum xe_engine_class class, u32 flags)
|
||||
{
|
||||
struct xe_hw_engine *hwe, *hwe0 = NULL;
|
||||
enum xe_hw_engine_id id;
|
||||
@ -138,102 +138,102 @@ struct xe_engine *xe_engine_create_class(struct xe_device *xe, struct xe_gt *gt,
|
||||
if (!logical_mask)
|
||||
return ERR_PTR(-ENODEV);
|
||||
|
||||
return xe_engine_create(xe, vm, logical_mask, 1, hwe0, flags);
|
||||
return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags);
|
||||
}
|
||||
|
||||
void xe_engine_destroy(struct kref *ref)
|
||||
void xe_exec_queue_destroy(struct kref *ref)
|
||||
{
|
||||
struct xe_engine *e = container_of(ref, struct xe_engine, refcount);
|
||||
struct xe_engine *engine, *next;
|
||||
struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
|
||||
struct xe_exec_queue *eq, *next;
|
||||
|
||||
if (!(e->flags & ENGINE_FLAG_BIND_ENGINE_CHILD)) {
|
||||
list_for_each_entry_safe(engine, next, &e->multi_gt_list,
|
||||
if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) {
|
||||
list_for_each_entry_safe(eq, next, &q->multi_gt_list,
|
||||
multi_gt_link)
|
||||
xe_engine_put(engine);
|
||||
xe_exec_queue_put(eq);
|
||||
}
|
||||
|
||||
e->ops->fini(e);
|
||||
q->ops->fini(q);
|
||||
}
|
||||
|
||||
void xe_engine_fini(struct xe_engine *e)
|
||||
void xe_exec_queue_fini(struct xe_exec_queue *q)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < e->width; ++i)
|
||||
xe_lrc_finish(e->lrc + i);
|
||||
if (e->vm)
|
||||
xe_vm_put(e->vm);
|
||||
if (e->flags & ENGINE_FLAG_VM)
|
||||
xe_device_mem_access_put(gt_to_xe(e->gt));
|
||||
for (i = 0; i < q->width; ++i)
|
||||
xe_lrc_finish(q->lrc + i);
|
||||
if (q->vm)
|
||||
xe_vm_put(q->vm);
|
||||
if (q->flags & EXEC_QUEUE_FLAG_VM)
|
||||
xe_device_mem_access_put(gt_to_xe(q->gt));
|
||||
|
||||
kfree(e);
|
||||
kfree(q);
|
||||
}
|
||||
|
||||
struct xe_engine *xe_engine_lookup(struct xe_file *xef, u32 id)
|
||||
struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id)
|
||||
{
|
||||
struct xe_engine *e;
|
||||
struct xe_exec_queue *q;
|
||||
|
||||
mutex_lock(&xef->engine.lock);
|
||||
e = xa_load(&xef->engine.xa, id);
|
||||
if (e)
|
||||
xe_engine_get(e);
|
||||
mutex_unlock(&xef->engine.lock);
|
||||
mutex_lock(&xef->exec_queue.lock);
|
||||
q = xa_load(&xef->exec_queue.xa, id);
|
||||
if (q)
|
||||
xe_exec_queue_get(q);
|
||||
mutex_unlock(&xef->exec_queue.lock);
|
||||
|
||||
return e;
|
||||
return q;
|
||||
}
|
||||
|
||||
enum xe_engine_priority
|
||||
xe_engine_device_get_max_priority(struct xe_device *xe)
|
||||
enum xe_exec_queue_priority
|
||||
xe_exec_queue_device_get_max_priority(struct xe_device *xe)
|
||||
{
|
||||
return capable(CAP_SYS_NICE) ? XE_ENGINE_PRIORITY_HIGH :
|
||||
XE_ENGINE_PRIORITY_NORMAL;
|
||||
return capable(CAP_SYS_NICE) ? XE_EXEC_QUEUE_PRIORITY_HIGH :
|
||||
XE_EXEC_QUEUE_PRIORITY_NORMAL;
|
||||
}
|
||||
|
||||
static int engine_set_priority(struct xe_device *xe, struct xe_engine *e,
|
||||
u64 value, bool create)
|
||||
static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q,
|
||||
u64 value, bool create)
|
||||
{
|
||||
if (XE_IOCTL_DBG(xe, value > XE_ENGINE_PRIORITY_HIGH))
|
||||
if (XE_IOCTL_DBG(xe, value > XE_EXEC_QUEUE_PRIORITY_HIGH))
|
||||
return -EINVAL;
|
||||
|
||||
if (XE_IOCTL_DBG(xe, value > xe_engine_device_get_max_priority(xe)))
|
||||
if (XE_IOCTL_DBG(xe, value > xe_exec_queue_device_get_max_priority(xe)))
|
||||
return -EPERM;
|
||||
|
||||
return e->ops->set_priority(e, value);
|
||||
return q->ops->set_priority(q, value);
|
||||
}
|
||||
|
||||
static int engine_set_timeslice(struct xe_device *xe, struct xe_engine *e,
|
||||
u64 value, bool create)
|
||||
static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q,
|
||||
u64 value, bool create)
|
||||
{
|
||||
if (!capable(CAP_SYS_NICE))
|
||||
return -EPERM;
|
||||
|
||||
return e->ops->set_timeslice(e, value);
|
||||
return q->ops->set_timeslice(q, value);
|
||||
}
|
||||
|
||||
static int engine_set_preemption_timeout(struct xe_device *xe,
|
||||
struct xe_engine *e, u64 value,
|
||||
bool create)
|
||||
static int exec_queue_set_preemption_timeout(struct xe_device *xe,
|
||||
struct xe_exec_queue *q, u64 value,
|
||||
bool create)
|
||||
{
|
||||
if (!capable(CAP_SYS_NICE))
|
||||
return -EPERM;
|
||||
|
||||
return e->ops->set_preempt_timeout(e, value);
|
||||
return q->ops->set_preempt_timeout(q, value);
|
||||
}
|
||||
|
||||
static int engine_set_compute_mode(struct xe_device *xe, struct xe_engine *e,
|
||||
u64 value, bool create)
|
||||
static int exec_queue_set_compute_mode(struct xe_device *xe, struct xe_exec_queue *q,
|
||||
u64 value, bool create)
|
||||
{
|
||||
if (XE_IOCTL_DBG(xe, !create))
|
||||
return -EINVAL;
|
||||
|
||||
if (XE_IOCTL_DBG(xe, e->flags & ENGINE_FLAG_COMPUTE_MODE))
|
||||
if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_COMPUTE_MODE))
|
||||
return -EINVAL;
|
||||
|
||||
if (XE_IOCTL_DBG(xe, e->flags & ENGINE_FLAG_VM))
|
||||
if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_VM))
|
||||
return -EINVAL;
|
||||
|
||||
if (value) {
|
||||
struct xe_vm *vm = e->vm;
|
||||
struct xe_vm *vm = q->vm;
|
||||
int err;
|
||||
|
||||
if (XE_IOCTL_DBG(xe, xe_vm_in_fault_mode(vm)))
|
||||
@ -242,42 +242,42 @@ static int engine_set_compute_mode(struct xe_device *xe, struct xe_engine *e,
|
||||
if (XE_IOCTL_DBG(xe, !xe_vm_in_compute_mode(vm)))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (XE_IOCTL_DBG(xe, e->width != 1))
|
||||
if (XE_IOCTL_DBG(xe, q->width != 1))
|
||||
return -EINVAL;
|
||||
|
||||
e->compute.context = dma_fence_context_alloc(1);
|
||||
spin_lock_init(&e->compute.lock);
|
||||
q->compute.context = dma_fence_context_alloc(1);
|
||||
spin_lock_init(&q->compute.lock);
|
||||
|
||||
err = xe_vm_add_compute_engine(vm, e);
|
||||
err = xe_vm_add_compute_exec_queue(vm, q);
|
||||
if (XE_IOCTL_DBG(xe, err))
|
||||
return err;
|
||||
|
||||
e->flags |= ENGINE_FLAG_COMPUTE_MODE;
|
||||
e->flags &= ~ENGINE_FLAG_PERSISTENT;
|
||||
q->flags |= EXEC_QUEUE_FLAG_COMPUTE_MODE;
|
||||
q->flags &= ~EXEC_QUEUE_FLAG_PERSISTENT;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int engine_set_persistence(struct xe_device *xe, struct xe_engine *e,
|
||||
u64 value, bool create)
|
||||
static int exec_queue_set_persistence(struct xe_device *xe, struct xe_exec_queue *q,
|
||||
u64 value, bool create)
|
||||
{
|
||||
if (XE_IOCTL_DBG(xe, !create))
|
||||
return -EINVAL;
|
||||
|
||||
if (XE_IOCTL_DBG(xe, e->flags & ENGINE_FLAG_COMPUTE_MODE))
|
||||
if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_COMPUTE_MODE))
|
||||
return -EINVAL;
|
||||
|
||||
if (value)
|
||||
e->flags |= ENGINE_FLAG_PERSISTENT;
|
||||
q->flags |= EXEC_QUEUE_FLAG_PERSISTENT;
|
||||
else
|
||||
e->flags &= ~ENGINE_FLAG_PERSISTENT;
|
||||
q->flags &= ~EXEC_QUEUE_FLAG_PERSISTENT;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int engine_set_job_timeout(struct xe_device *xe, struct xe_engine *e,
|
||||
u64 value, bool create)
|
||||
static int exec_queue_set_job_timeout(struct xe_device *xe, struct xe_exec_queue *q,
|
||||
u64 value, bool create)
|
||||
{
|
||||
if (XE_IOCTL_DBG(xe, !create))
|
||||
return -EINVAL;
|
||||
@ -285,38 +285,10 @@ static int engine_set_job_timeout(struct xe_device *xe, struct xe_engine *e,
|
||||
if (!capable(CAP_SYS_NICE))
|
||||
return -EPERM;
|
||||
|
||||
return e->ops->set_job_timeout(e, value);
|
||||
return q->ops->set_job_timeout(q, value);
|
||||
}
|
||||
|
||||
static int engine_set_acc_trigger(struct xe_device *xe, struct xe_engine *e,
|
||||
u64 value, bool create)
|
||||
{
|
||||
if (XE_IOCTL_DBG(xe, !create))
|
||||
return -EINVAL;
|
||||
|
||||
if (XE_IOCTL_DBG(xe, !xe->info.supports_usm))
|
||||
return -EINVAL;
|
||||
|
||||
e->usm.acc_trigger = value;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int engine_set_acc_notify(struct xe_device *xe, struct xe_engine *e,
|
||||
u64 value, bool create)
|
||||
{
|
||||
if (XE_IOCTL_DBG(xe, !create))
|
||||
return -EINVAL;
|
||||
|
||||
if (XE_IOCTL_DBG(xe, !xe->info.supports_usm))
|
||||
return -EINVAL;
|
||||
|
||||
e->usm.acc_notify = value;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int engine_set_acc_granularity(struct xe_device *xe, struct xe_engine *e,
|
||||
static int exec_queue_set_acc_trigger(struct xe_device *xe, struct xe_exec_queue *q,
|
||||
u64 value, bool create)
|
||||
{
|
||||
if (XE_IOCTL_DBG(xe, !create))
|
||||
@ -325,34 +297,62 @@ static int engine_set_acc_granularity(struct xe_device *xe, struct xe_engine *e,
|
||||
if (XE_IOCTL_DBG(xe, !xe->info.supports_usm))
|
||||
return -EINVAL;
|
||||
|
||||
e->usm.acc_granularity = value;
|
||||
q->usm.acc_trigger = value;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
typedef int (*xe_engine_set_property_fn)(struct xe_device *xe,
|
||||
struct xe_engine *e,
|
||||
u64 value, bool create);
|
||||
static int exec_queue_set_acc_notify(struct xe_device *xe, struct xe_exec_queue *q,
|
||||
u64 value, bool create)
|
||||
{
|
||||
if (XE_IOCTL_DBG(xe, !create))
|
||||
return -EINVAL;
|
||||
|
||||
static const xe_engine_set_property_fn engine_set_property_funcs[] = {
|
||||
[XE_ENGINE_SET_PROPERTY_PRIORITY] = engine_set_priority,
|
||||
[XE_ENGINE_SET_PROPERTY_TIMESLICE] = engine_set_timeslice,
|
||||
[XE_ENGINE_SET_PROPERTY_PREEMPTION_TIMEOUT] = engine_set_preemption_timeout,
|
||||
[XE_ENGINE_SET_PROPERTY_COMPUTE_MODE] = engine_set_compute_mode,
|
||||
[XE_ENGINE_SET_PROPERTY_PERSISTENCE] = engine_set_persistence,
|
||||
[XE_ENGINE_SET_PROPERTY_JOB_TIMEOUT] = engine_set_job_timeout,
|
||||
[XE_ENGINE_SET_PROPERTY_ACC_TRIGGER] = engine_set_acc_trigger,
|
||||
[XE_ENGINE_SET_PROPERTY_ACC_NOTIFY] = engine_set_acc_notify,
|
||||
[XE_ENGINE_SET_PROPERTY_ACC_GRANULARITY] = engine_set_acc_granularity,
|
||||
if (XE_IOCTL_DBG(xe, !xe->info.supports_usm))
|
||||
return -EINVAL;
|
||||
|
||||
q->usm.acc_notify = value;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int exec_queue_set_acc_granularity(struct xe_device *xe, struct xe_exec_queue *q,
|
||||
u64 value, bool create)
|
||||
{
|
||||
if (XE_IOCTL_DBG(xe, !create))
|
||||
return -EINVAL;
|
||||
|
||||
if (XE_IOCTL_DBG(xe, !xe->info.supports_usm))
|
||||
return -EINVAL;
|
||||
|
||||
q->usm.acc_granularity = value;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
|
||||
struct xe_exec_queue *q,
|
||||
u64 value, bool create);
|
||||
|
||||
static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
|
||||
[XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
|
||||
[XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
|
||||
[XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT] = exec_queue_set_preemption_timeout,
|
||||
[XE_EXEC_QUEUE_SET_PROPERTY_COMPUTE_MODE] = exec_queue_set_compute_mode,
|
||||
[XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE] = exec_queue_set_persistence,
|
||||
[XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT] = exec_queue_set_job_timeout,
|
||||
[XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER] = exec_queue_set_acc_trigger,
|
||||
[XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY] = exec_queue_set_acc_notify,
|
||||
[XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY] = exec_queue_set_acc_granularity,
|
||||
};
|
||||
|
||||
static int engine_user_ext_set_property(struct xe_device *xe,
|
||||
struct xe_engine *e,
|
||||
u64 extension,
|
||||
bool create)
|
||||
static int exec_queue_user_ext_set_property(struct xe_device *xe,
|
||||
struct xe_exec_queue *q,
|
||||
u64 extension,
|
||||
bool create)
|
||||
{
|
||||
u64 __user *address = u64_to_user_ptr(extension);
|
||||
struct drm_xe_ext_engine_set_property ext;
|
||||
struct drm_xe_ext_exec_queue_set_property ext;
|
||||
int err;
|
||||
u32 idx;
|
||||
|
||||
@ -361,26 +361,26 @@ static int engine_user_ext_set_property(struct xe_device *xe,
|
||||
return -EFAULT;
|
||||
|
||||
if (XE_IOCTL_DBG(xe, ext.property >=
|
||||
ARRAY_SIZE(engine_set_property_funcs)) ||
|
||||
ARRAY_SIZE(exec_queue_set_property_funcs)) ||
|
||||
XE_IOCTL_DBG(xe, ext.pad))
|
||||
return -EINVAL;
|
||||
|
||||
idx = array_index_nospec(ext.property, ARRAY_SIZE(engine_set_property_funcs));
|
||||
return engine_set_property_funcs[idx](xe, e, ext.value, create);
|
||||
idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
|
||||
return exec_queue_set_property_funcs[idx](xe, q, ext.value, create);
|
||||
}
|
||||
|
||||
typedef int (*xe_engine_user_extension_fn)(struct xe_device *xe,
|
||||
struct xe_engine *e,
|
||||
u64 extension,
|
||||
bool create);
|
||||
typedef int (*xe_exec_queue_user_extension_fn)(struct xe_device *xe,
|
||||
struct xe_exec_queue *q,
|
||||
u64 extension,
|
||||
bool create);
|
||||
|
||||
static const xe_engine_set_property_fn engine_user_extension_funcs[] = {
|
||||
[XE_ENGINE_EXTENSION_SET_PROPERTY] = engine_user_ext_set_property,
|
||||
static const xe_exec_queue_set_property_fn exec_queue_user_extension_funcs[] = {
|
||||
[XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY] = exec_queue_user_ext_set_property,
|
||||
};
|
||||
|
||||
#define MAX_USER_EXTENSIONS 16
|
||||
static int engine_user_extensions(struct xe_device *xe, struct xe_engine *e,
|
||||
u64 extensions, int ext_number, bool create)
|
||||
static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
|
||||
u64 extensions, int ext_number, bool create)
|
||||
{
|
||||
u64 __user *address = u64_to_user_ptr(extensions);
|
||||
struct xe_user_extension ext;
|
||||
@ -396,17 +396,17 @@ static int engine_user_extensions(struct xe_device *xe, struct xe_engine *e,
|
||||
|
||||
if (XE_IOCTL_DBG(xe, ext.pad) ||
|
||||
XE_IOCTL_DBG(xe, ext.name >=
|
||||
ARRAY_SIZE(engine_user_extension_funcs)))
|
||||
ARRAY_SIZE(exec_queue_user_extension_funcs)))
|
||||
return -EINVAL;
|
||||
|
||||
idx = array_index_nospec(ext.name,
|
||||
ARRAY_SIZE(engine_user_extension_funcs));
|
||||
err = engine_user_extension_funcs[idx](xe, e, extensions, create);
|
||||
ARRAY_SIZE(exec_queue_user_extension_funcs));
|
||||
err = exec_queue_user_extension_funcs[idx](xe, q, extensions, create);
|
||||
if (XE_IOCTL_DBG(xe, err))
|
||||
return err;
|
||||
|
||||
if (ext.next_extension)
|
||||
return engine_user_extensions(xe, e, ext.next_extension,
|
||||
return exec_queue_user_extensions(xe, q, ext.next_extension,
|
||||
++ext_number, create);
|
||||
|
||||
return 0;
|
||||
@ -440,9 +440,9 @@ find_hw_engine(struct xe_device *xe,
|
||||
eci.engine_instance, true);
|
||||
}
|
||||
|
||||
static u32 bind_engine_logical_mask(struct xe_device *xe, struct xe_gt *gt,
|
||||
struct drm_xe_engine_class_instance *eci,
|
||||
u16 width, u16 num_placements)
|
||||
static u32 bind_exec_queue_logical_mask(struct xe_device *xe, struct xe_gt *gt,
|
||||
struct drm_xe_engine_class_instance *eci,
|
||||
u16 width, u16 num_placements)
|
||||
{
|
||||
struct xe_hw_engine *hwe;
|
||||
enum xe_hw_engine_id id;
|
||||
@ -520,19 +520,19 @@ static u32 calc_validate_logical_mask(struct xe_device *xe, struct xe_gt *gt,
|
||||
return return_mask;
|
||||
}
|
||||
|
||||
int xe_engine_create_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file)
|
||||
int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file)
|
||||
{
|
||||
struct xe_device *xe = to_xe_device(dev);
|
||||
struct xe_file *xef = to_xe_file(file);
|
||||
struct drm_xe_engine_create *args = data;
|
||||
struct drm_xe_exec_queue_create *args = data;
|
||||
struct drm_xe_engine_class_instance eci[XE_HW_ENGINE_MAX_INSTANCE];
|
||||
struct drm_xe_engine_class_instance __user *user_eci =
|
||||
u64_to_user_ptr(args->instances);
|
||||
struct xe_hw_engine *hwe;
|
||||
struct xe_vm *vm, *migrate_vm;
|
||||
struct xe_gt *gt;
|
||||
struct xe_engine *e = NULL;
|
||||
struct xe_exec_queue *q = NULL;
|
||||
u32 logical_mask;
|
||||
u32 id;
|
||||
u32 len;
|
||||
@ -557,15 +557,15 @@ int xe_engine_create_ioctl(struct drm_device *dev, void *data,
|
||||
|
||||
if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) {
|
||||
for_each_gt(gt, xe, id) {
|
||||
struct xe_engine *new;
|
||||
struct xe_exec_queue *new;
if (xe_gt_is_media_type(gt))
continue;

eci[0].gt_id = gt->info.id;
logical_mask = bind_engine_logical_mask(xe, gt, eci,
args->width,
args->num_placements);
logical_mask = bind_exec_queue_logical_mask(xe, gt, eci,
args->width,
args->num_placements);
if (XE_IOCTL_DBG(xe, !logical_mask))
return -EINVAL;

@ -577,28 +577,28 @@ int xe_engine_create_ioctl(struct drm_device *dev, void *data,
xe_device_mem_access_get(xe);

migrate_vm = xe_migrate_get_vm(gt_to_tile(gt)->migrate);
new = xe_engine_create(xe, migrate_vm, logical_mask,
args->width, hwe,
ENGINE_FLAG_PERSISTENT |
ENGINE_FLAG_VM |
(id ?
ENGINE_FLAG_BIND_ENGINE_CHILD :
0));
new = xe_exec_queue_create(xe, migrate_vm, logical_mask,
args->width, hwe,
EXEC_QUEUE_FLAG_PERSISTENT |
EXEC_QUEUE_FLAG_VM |
(id ?
EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD :
0));

xe_device_mem_access_put(xe); /* now held by engine */

xe_vm_put(migrate_vm);
if (IS_ERR(new)) {
err = PTR_ERR(new);
if (e)
goto put_engine;
if (q)
goto put_exec_queue;
return err;
}
if (id == 0)
e = new;
q = new;
else
list_add_tail(&new->multi_gt_list,
&e->multi_gt_link);
&q->multi_gt_link);
}
} else {
gt = xe_device_get_gt(xe, eci[0].gt_id);
@ -628,223 +628,223 @@ int xe_engine_create_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
}

e = xe_engine_create(xe, vm, logical_mask,
args->width, hwe,
xe_vm_no_dma_fences(vm) ? 0 :
ENGINE_FLAG_PERSISTENT);
q = xe_exec_queue_create(xe, vm, logical_mask,
args->width, hwe,
xe_vm_no_dma_fences(vm) ? 0 :
EXEC_QUEUE_FLAG_PERSISTENT);
up_read(&vm->lock);
xe_vm_put(vm);
if (IS_ERR(e))
return PTR_ERR(e);
if (IS_ERR(q))
return PTR_ERR(q);
}

if (args->extensions) {
err = engine_user_extensions(xe, e, args->extensions, 0, true);
err = exec_queue_user_extensions(xe, q, args->extensions, 0, true);
if (XE_IOCTL_DBG(xe, err))
goto put_engine;
goto put_exec_queue;
}

if (XE_IOCTL_DBG(xe, e->vm && xe_vm_in_compute_mode(e->vm) !=
!!(e->flags & ENGINE_FLAG_COMPUTE_MODE))) {
if (XE_IOCTL_DBG(xe, q->vm && xe_vm_in_compute_mode(q->vm) !=
!!(q->flags & EXEC_QUEUE_FLAG_COMPUTE_MODE))) {
err = -EOPNOTSUPP;
goto put_engine;
goto put_exec_queue;
}

e->persistent.xef = xef;
q->persistent.xef = xef;

mutex_lock(&xef->engine.lock);
err = xa_alloc(&xef->engine.xa, &id, e, xa_limit_32b, GFP_KERNEL);
mutex_unlock(&xef->engine.lock);
mutex_lock(&xef->exec_queue.lock);
err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
mutex_unlock(&xef->exec_queue.lock);
if (err)
goto put_engine;
goto put_exec_queue;

args->engine_id = id;
args->exec_queue_id = id;

return 0;

put_engine:
xe_engine_kill(e);
xe_engine_put(e);
put_exec_queue:
xe_exec_queue_kill(q);
xe_exec_queue_put(q);
return err;
}

int xe_engine_get_property_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct xe_device *xe = to_xe_device(dev);
struct xe_file *xef = to_xe_file(file);
struct drm_xe_engine_get_property *args = data;
struct xe_engine *e;
struct drm_xe_exec_queue_get_property *args = data;
struct xe_exec_queue *q;
int ret;

if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
return -EINVAL;

e = xe_engine_lookup(xef, args->engine_id);
if (XE_IOCTL_DBG(xe, !e))
q = xe_exec_queue_lookup(xef, args->exec_queue_id);
if (XE_IOCTL_DBG(xe, !q))
return -ENOENT;

switch (args->property) {
case XE_ENGINE_GET_PROPERTY_BAN:
args->value = !!(e->flags & ENGINE_FLAG_BANNED);
case XE_EXEC_QUEUE_GET_PROPERTY_BAN:
args->value = !!(q->flags & EXEC_QUEUE_FLAG_BANNED);
ret = 0;
break;
default:
ret = -EINVAL;
}

xe_engine_put(e);
xe_exec_queue_put(q);

return ret;
}

static void engine_kill_compute(struct xe_engine *e)
static void exec_queue_kill_compute(struct xe_exec_queue *q)
{
if (!xe_vm_in_compute_mode(e->vm))
if (!xe_vm_in_compute_mode(q->vm))
return;

down_write(&e->vm->lock);
list_del(&e->compute.link);
--e->vm->preempt.num_engines;
if (e->compute.pfence) {
dma_fence_enable_sw_signaling(e->compute.pfence);
dma_fence_put(e->compute.pfence);
e->compute.pfence = NULL;
down_write(&q->vm->lock);
list_del(&q->compute.link);
--q->vm->preempt.num_exec_queues;
if (q->compute.pfence) {
dma_fence_enable_sw_signaling(q->compute.pfence);
dma_fence_put(q->compute.pfence);
q->compute.pfence = NULL;
}
up_write(&e->vm->lock);
up_write(&q->vm->lock);
}

/**
* xe_engine_is_lr() - Whether an engine is long-running
* @e: The engine
* xe_exec_queue_is_lr() - Whether an exec_queue is long-running
* @q: The exec_queue
*
* Return: True if the engine is long-running, false otherwise.
* Return: True if the exec_queue is long-running, false otherwise.
*/
bool xe_engine_is_lr(struct xe_engine *e)
bool xe_exec_queue_is_lr(struct xe_exec_queue *q)
{
return e->vm && xe_vm_no_dma_fences(e->vm) &&
!(e->flags & ENGINE_FLAG_VM);
return q->vm && xe_vm_no_dma_fences(q->vm) &&
!(q->flags & EXEC_QUEUE_FLAG_VM);
}

static s32 xe_engine_num_job_inflight(struct xe_engine *e)
static s32 xe_exec_queue_num_job_inflight(struct xe_exec_queue *q)
{
return e->lrc->fence_ctx.next_seqno - xe_lrc_seqno(e->lrc) - 1;
return q->lrc->fence_ctx.next_seqno - xe_lrc_seqno(q->lrc) - 1;
}

/**
* xe_engine_ring_full() - Whether an engine's ring is full
* @e: The engine
* xe_exec_queue_ring_full() - Whether an exec_queue's ring is full
* @q: The exec_queue
*
* Return: True if the engine's ring is full, false otherwise.
* Return: True if the exec_queue's ring is full, false otherwise.
*/
bool xe_engine_ring_full(struct xe_engine *e)
bool xe_exec_queue_ring_full(struct xe_exec_queue *q)
{
struct xe_lrc *lrc = e->lrc;
struct xe_lrc *lrc = q->lrc;
s32 max_job = lrc->ring.size / MAX_JOB_SIZE_BYTES;

return xe_engine_num_job_inflight(e) >= max_job;
return xe_exec_queue_num_job_inflight(q) >= max_job;
}

/**
* xe_engine_is_idle() - Whether an engine is idle.
* @engine: The engine
* xe_exec_queue_is_idle() - Whether an exec_queue is idle.
* @q: The exec_queue
*
* FIXME: Need to determine what to use as the short-lived
* timeline lock for the engines, so that the return value
* timeline lock for the exec_queues, so that the return value
* of this function becomes more than just an advisory
* snapshot in time. The timeline lock must protect the
* seqno from racing submissions on the same engine.
* seqno from racing submissions on the same exec_queue.
* Typically vm->resv, but user-created timeline locks use the migrate vm
* and never grabs the migrate vm->resv so we have a race there.
*
* Return: True if the engine is idle, false otherwise.
* Return: True if the exec_queue is idle, false otherwise.
*/
bool xe_engine_is_idle(struct xe_engine *engine)
bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
{
if (XE_WARN_ON(xe_engine_is_parallel(engine)))
if (XE_WARN_ON(xe_exec_queue_is_parallel(q)))
return false;

return xe_lrc_seqno(&engine->lrc[0]) ==
engine->lrc[0].fence_ctx.next_seqno - 1;
return xe_lrc_seqno(&q->lrc[0]) ==
q->lrc[0].fence_ctx.next_seqno - 1;
}

void xe_engine_kill(struct xe_engine *e)
void xe_exec_queue_kill(struct xe_exec_queue *q)
{
struct xe_engine *engine = e, *next;
struct xe_exec_queue *eq = q, *next;

list_for_each_entry_safe(engine, next, &engine->multi_gt_list,
list_for_each_entry_safe(eq, next, &eq->multi_gt_list,
multi_gt_link) {
e->ops->kill(engine);
engine_kill_compute(engine);
q->ops->kill(eq);
exec_queue_kill_compute(eq);
}

e->ops->kill(e);
engine_kill_compute(e);
q->ops->kill(q);
exec_queue_kill_compute(q);
}

int xe_engine_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct xe_device *xe = to_xe_device(dev);
struct xe_file *xef = to_xe_file(file);
struct drm_xe_engine_destroy *args = data;
struct xe_engine *e;
struct drm_xe_exec_queue_destroy *args = data;
struct xe_exec_queue *q;

if (XE_IOCTL_DBG(xe, args->pad) ||
XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
return -EINVAL;

mutex_lock(&xef->engine.lock);
e = xa_erase(&xef->engine.xa, args->engine_id);
mutex_unlock(&xef->engine.lock);
if (XE_IOCTL_DBG(xe, !e))
mutex_lock(&xef->exec_queue.lock);
q = xa_erase(&xef->exec_queue.xa, args->exec_queue_id);
mutex_unlock(&xef->exec_queue.lock);
if (XE_IOCTL_DBG(xe, !q))
return -ENOENT;

if (!(e->flags & ENGINE_FLAG_PERSISTENT))
xe_engine_kill(e);
if (!(q->flags & EXEC_QUEUE_FLAG_PERSISTENT))
xe_exec_queue_kill(q);
else
xe_device_add_persistent_engines(xe, e);
xe_device_add_persistent_exec_queues(xe, q);

trace_xe_engine_close(e);
xe_engine_put(e);
trace_xe_exec_queue_close(q);
xe_exec_queue_put(q);

return 0;
}

int xe_engine_set_property_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
int xe_exec_queue_set_property_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct xe_device *xe = to_xe_device(dev);
struct xe_file *xef = to_xe_file(file);
struct drm_xe_engine_set_property *args = data;
struct xe_engine *e;
struct drm_xe_exec_queue_set_property *args = data;
struct xe_exec_queue *q;
int ret;
u32 idx;

if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
return -EINVAL;

e = xe_engine_lookup(xef, args->engine_id);
if (XE_IOCTL_DBG(xe, !e))
q = xe_exec_queue_lookup(xef, args->exec_queue_id);
if (XE_IOCTL_DBG(xe, !q))
return -ENOENT;

if (XE_IOCTL_DBG(xe, args->property >=
ARRAY_SIZE(engine_set_property_funcs))) {
ARRAY_SIZE(exec_queue_set_property_funcs))) {
ret = -EINVAL;
goto out;
}

idx = array_index_nospec(args->property,
ARRAY_SIZE(engine_set_property_funcs));
ret = engine_set_property_funcs[idx](xe, e, args->value, false);
ARRAY_SIZE(exec_queue_set_property_funcs));
ret = exec_queue_set_property_funcs[idx](xe, q, args->value, false);
if (XE_IOCTL_DBG(xe, ret))
goto out;

if (args->extensions)
ret = engine_user_extensions(xe, e, args->extensions, 0,
false);
ret = exec_queue_user_extensions(xe, q, args->extensions, 0,
false);
out:
xe_engine_put(e);
xe_exec_queue_put(q);

return ret;
}

@ -3,10 +3,10 @@
* Copyright © 2021 Intel Corporation
*/

#ifndef _XE_ENGINE_H_
#define _XE_ENGINE_H_
#ifndef _XE_EXEC_QUEUE_H_
#define _XE_EXEC_QUEUE_H_

#include "xe_engine_types.h"
#include "xe_exec_queue_types.h"
#include "xe_vm_types.h"

struct drm_device;
@ -14,50 +14,50 @@ struct drm_file;
struct xe_device;
struct xe_file;

struct xe_engine *xe_engine_create(struct xe_device *xe, struct xe_vm *vm,
u32 logical_mask, u16 width,
struct xe_hw_engine *hw_engine, u32 flags);
struct xe_engine *xe_engine_create_class(struct xe_device *xe, struct xe_gt *gt,
struct xe_vm *vm,
enum xe_engine_class class, u32 flags);
struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
u32 logical_mask, u16 width,
struct xe_hw_engine *hw_engine, u32 flags);
struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
struct xe_vm *vm,
enum xe_engine_class class, u32 flags);

void xe_engine_fini(struct xe_engine *e);
void xe_engine_destroy(struct kref *ref);
void xe_exec_queue_fini(struct xe_exec_queue *q);
void xe_exec_queue_destroy(struct kref *ref);

struct xe_engine *xe_engine_lookup(struct xe_file *xef, u32 id);
struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id);

static inline struct xe_engine *xe_engine_get(struct xe_engine *engine)
static inline struct xe_exec_queue *xe_exec_queue_get(struct xe_exec_queue *q)
{
kref_get(&engine->refcount);
return engine;
kref_get(&q->refcount);
return q;
}

static inline void xe_engine_put(struct xe_engine *engine)
static inline void xe_exec_queue_put(struct xe_exec_queue *q)
{
kref_put(&engine->refcount, xe_engine_destroy);
kref_put(&q->refcount, xe_exec_queue_destroy);
}

static inline bool xe_engine_is_parallel(struct xe_engine *engine)
static inline bool xe_exec_queue_is_parallel(struct xe_exec_queue *q)
{
return engine->width > 1;
return q->width > 1;
}

bool xe_engine_is_lr(struct xe_engine *e);
bool xe_exec_queue_is_lr(struct xe_exec_queue *q);

bool xe_engine_ring_full(struct xe_engine *e);
bool xe_exec_queue_ring_full(struct xe_exec_queue *q);

bool xe_engine_is_idle(struct xe_engine *engine);
bool xe_exec_queue_is_idle(struct xe_exec_queue *q);

void xe_engine_kill(struct xe_engine *e);
void xe_exec_queue_kill(struct xe_exec_queue *q);

int xe_engine_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int xe_engine_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int xe_engine_set_property_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int xe_engine_get_property_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
enum xe_engine_priority xe_engine_device_get_max_priority(struct xe_device *xe);
int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int xe_exec_queue_set_property_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
enum xe_exec_queue_priority xe_exec_queue_device_get_max_priority(struct xe_device *xe);

#endif

209 drivers/gpu/drm/xe/xe_exec_queue_types.h Normal file
@ -0,0 +1,209 @@
|
||||
/* SPDX-License-Identifier: MIT */
|
||||
/*
|
||||
* Copyright © 2022 Intel Corporation
|
||||
*/
|
||||
|
||||
#ifndef _XE_EXEC_QUEUE_TYPES_H_
|
||||
#define _XE_EXEC_QUEUE_TYPES_H_
|
||||
|
||||
#include <linux/kref.h>
|
||||
|
||||
#include <drm/gpu_scheduler.h>
|
||||
|
||||
#include "xe_gpu_scheduler_types.h"
|
||||
#include "xe_hw_engine_types.h"
|
||||
#include "xe_hw_fence_types.h"
|
||||
#include "xe_lrc_types.h"
|
||||
|
||||
struct xe_execlist_exec_queue;
|
||||
struct xe_gt;
|
||||
struct xe_guc_exec_queue;
|
||||
struct xe_hw_engine;
|
||||
struct xe_vm;
|
||||
|
||||
enum xe_exec_queue_priority {
|
||||
XE_EXEC_QUEUE_PRIORITY_UNSET = -2, /* For execlist usage only */
|
||||
XE_EXEC_QUEUE_PRIORITY_LOW = 0,
|
||||
XE_EXEC_QUEUE_PRIORITY_NORMAL,
|
||||
XE_EXEC_QUEUE_PRIORITY_HIGH,
|
||||
XE_EXEC_QUEUE_PRIORITY_KERNEL,
|
||||
|
||||
XE_EXEC_QUEUE_PRIORITY_COUNT
|
||||
};
|
||||
|
||||
/**
|
||||
* struct xe_exec_queue - Execution queue
|
||||
*
|
||||
* Contains all state necessary for submissions. Can either be a user object or
|
||||
* a kernel object.
|
||||
*/
|
||||
struct xe_exec_queue {
|
||||
/** @gt: graphics tile this exec queue can submit to */
|
||||
struct xe_gt *gt;
|
||||
/**
|
||||
* @hwe: A hardware of the same class. May (physical engine) or may not
|
||||
* (virtual engine) be where jobs actual engine up running. Should never
|
||||
* really be used for submissions.
|
||||
*/
|
||||
struct xe_hw_engine *hwe;
|
||||
/** @refcount: ref count of this exec queue */
|
||||
struct kref refcount;
|
||||
/** @vm: VM (address space) for this exec queue */
|
||||
struct xe_vm *vm;
|
||||
/** @class: class of this exec queue */
|
||||
enum xe_engine_class class;
|
||||
/** @priority: priority of this exec queue */
|
||||
enum xe_exec_queue_priority priority;
|
||||
/**
|
||||
* @logical_mask: logical mask of where job submitted to exec queue can run
|
||||
*/
|
||||
u32 logical_mask;
|
||||
/** @name: name of this exec queue */
|
||||
char name[MAX_FENCE_NAME_LEN];
|
||||
/** @width: width (number BB submitted per exec) of this exec queue */
|
||||
u16 width;
|
||||
/** @fence_irq: fence IRQ used to signal job completion */
|
||||
struct xe_hw_fence_irq *fence_irq;
|
||||
|
||||
#define EXEC_QUEUE_FLAG_BANNED BIT(0)
|
||||
#define EXEC_QUEUE_FLAG_KERNEL BIT(1)
|
||||
#define EXEC_QUEUE_FLAG_PERSISTENT BIT(2)
|
||||
#define EXEC_QUEUE_FLAG_COMPUTE_MODE BIT(3)
|
||||
/* Caller needs to hold rpm ref when creating engine with EXEC_QUEUE_FLAG_VM */
|
||||
#define EXEC_QUEUE_FLAG_VM BIT(4)
|
||||
#define EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD BIT(5)
|
||||
#define EXEC_QUEUE_FLAG_WA BIT(6)
|
||||
|
||||
/**
|
||||
* @flags: flags for this exec queue, should statically setup aside from ban
|
||||
* bit
|
||||
*/
|
||||
unsigned long flags;
|
||||
|
||||
union {
|
||||
/** @multi_gt_list: list head for VM bind engines if multi-GT */
|
||||
struct list_head multi_gt_list;
|
||||
/** @multi_gt_link: link for VM bind engines if multi-GT */
|
||||
struct list_head multi_gt_link;
|
||||
};
|
||||
|
||||
union {
|
||||
/** @execlist: execlist backend specific state for exec queue */
|
||||
struct xe_execlist_exec_queue *execlist;
|
||||
/** @guc: GuC backend specific state for exec queue */
|
||||
struct xe_guc_exec_queue *guc;
|
||||
};
|
||||
|
||||
/**
|
||||
* @persistent: persistent exec queue state
|
||||
*/
|
||||
struct {
|
||||
/** @xef: file which this exec queue belongs to */
|
||||
struct xe_file *xef;
|
||||
/** @link: link in list of persistent exec queues */
|
||||
struct list_head link;
|
||||
} persistent;
|
||||
|
||||
union {
|
||||
/**
|
||||
* @parallel: parallel submission state
|
||||
*/
|
||||
struct {
|
||||
/** @composite_fence_ctx: context composite fence */
|
||||
u64 composite_fence_ctx;
|
||||
/** @composite_fence_seqno: seqno for composite fence */
|
||||
u32 composite_fence_seqno;
|
||||
} parallel;
|
||||
/**
|
||||
* @bind: bind submission state
|
||||
*/
|
||||
struct {
|
||||
/** @fence_ctx: context bind fence */
|
||||
u64 fence_ctx;
|
||||
/** @fence_seqno: seqno for bind fence */
|
||||
u32 fence_seqno;
|
||||
} bind;
|
||||
};
|
||||
|
||||
/** @sched_props: scheduling properties */
|
||||
struct {
|
||||
/** @timeslice_us: timeslice period in micro-seconds */
|
||||
u32 timeslice_us;
|
||||
/** @preempt_timeout_us: preemption timeout in micro-seconds */
|
||||
u32 preempt_timeout_us;
|
||||
} sched_props;
|
||||
|
||||
/** @compute: compute exec queue state */
|
||||
struct {
|
||||
/** @pfence: preemption fence */
|
||||
struct dma_fence *pfence;
|
||||
/** @context: preemption fence context */
|
||||
u64 context;
|
||||
/** @seqno: preemption fence seqno */
|
||||
u32 seqno;
|
||||
/** @link: link into VM's list of exec queues */
|
||||
struct list_head link;
|
||||
/** @lock: preemption fences lock */
|
||||
spinlock_t lock;
|
||||
} compute;
|
||||
|
||||
/** @usm: unified shared memory state */
|
||||
struct {
|
||||
/** @acc_trigger: access counter trigger */
|
||||
u32 acc_trigger;
|
||||
/** @acc_notify: access counter notify */
|
||||
u32 acc_notify;
|
||||
/** @acc_granularity: access counter granularity */
|
||||
u32 acc_granularity;
|
||||
} usm;
|
||||
|
||||
/** @ops: submission backend exec queue operations */
|
||||
const struct xe_exec_queue_ops *ops;
|
||||
|
||||
/** @ring_ops: ring operations for this exec queue */
|
||||
const struct xe_ring_ops *ring_ops;
|
||||
/** @entity: DRM sched entity for this exec queue (1 to 1 relationship) */
|
||||
struct drm_sched_entity *entity;
|
||||
/** @lrc: logical ring context for this exec queue */
|
||||
struct xe_lrc lrc[];
|
||||
};
|
||||
|
||||
/**
|
||||
* struct xe_exec_queue_ops - Submission backend exec queue operations
|
||||
*/
|
||||
struct xe_exec_queue_ops {
|
||||
/** @init: Initialize exec queue for submission backend */
|
||||
int (*init)(struct xe_exec_queue *q);
|
||||
/** @kill: Kill inflight submissions for backend */
|
||||
void (*kill)(struct xe_exec_queue *q);
|
||||
/** @fini: Fini exec queue for submission backend */
|
||||
void (*fini)(struct xe_exec_queue *q);
|
||||
/** @set_priority: Set priority for exec queue */
|
||||
int (*set_priority)(struct xe_exec_queue *q,
|
||||
enum xe_exec_queue_priority priority);
|
||||
/** @set_timeslice: Set timeslice for exec queue */
|
||||
int (*set_timeslice)(struct xe_exec_queue *q, u32 timeslice_us);
|
||||
/** @set_preempt_timeout: Set preemption timeout for exec queue */
|
||||
int (*set_preempt_timeout)(struct xe_exec_queue *q, u32 preempt_timeout_us);
|
||||
/** @set_job_timeout: Set job timeout for exec queue */
|
||||
int (*set_job_timeout)(struct xe_exec_queue *q, u32 job_timeout_ms);
|
||||
/**
|
||||
* @suspend: Suspend exec queue from executing, allowed to be called
|
||||
* multiple times in a row before resume with the caveat that
|
||||
* suspend_wait returns before calling suspend again.
|
||||
*/
|
||||
int (*suspend)(struct xe_exec_queue *q);
|
||||
/**
|
||||
* @suspend_wait: Wait for an exec queue to suspend executing, should be
|
||||
* call after suspend.
|
||||
*/
|
||||
void (*suspend_wait)(struct xe_exec_queue *q);
|
||||
/**
|
||||
* @resume: Resume exec queue execution, exec queue must be in a suspended
|
||||
* state and dma fence returned from most recent suspend call must be
|
||||
* signalled when this function is called.
|
||||
*/
|
||||
void (*resume)(struct xe_exec_queue *q);
|
||||
};
|
||||
|
||||
#endif
|
@ -91,7 +91,7 @@ static void __start_lrc(struct xe_hw_engine *hwe, struct xe_lrc *lrc,
|
||||
}
|
||||
|
||||
static void __xe_execlist_port_start(struct xe_execlist_port *port,
|
||||
struct xe_execlist_engine *exl)
|
||||
struct xe_execlist_exec_queue *exl)
|
||||
{
|
||||
struct xe_device *xe = gt_to_xe(port->hwe->gt);
|
||||
int max_ctx = FIELD_MAX(GEN11_SW_CTX_ID);
|
||||
@ -109,7 +109,7 @@ static void __xe_execlist_port_start(struct xe_execlist_port *port,
|
||||
port->last_ctx_id = 1;
|
||||
}
|
||||
|
||||
__start_lrc(port->hwe, exl->engine->lrc, port->last_ctx_id);
|
||||
__start_lrc(port->hwe, exl->q->lrc, port->last_ctx_id);
|
||||
port->running_exl = exl;
|
||||
exl->has_run = true;
|
||||
}
|
||||
@ -128,16 +128,16 @@ static void __xe_execlist_port_idle(struct xe_execlist_port *port)
|
||||
port->running_exl = NULL;
|
||||
}
|
||||
|
||||
static bool xe_execlist_is_idle(struct xe_execlist_engine *exl)
|
||||
static bool xe_execlist_is_idle(struct xe_execlist_exec_queue *exl)
|
||||
{
|
||||
struct xe_lrc *lrc = exl->engine->lrc;
|
||||
struct xe_lrc *lrc = exl->q->lrc;
|
||||
|
||||
return lrc->ring.tail == lrc->ring.old_tail;
|
||||
}
|
||||
|
||||
static void __xe_execlist_port_start_next_active(struct xe_execlist_port *port)
|
||||
{
|
||||
struct xe_execlist_engine *exl = NULL;
|
||||
struct xe_execlist_exec_queue *exl = NULL;
|
||||
int i;
|
||||
|
||||
xe_execlist_port_assert_held(port);
|
||||
@ -145,12 +145,12 @@ static void __xe_execlist_port_start_next_active(struct xe_execlist_port *port)
|
||||
for (i = ARRAY_SIZE(port->active) - 1; i >= 0; i--) {
|
||||
while (!list_empty(&port->active[i])) {
|
||||
exl = list_first_entry(&port->active[i],
|
||||
struct xe_execlist_engine,
|
||||
struct xe_execlist_exec_queue,
|
||||
active_link);
|
||||
list_del(&exl->active_link);
|
||||
|
||||
if (xe_execlist_is_idle(exl)) {
|
||||
exl->active_priority = XE_ENGINE_PRIORITY_UNSET;
|
||||
exl->active_priority = XE_EXEC_QUEUE_PRIORITY_UNSET;
|
||||
continue;
|
||||
}
|
||||
|
||||
@ -198,7 +198,7 @@ static void xe_execlist_port_irq_handler(struct xe_hw_engine *hwe,
|
||||
}
|
||||
|
||||
static void xe_execlist_port_wake_locked(struct xe_execlist_port *port,
|
||||
enum xe_engine_priority priority)
|
||||
enum xe_exec_queue_priority priority)
|
||||
{
|
||||
xe_execlist_port_assert_held(port);
|
||||
|
||||
@ -208,25 +208,25 @@ static void xe_execlist_port_wake_locked(struct xe_execlist_port *port,
|
||||
__xe_execlist_port_start_next_active(port);
|
||||
}
|
||||
|
||||
static void xe_execlist_make_active(struct xe_execlist_engine *exl)
|
||||
static void xe_execlist_make_active(struct xe_execlist_exec_queue *exl)
|
||||
{
|
||||
struct xe_execlist_port *port = exl->port;
|
||||
enum xe_engine_priority priority = exl->active_priority;
|
||||
enum xe_exec_queue_priority priority = exl->active_priority;
|
||||
|
||||
XE_WARN_ON(priority == XE_ENGINE_PRIORITY_UNSET);
|
||||
XE_WARN_ON(priority == XE_EXEC_QUEUE_PRIORITY_UNSET);
|
||||
XE_WARN_ON(priority < 0);
|
||||
XE_WARN_ON(priority >= ARRAY_SIZE(exl->port->active));
|
||||
|
||||
spin_lock_irq(&port->lock);
|
||||
|
||||
if (exl->active_priority != priority &&
|
||||
exl->active_priority != XE_ENGINE_PRIORITY_UNSET) {
|
||||
exl->active_priority != XE_EXEC_QUEUE_PRIORITY_UNSET) {
|
||||
/* Priority changed, move it to the right list */
|
||||
list_del(&exl->active_link);
|
||||
exl->active_priority = XE_ENGINE_PRIORITY_UNSET;
|
||||
exl->active_priority = XE_EXEC_QUEUE_PRIORITY_UNSET;
|
||||
}
|
||||
|
||||
if (exl->active_priority == XE_ENGINE_PRIORITY_UNSET) {
|
||||
if (exl->active_priority == XE_EXEC_QUEUE_PRIORITY_UNSET) {
|
||||
exl->active_priority = priority;
|
||||
list_add_tail(&exl->active_link, &port->active[priority]);
|
||||
}
|
||||
@ -293,10 +293,10 @@ static struct dma_fence *
|
||||
execlist_run_job(struct drm_sched_job *drm_job)
|
||||
{
|
||||
struct xe_sched_job *job = to_xe_sched_job(drm_job);
|
||||
struct xe_engine *e = job->engine;
|
||||
struct xe_execlist_engine *exl = job->engine->execlist;
|
||||
struct xe_exec_queue *q = job->q;
|
||||
struct xe_execlist_exec_queue *exl = job->q->execlist;
|
||||
|
||||
e->ring_ops->emit_job(job);
|
||||
q->ring_ops->emit_job(job);
|
||||
xe_execlist_make_active(exl);
|
||||
|
||||
return dma_fence_get(job->fence);
|
||||
@ -314,11 +314,11 @@ static const struct drm_sched_backend_ops drm_sched_ops = {
|
||||
.free_job = execlist_job_free,
|
||||
};
|
||||
|
||||
static int execlist_engine_init(struct xe_engine *e)
|
||||
static int execlist_exec_queue_init(struct xe_exec_queue *q)
|
||||
{
|
||||
struct drm_gpu_scheduler *sched;
|
||||
struct xe_execlist_engine *exl;
|
||||
struct xe_device *xe = gt_to_xe(e->gt);
|
||||
struct xe_execlist_exec_queue *exl;
|
||||
struct xe_device *xe = gt_to_xe(q->gt);
|
||||
int err;
|
||||
|
||||
XE_WARN_ON(xe_device_guc_submission_enabled(xe));
|
||||
@ -329,13 +329,13 @@ static int execlist_engine_init(struct xe_engine *e)
|
||||
if (!exl)
|
||||
return -ENOMEM;
|
||||
|
||||
exl->engine = e;
|
||||
exl->q = q;
|
||||
|
||||
err = drm_sched_init(&exl->sched, &drm_sched_ops, NULL, 1,
|
||||
e->lrc[0].ring.size / MAX_JOB_SIZE_BYTES,
|
||||
q->lrc[0].ring.size / MAX_JOB_SIZE_BYTES,
|
||||
XE_SCHED_HANG_LIMIT, XE_SCHED_JOB_TIMEOUT,
|
||||
NULL, NULL, e->hwe->name,
|
||||
gt_to_xe(e->gt)->drm.dev);
|
||||
NULL, NULL, q->hwe->name,
|
||||
gt_to_xe(q->gt)->drm.dev);
|
||||
if (err)
|
||||
goto err_free;
|
||||
|
||||
@ -344,30 +344,30 @@ static int execlist_engine_init(struct xe_engine *e)
|
||||
if (err)
|
||||
goto err_sched;
|
||||
|
||||
exl->port = e->hwe->exl_port;
|
||||
exl->port = q->hwe->exl_port;
|
||||
exl->has_run = false;
|
||||
exl->active_priority = XE_ENGINE_PRIORITY_UNSET;
|
||||
e->execlist = exl;
|
||||
e->entity = &exl->entity;
|
||||
exl->active_priority = XE_EXEC_QUEUE_PRIORITY_UNSET;
|
||||
q->execlist = exl;
|
||||
q->entity = &exl->entity;
|
||||
|
||||
switch (e->class) {
|
||||
switch (q->class) {
|
||||
case XE_ENGINE_CLASS_RENDER:
|
||||
sprintf(e->name, "rcs%d", ffs(e->logical_mask) - 1);
|
||||
sprintf(q->name, "rcs%d", ffs(q->logical_mask) - 1);
|
||||
break;
|
||||
case XE_ENGINE_CLASS_VIDEO_DECODE:
|
||||
sprintf(e->name, "vcs%d", ffs(e->logical_mask) - 1);
|
||||
sprintf(q->name, "vcs%d", ffs(q->logical_mask) - 1);
|
||||
break;
|
||||
case XE_ENGINE_CLASS_VIDEO_ENHANCE:
|
||||
sprintf(e->name, "vecs%d", ffs(e->logical_mask) - 1);
|
||||
sprintf(q->name, "vecs%d", ffs(q->logical_mask) - 1);
|
||||
break;
|
||||
case XE_ENGINE_CLASS_COPY:
|
||||
sprintf(e->name, "bcs%d", ffs(e->logical_mask) - 1);
|
||||
sprintf(q->name, "bcs%d", ffs(q->logical_mask) - 1);
|
||||
break;
|
||||
case XE_ENGINE_CLASS_COMPUTE:
|
||||
sprintf(e->name, "ccs%d", ffs(e->logical_mask) - 1);
|
||||
sprintf(q->name, "ccs%d", ffs(q->logical_mask) - 1);
|
||||
break;
|
||||
default:
|
||||
XE_WARN_ON(e->class);
|
||||
XE_WARN_ON(q->class);
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -379,96 +379,96 @@ static int execlist_engine_init(struct xe_engine *e)
|
||||
return err;
|
||||
}
|
||||
|
||||
static void execlist_engine_fini_async(struct work_struct *w)
|
||||
static void execlist_exec_queue_fini_async(struct work_struct *w)
|
||||
{
|
||||
struct xe_execlist_engine *ee =
|
||||
container_of(w, struct xe_execlist_engine, fini_async);
|
||||
struct xe_engine *e = ee->engine;
|
||||
struct xe_execlist_engine *exl = e->execlist;
|
||||
struct xe_execlist_exec_queue *ee =
|
||||
container_of(w, struct xe_execlist_exec_queue, fini_async);
|
||||
struct xe_exec_queue *q = ee->q;
|
||||
struct xe_execlist_exec_queue *exl = q->execlist;
|
||||
unsigned long flags;
|
||||
|
||||
XE_WARN_ON(xe_device_guc_submission_enabled(gt_to_xe(e->gt)));
|
||||
XE_WARN_ON(xe_device_guc_submission_enabled(gt_to_xe(q->gt)));
|
||||
|
||||
spin_lock_irqsave(&exl->port->lock, flags);
|
||||
if (WARN_ON(exl->active_priority != XE_ENGINE_PRIORITY_UNSET))
|
||||
if (WARN_ON(exl->active_priority != XE_EXEC_QUEUE_PRIORITY_UNSET))
|
||||
list_del(&exl->active_link);
|
||||
spin_unlock_irqrestore(&exl->port->lock, flags);
|
||||
|
||||
if (e->flags & ENGINE_FLAG_PERSISTENT)
|
||||
xe_device_remove_persistent_engines(gt_to_xe(e->gt), e);
|
||||
if (q->flags & EXEC_QUEUE_FLAG_PERSISTENT)
|
||||
xe_device_remove_persistent_exec_queues(gt_to_xe(q->gt), q);
|
||||
drm_sched_entity_fini(&exl->entity);
|
||||
drm_sched_fini(&exl->sched);
|
||||
kfree(exl);
|
||||
|
||||
xe_engine_fini(e);
|
||||
xe_exec_queue_fini(q);
|
||||
}
|
||||
|
||||
static void execlist_engine_kill(struct xe_engine *e)
|
||||
static void execlist_exec_queue_kill(struct xe_exec_queue *q)
|
||||
{
|
||||
/* NIY */
|
||||
}
|
||||
|
||||
static void execlist_engine_fini(struct xe_engine *e)
|
||||
static void execlist_exec_queue_fini(struct xe_exec_queue *q)
|
||||
{
|
||||
INIT_WORK(&e->execlist->fini_async, execlist_engine_fini_async);
|
||||
queue_work(system_unbound_wq, &e->execlist->fini_async);
|
||||
INIT_WORK(&q->execlist->fini_async, execlist_exec_queue_fini_async);
|
||||
queue_work(system_unbound_wq, &q->execlist->fini_async);
|
||||
}
|
||||
|
||||
static int execlist_engine_set_priority(struct xe_engine *e,
|
||||
enum xe_engine_priority priority)
|
||||
static int execlist_exec_queue_set_priority(struct xe_exec_queue *q,
|
||||
enum xe_exec_queue_priority priority)
|
||||
{
|
||||
/* NIY */
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int execlist_engine_set_timeslice(struct xe_engine *e, u32 timeslice_us)
|
||||
static int execlist_exec_queue_set_timeslice(struct xe_exec_queue *q, u32 timeslice_us)
|
||||
{
|
||||
/* NIY */
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int execlist_engine_set_preempt_timeout(struct xe_engine *e,
|
||||
u32 preempt_timeout_us)
|
||||
static int execlist_exec_queue_set_preempt_timeout(struct xe_exec_queue *q,
|
||||
u32 preempt_timeout_us)
|
||||
{
|
||||
/* NIY */
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int execlist_engine_set_job_timeout(struct xe_engine *e,
|
||||
u32 job_timeout_ms)
|
||||
static int execlist_exec_queue_set_job_timeout(struct xe_exec_queue *q,
|
||||
u32 job_timeout_ms)
|
||||
{
|
||||
/* NIY */
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int execlist_engine_suspend(struct xe_engine *e)
|
||||
static int execlist_exec_queue_suspend(struct xe_exec_queue *q)
|
||||
{
|
||||
/* NIY */
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void execlist_engine_suspend_wait(struct xe_engine *e)
|
||||
static void execlist_exec_queue_suspend_wait(struct xe_exec_queue *q)
|
||||
|
||||
{
|
||||
/* NIY */
|
||||
}
|
||||
|
||||
static void execlist_engine_resume(struct xe_engine *e)
|
||||
static void execlist_exec_queue_resume(struct xe_exec_queue *q)
|
||||
{
|
||||
/* NIY */
|
||||
}
|
||||
|
||||
static const struct xe_engine_ops execlist_engine_ops = {
|
||||
.init = execlist_engine_init,
|
||||
.kill = execlist_engine_kill,
|
||||
.fini = execlist_engine_fini,
|
||||
.set_priority = execlist_engine_set_priority,
|
||||
.set_timeslice = execlist_engine_set_timeslice,
|
||||
.set_preempt_timeout = execlist_engine_set_preempt_timeout,
|
||||
.set_job_timeout = execlist_engine_set_job_timeout,
|
||||
.suspend = execlist_engine_suspend,
|
||||
.suspend_wait = execlist_engine_suspend_wait,
|
||||
.resume = execlist_engine_resume,
|
||||
static const struct xe_exec_queue_ops execlist_exec_queue_ops = {
|
||||
.init = execlist_exec_queue_init,
|
||||
.kill = execlist_exec_queue_kill,
|
||||
.fini = execlist_exec_queue_fini,
|
||||
.set_priority = execlist_exec_queue_set_priority,
|
||||
.set_timeslice = execlist_exec_queue_set_timeslice,
|
||||
.set_preempt_timeout = execlist_exec_queue_set_preempt_timeout,
|
||||
.set_job_timeout = execlist_exec_queue_set_job_timeout,
|
||||
.suspend = execlist_exec_queue_suspend,
|
||||
.suspend_wait = execlist_exec_queue_suspend_wait,
|
||||
.resume = execlist_exec_queue_resume,
|
||||
};
|
||||
|
||||
int xe_execlist_init(struct xe_gt *gt)
|
||||
@ -477,7 +477,7 @@ int xe_execlist_init(struct xe_gt *gt)
|
||||
if (xe_device_guc_submission_enabled(gt_to_xe(gt)))
|
||||
return 0;
|
||||
|
||||
gt->engine_ops = &execlist_engine_ops;
|
||||
gt->exec_queue_ops = &execlist_exec_queue_ops;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -10,27 +10,27 @@
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/workqueue.h>
|
||||
|
||||
#include "xe_engine_types.h"
|
||||
#include "xe_exec_queue_types.h"
|
||||
|
||||
struct xe_hw_engine;
|
||||
struct xe_execlist_engine;
|
||||
struct xe_execlist_exec_queue;
|
||||
|
||||
struct xe_execlist_port {
|
||||
struct xe_hw_engine *hwe;
|
||||
|
||||
spinlock_t lock;
|
||||
|
||||
struct list_head active[XE_ENGINE_PRIORITY_COUNT];
|
||||
struct list_head active[XE_EXEC_QUEUE_PRIORITY_COUNT];
|
||||
|
||||
u32 last_ctx_id;
|
||||
|
||||
struct xe_execlist_engine *running_exl;
|
||||
struct xe_execlist_exec_queue *running_exl;
|
||||
|
||||
struct timer_list irq_fail;
|
||||
};
|
||||
|
||||
struct xe_execlist_engine {
|
||||
struct xe_engine *engine;
|
||||
struct xe_execlist_exec_queue {
|
||||
struct xe_exec_queue *q;
|
||||
|
||||
struct drm_gpu_scheduler sched;
|
||||
|
||||
@ -42,7 +42,7 @@ struct xe_execlist_engine {
|
||||
|
||||
struct work_struct fini_async;
|
||||
|
||||
enum xe_engine_priority active_priority;
|
||||
enum xe_exec_queue_priority active_priority;
|
||||
struct list_head active_link;
|
||||
};
|
||||
|
||||
|
@ -26,7 +26,7 @@
|
||||
#include "xe_gt_sysfs.h"
|
||||
#include "xe_gt_tlb_invalidation.h"
|
||||
#include "xe_gt_topology.h"
|
||||
#include "xe_guc_engine_types.h"
|
||||
#include "xe_guc_exec_queue_types.h"
|
||||
#include "xe_hw_fence.h"
|
||||
#include "xe_irq.h"
|
||||
#include "xe_lrc.h"
|
||||
@ -81,7 +81,7 @@ static void gt_fini(struct drm_device *drm, void *arg)
|
||||
|
||||
static void gt_reset_worker(struct work_struct *w);
|
||||
|
||||
static int emit_nop_job(struct xe_gt *gt, struct xe_engine *e)
|
||||
static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q)
|
||||
{
|
||||
struct xe_sched_job *job;
|
||||
struct xe_bb *bb;
|
||||
@ -94,7 +94,7 @@ static int emit_nop_job(struct xe_gt *gt, struct xe_engine *e)
|
||||
return PTR_ERR(bb);
|
||||
|
||||
batch_ofs = xe_bo_ggtt_addr(gt_to_tile(gt)->mem.kernel_bb_pool->bo);
|
||||
job = xe_bb_create_wa_job(e, bb, batch_ofs);
|
||||
job = xe_bb_create_wa_job(q, bb, batch_ofs);
|
||||
if (IS_ERR(job)) {
|
||||
xe_bb_free(bb, NULL);
|
||||
return PTR_ERR(job);
|
||||
@ -115,9 +115,9 @@ static int emit_nop_job(struct xe_gt *gt, struct xe_engine *e)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int emit_wa_job(struct xe_gt *gt, struct xe_engine *e)
|
||||
static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
|
||||
{
|
||||
struct xe_reg_sr *sr = &e->hwe->reg_lrc;
|
||||
struct xe_reg_sr *sr = &q->hwe->reg_lrc;
|
||||
struct xe_reg_sr_entry *entry;
|
||||
unsigned long reg;
|
||||
struct xe_sched_job *job;
|
||||
@ -143,7 +143,7 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_engine *e)
|
||||
}
|
||||
|
||||
batch_ofs = xe_bo_ggtt_addr(gt_to_tile(gt)->mem.kernel_bb_pool->bo);
|
||||
job = xe_bb_create_wa_job(e, bb, batch_ofs);
|
||||
job = xe_bb_create_wa_job(q, bb, batch_ofs);
|
||||
if (IS_ERR(job)) {
|
||||
xe_bb_free(bb, NULL);
|
||||
return PTR_ERR(job);
|
||||
@ -173,7 +173,7 @@ int xe_gt_record_default_lrcs(struct xe_gt *gt)
|
||||
int err = 0;
|
||||
|
||||
for_each_hw_engine(hwe, gt, id) {
|
||||
struct xe_engine *e, *nop_e;
|
||||
struct xe_exec_queue *q, *nop_q;
|
||||
struct xe_vm *vm;
|
||||
void *default_lrc;
|
||||
|
||||
@ -192,58 +192,58 @@ int xe_gt_record_default_lrcs(struct xe_gt *gt)
|
||||
return -ENOMEM;
|
||||
|
||||
vm = xe_migrate_get_vm(tile->migrate);
|
||||
e = xe_engine_create(xe, vm, BIT(hwe->logical_instance), 1,
|
||||
hwe, ENGINE_FLAG_WA);
|
||||
if (IS_ERR(e)) {
|
||||
err = PTR_ERR(e);
|
||||
xe_gt_err(gt, "hwe %s: xe_engine_create failed (%pe)\n",
|
||||
hwe->name, e);
|
||||
q = xe_exec_queue_create(xe, vm, BIT(hwe->logical_instance), 1,
|
||||
hwe, EXEC_QUEUE_FLAG_WA);
|
||||
if (IS_ERR(q)) {
|
||||
err = PTR_ERR(q);
|
||||
xe_gt_err(gt, "hwe %s: xe_exec_queue_create failed (%pe)\n",
|
||||
hwe->name, q);
|
||||
goto put_vm;
|
||||
}
|
||||
|
||||
/* Prime golden LRC with known good state */
|
||||
err = emit_wa_job(gt, e);
|
||||
err = emit_wa_job(gt, q);
|
||||
if (err) {
|
||||
xe_gt_err(gt, "hwe %s: emit_wa_job failed (%pe) guc_id=%u\n",
|
||||
hwe->name, ERR_PTR(err), e->guc->id);
|
||||
goto put_engine;
|
||||
hwe->name, ERR_PTR(err), q->guc->id);
|
||||
goto put_exec_queue;
|
||||
}
|
||||
|
||||
nop_e = xe_engine_create(xe, vm, BIT(hwe->logical_instance),
|
||||
1, hwe, ENGINE_FLAG_WA);
|
||||
if (IS_ERR(nop_e)) {
|
||||
err = PTR_ERR(nop_e);
|
||||
xe_gt_err(gt, "hwe %s: nop xe_engine_create failed (%pe)\n",
|
||||
hwe->name, nop_e);
|
||||
goto put_engine;
|
||||
nop_q = xe_exec_queue_create(xe, vm, BIT(hwe->logical_instance),
|
||||
1, hwe, EXEC_QUEUE_FLAG_WA);
|
||||
if (IS_ERR(nop_q)) {
|
||||
err = PTR_ERR(nop_q);
|
||||
xe_gt_err(gt, "hwe %s: nop xe_exec_queue_create failed (%pe)\n",
|
||||
hwe->name, nop_q);
|
||||
goto put_exec_queue;
|
||||
}
|
||||
|
||||
/* Switch to different LRC */
|
||||
err = emit_nop_job(gt, nop_e);
|
||||
err = emit_nop_job(gt, nop_q);
|
||||
if (err) {
|
||||
xe_gt_err(gt, "hwe %s: nop emit_nop_job failed (%pe) guc_id=%u\n",
|
||||
hwe->name, ERR_PTR(err), nop_e->guc->id);
|
||||
goto put_nop_e;
|
||||
hwe->name, ERR_PTR(err), nop_q->guc->id);
|
||||
goto put_nop_q;
|
||||
}
|
||||
|
||||
/* Reload golden LRC to record the effect of any indirect W/A */
|
||||
err = emit_nop_job(gt, e);
|
||||
err = emit_nop_job(gt, q);
|
||||
if (err) {
|
||||
xe_gt_err(gt, "hwe %s: emit_nop_job failed (%pe) guc_id=%u\n",
|
||||
hwe->name, ERR_PTR(err), e->guc->id);
|
||||
goto put_nop_e;
|
||||
hwe->name, ERR_PTR(err), q->guc->id);
|
||||
goto put_nop_q;
|
||||
}
|
||||
|
||||
xe_map_memcpy_from(xe, default_lrc,
|
||||
&e->lrc[0].bo->vmap,
|
||||
xe_lrc_pphwsp_offset(&e->lrc[0]),
|
||||
&q->lrc[0].bo->vmap,
|
||||
xe_lrc_pphwsp_offset(&q->lrc[0]),
|
||||
xe_lrc_size(xe, hwe->class));
|
||||
|
||||
gt->default_lrc[hwe->class] = default_lrc;
|
||||
put_nop_e:
|
||||
xe_engine_put(nop_e);
|
||||
put_engine:
|
||||
xe_engine_put(e);
|
||||
put_nop_q:
|
||||
xe_exec_queue_put(nop_q);
|
||||
put_exec_queue:
|
||||
xe_exec_queue_put(q);
|
||||
put_vm:
|
||||
xe_vm_put(vm);
|
||||
if (err)
|
||||
|
@ -14,7 +14,7 @@
|
||||
#include "xe_sa_types.h"
|
||||
#include "xe_uc_types.h"
|
||||
|
||||
struct xe_engine_ops;
|
||||
struct xe_exec_queue_ops;
|
||||
struct xe_migrate;
|
||||
struct xe_ring_ops;
|
||||
|
||||
@ -269,8 +269,8 @@ struct xe_gt {
|
||||
/** @gtidle: idle properties of GT */
|
||||
struct xe_gt_idle gtidle;
|
||||
|
||||
/** @engine_ops: submission backend engine operations */
|
||||
const struct xe_engine_ops *engine_ops;
|
||||
/** @exec_queue_ops: submission backend exec queue operations */
|
||||
const struct xe_exec_queue_ops *exec_queue_ops;
|
||||
|
||||
/**
|
||||
* @ring_ops: ring operations for this hw engine (1 per engine class)
|
||||
|
@ -495,7 +495,7 @@ static void guc_mmio_reg_state_init(struct xe_guc_ads *ads)
|
||||
u8 gc;
|
||||
|
||||
/*
|
||||
* 1. Write all MMIO entries for this engine to the table. No
|
||||
* 1. Write all MMIO entries for this exec queue to the table. No
|
||||
* need to worry about fused-off engines and when there are
|
||||
* entries in the regset: the reg_state_list has been zero'ed
|
||||
* by xe_guc_ads_populate()
|
||||
|
@ -888,11 +888,11 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
|
||||
ret = xe_guc_deregister_done_handler(guc, payload, adj_len);
|
||||
break;
|
||||
case XE_GUC_ACTION_CONTEXT_RESET_NOTIFICATION:
|
||||
ret = xe_guc_engine_reset_handler(guc, payload, adj_len);
|
||||
ret = xe_guc_exec_queue_reset_handler(guc, payload, adj_len);
|
||||
break;
|
||||
case XE_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION:
|
||||
ret = xe_guc_engine_reset_failure_handler(guc, payload,
|
||||
adj_len);
|
||||
ret = xe_guc_exec_queue_reset_failure_handler(guc, payload,
|
||||
adj_len);
|
||||
break;
|
||||
case XE_GUC_ACTION_SCHED_ENGINE_MODE_DONE:
|
||||
/* Selftest only at the moment */
|
||||
@ -902,8 +902,8 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
|
||||
/* FIXME: Handle this */
|
||||
break;
|
||||
case XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR:
|
||||
ret = xe_guc_engine_memory_cat_error_handler(guc, payload,
|
||||
adj_len);
|
||||
ret = xe_guc_exec_queue_memory_cat_error_handler(guc, payload,
|
||||
adj_len);
|
||||
break;
|
||||
case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
|
||||
ret = xe_guc_pagefault_handler(guc, payload, adj_len);
|
||||
|
@ -12,22 +12,22 @@
|
||||
#include "xe_gpu_scheduler_types.h"
|
||||
|
||||
struct dma_fence;
|
||||
struct xe_engine;
|
||||
struct xe_exec_queue;
|
||||
|
||||
/**
|
||||
* struct xe_guc_engine - GuC specific state for an xe_engine
|
||||
* struct xe_guc_exec_queue - GuC specific state for an xe_exec_queue
|
||||
*/
|
||||
struct xe_guc_engine {
|
||||
/** @engine: Backpointer to parent xe_engine */
|
||||
struct xe_engine *engine;
|
||||
/** @sched: GPU scheduler for this xe_engine */
|
||||
struct xe_guc_exec_queue {
|
||||
/** @q: Backpointer to parent xe_exec_queue */
|
||||
struct xe_exec_queue *q;
|
||||
/** @sched: GPU scheduler for this xe_exec_queue */
|
||||
struct xe_gpu_scheduler sched;
|
||||
/** @entity: Scheduler entity for this xe_engine */
|
||||
/** @entity: Scheduler entity for this xe_exec_queue */
|
||||
struct xe_sched_entity entity;
|
||||
/**
|
||||
* @static_msgs: Static messages for this xe_engine, used when a message
|
||||
* needs to sent through the GPU scheduler but memory allocations are
|
||||
* not allowed.
|
||||
* @static_msgs: Static messages for this xe_exec_queue, used when
|
||||
* a message needs to sent through the GPU scheduler but memory
|
||||
* allocations are not allowed.
|
||||
*/
|
||||
#define MAX_STATIC_MSG_TYPE 3
|
||||
struct xe_sched_msg static_msgs[MAX_STATIC_MSG_TYPE];
|
||||
@ -37,17 +37,17 @@ struct xe_guc_engine {
|
||||
struct work_struct fini_async;
|
||||
/** @resume_time: time of last resume */
|
||||
u64 resume_time;
|
||||
/** @state: GuC specific state for this xe_engine */
|
||||
/** @state: GuC specific state for this xe_exec_queue */
|
||||
atomic_t state;
|
||||
/** @wqi_head: work queue item tail */
|
||||
u32 wqi_head;
|
||||
/** @wqi_tail: work queue item tail */
|
||||
u32 wqi_tail;
|
||||
/** @id: GuC id for this xe_engine */
|
||||
/** @id: GuC id for this exec_queue */
|
||||
u16 id;
|
||||
/** @suspend_wait: wait queue used to wait on pending suspends */
|
||||
wait_queue_head_t suspend_wait;
|
||||
/** @suspend_pending: a suspend of the engine is pending */
|
||||
/** @suspend_pending: a suspend of the exec_queue is pending */
|
||||
bool suspend_pending;
|
||||
};
|
||||
|
@ -69,13 +69,13 @@ struct guc_klv_generic_dw_t {
|
||||
} __packed;
|
||||
|
||||
/* Format of the UPDATE_CONTEXT_POLICIES H2G data packet */
|
||||
struct guc_update_engine_policy_header {
|
||||
struct guc_update_exec_queue_policy_header {
|
||||
u32 action;
|
||||
u32 guc_id;
|
||||
} __packed;
|
||||
|
||||
struct guc_update_engine_policy {
|
||||
struct guc_update_engine_policy_header header;
|
||||
struct guc_update_exec_queue_policy {
|
||||
struct guc_update_exec_queue_policy_header header;
|
||||
struct guc_klv_generic_dw_t klv[GUC_CONTEXT_POLICIES_KLV_NUM_IDS];
|
||||
} __packed;
|
||||
|
||||
|
File diff suppressed because it is too large
@ -9,7 +9,7 @@
|
||||
#include <linux/types.h>
|
||||
|
||||
struct drm_printer;
|
||||
struct xe_engine;
|
||||
struct xe_exec_queue;
|
||||
struct xe_guc;
|
||||
|
||||
int xe_guc_submit_init(struct xe_guc *guc);
|
||||
@ -21,18 +21,18 @@ int xe_guc_submit_start(struct xe_guc *guc);
|
||||
|
||||
int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
|
||||
int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
|
||||
int xe_guc_engine_reset_handler(struct xe_guc *guc, u32 *msg, u32 len);
|
||||
int xe_guc_engine_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
|
||||
u32 len);
|
||||
int xe_guc_engine_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len);
|
||||
int xe_guc_exec_queue_reset_handler(struct xe_guc *guc, u32 *msg, u32 len);
|
||||
int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
|
||||
u32 len);
|
||||
int xe_guc_exec_queue_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len);
|
||||
|
||||
struct xe_guc_submit_engine_snapshot *
|
||||
xe_guc_engine_snapshot_capture(struct xe_engine *e);
|
||||
struct xe_guc_submit_exec_queue_snapshot *
|
||||
xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q);
|
||||
void
|
||||
xe_guc_engine_snapshot_print(struct xe_guc_submit_engine_snapshot *snapshot,
|
||||
struct drm_printer *p);
|
||||
xe_guc_exec_queue_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snapshot,
|
||||
struct drm_printer *p);
|
||||
void
|
||||
xe_guc_engine_snapshot_free(struct xe_guc_submit_engine_snapshot *snapshot);
|
||||
xe_guc_exec_queue_snapshot_free(struct xe_guc_submit_exec_queue_snapshot *snapshot);
|
||||
void xe_guc_submit_print(struct xe_guc *guc, struct drm_printer *p);
|
||||
|
||||
#endif
|
||||
|
@ -79,20 +79,20 @@ struct pending_list_snapshot {
|
||||
};
|
||||
|
||||
/**
|
||||
* struct xe_guc_submit_engine_snapshot - Snapshot for devcoredump
|
||||
* struct xe_guc_submit_exec_queue_snapshot - Snapshot for devcoredump
|
||||
*/
|
||||
struct xe_guc_submit_engine_snapshot {
|
||||
/** @name: name of this engine */
|
||||
struct xe_guc_submit_exec_queue_snapshot {
|
||||
/** @name: name of this exec queue */
|
||||
char name[MAX_FENCE_NAME_LEN];
|
||||
/** @class: class of this engine */
|
||||
/** @class: class of this exec queue */
|
||||
enum xe_engine_class class;
|
||||
/**
|
||||
* @logical_mask: logical mask of where job submitted to engine can run
|
||||
* @logical_mask: logical mask of where job submitted to exec queue can run
|
||||
*/
|
||||
u32 logical_mask;
|
||||
/** @width: width (number BB submitted per exec) of this engine */
|
||||
/** @width: width (number BB submitted per exec) of this exec queue */
|
||||
u16 width;
|
||||
/** @refcount: ref count of this engine */
|
||||
/** @refcount: ref count of this exec queue */
|
||||
u32 refcount;
|
||||
/**
|
||||
* @sched_timeout: the time after which a job is removed from the
|
||||
@ -113,8 +113,8 @@ struct xe_guc_submit_engine_snapshot {
|
||||
|
||||
/** @schedule_state: Schedule State at the moment of Crash */
|
||||
u32 schedule_state;
|
||||
/** @engine_flags: Flags of the faulty engine */
|
||||
unsigned long engine_flags;
|
||||
/** @exec_queue_flags: Flags of the faulty exec_queue */
|
||||
unsigned long exec_queue_flags;
|
||||
|
||||
/** @guc: GuC Engine Snapshot */
|
||||
struct {
|
||||
@ -122,7 +122,7 @@ struct xe_guc_submit_engine_snapshot {
|
||||
u32 wqi_head;
|
||||
/** @wqi_tail: work queue item tail */
|
||||
u32 wqi_tail;
|
||||
/** @id: GuC id for this xe_engine */
|
||||
/** @id: GuC id for this exec_queue */
|
||||
u16 id;
|
||||
} guc;
|
||||
|
||||
|
@ -33,8 +33,8 @@ struct xe_guc {
|
||||
struct xe_guc_pc pc;
|
||||
/** @submission_state: GuC submission state */
|
||||
struct {
|
||||
/** @engine_lookup: Lookup an xe_engine from guc_id */
|
||||
struct xarray engine_lookup;
|
||||
/** @exec_queue_lookup: Lookup an xe_engine from guc_id */
|
||||
struct xarray exec_queue_lookup;
|
||||
/** @guc_ids: used to allocate new guc_ids, single-lrc */
|
||||
struct ida guc_ids;
|
||||
/** @guc_ids_bitmap: used to allocate new guc_ids, multi-lrc */
|
||||
|
@ -12,7 +12,7 @@
|
||||
#include "regs/xe_regs.h"
|
||||
#include "xe_bo.h"
|
||||
#include "xe_device.h"
|
||||
#include "xe_engine_types.h"
|
||||
#include "xe_exec_queue_types.h"
|
||||
#include "xe_gt.h"
|
||||
#include "xe_hw_fence.h"
|
||||
#include "xe_map.h"
|
||||
@ -604,7 +604,7 @@ static void xe_lrc_set_ppgtt(struct xe_lrc *lrc, struct xe_vm *vm)
|
||||
#define ACC_NOTIFY_S 16
|
||||
|
||||
int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
|
||||
struct xe_engine *e, struct xe_vm *vm, u32 ring_size)
|
||||
struct xe_exec_queue *q, struct xe_vm *vm, u32 ring_size)
|
||||
{
|
||||
struct xe_gt *gt = hwe->gt;
|
||||
struct xe_tile *tile = gt_to_tile(gt);
|
||||
@ -669,12 +669,12 @@ int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
|
||||
RING_CTL_SIZE(lrc->ring.size) | RING_VALID);
|
||||
if (xe->info.has_asid && vm)
|
||||
xe_lrc_write_ctx_reg(lrc, PVC_CTX_ASID,
|
||||
(e->usm.acc_granularity <<
|
||||
(q->usm.acc_granularity <<
|
||||
ACC_GRANULARITY_S) | vm->usm.asid);
|
||||
if (xe->info.supports_usm && vm)
|
||||
xe_lrc_write_ctx_reg(lrc, PVC_CTX_ACC_CTR_THOLD,
|
||||
(e->usm.acc_notify << ACC_NOTIFY_S) |
|
||||
e->usm.acc_trigger);
|
||||
(q->usm.acc_notify << ACC_NOTIFY_S) |
|
||||
q->usm.acc_trigger);
|
||||
|
||||
lrc->desc = GEN8_CTX_VALID;
|
||||
lrc->desc |= INTEL_LEGACY_64B_CONTEXT << GEN8_CTX_ADDRESSING_MODE_SHIFT;
|
||||
|
@ -8,7 +8,7 @@
|
||||
#include "xe_lrc_types.h"
|
||||
|
||||
struct xe_device;
|
||||
struct xe_engine;
|
||||
struct xe_exec_queue;
|
||||
enum xe_engine_class;
|
||||
struct xe_hw_engine;
|
||||
struct xe_vm;
|
||||
@ -16,7 +16,7 @@ struct xe_vm;
|
||||
#define LRC_PPHWSP_SCRATCH_ADDR (0x34 * 4)
|
||||
|
||||
int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
|
||||
struct xe_engine *e, struct xe_vm *vm, u32 ring_size);
|
||||
struct xe_exec_queue *q, struct xe_vm *vm, u32 ring_size);
|
||||
void xe_lrc_finish(struct xe_lrc *lrc);
|
||||
|
||||
size_t xe_lrc_size(struct xe_device *xe, enum xe_engine_class class);
|
||||
|
@ -34,8 +34,8 @@
|
||||
* struct xe_migrate - migrate context.
|
||||
*/
|
||||
struct xe_migrate {
|
||||
/** @eng: Default engine used for migration */
|
||||
struct xe_engine *eng;
|
||||
/** @q: Default exec queue used for migration */
|
||||
struct xe_exec_queue *q;
|
||||
/** @tile: Backpointer to the tile this struct xe_migrate belongs to. */
|
||||
struct xe_tile *tile;
|
||||
/** @job_mutex: Timeline mutex for @eng. */
|
||||
@ -78,9 +78,9 @@ struct xe_migrate {
|
||||
*
|
||||
* Return: The default migrate engine
|
||||
*/
|
||||
struct xe_engine *xe_tile_migrate_engine(struct xe_tile *tile)
|
||||
struct xe_exec_queue *xe_tile_migrate_engine(struct xe_tile *tile)
|
||||
{
|
||||
return tile->migrate->eng;
|
||||
return tile->migrate->q;
|
||||
}
|
||||
|
||||
static void xe_migrate_fini(struct drm_device *dev, void *arg)
|
||||
@ -88,11 +88,11 @@ static void xe_migrate_fini(struct drm_device *dev, void *arg)
|
||||
struct xe_migrate *m = arg;
|
||||
struct ww_acquire_ctx ww;
|
||||
|
||||
xe_vm_lock(m->eng->vm, &ww, 0, false);
|
||||
xe_vm_lock(m->q->vm, &ww, 0, false);
|
||||
xe_bo_unpin(m->pt_bo);
|
||||
if (m->cleared_bo)
|
||||
xe_bo_unpin(m->cleared_bo);
|
||||
xe_vm_unlock(m->eng->vm, &ww);
|
||||
xe_vm_unlock(m->q->vm, &ww);
|
||||
|
||||
dma_fence_put(m->fence);
|
||||
if (m->cleared_bo)
|
||||
@ -100,8 +100,8 @@ static void xe_migrate_fini(struct drm_device *dev, void *arg)
|
||||
xe_bo_put(m->pt_bo);
|
||||
drm_suballoc_manager_fini(&m->vm_update_sa);
|
||||
mutex_destroy(&m->job_mutex);
|
||||
xe_vm_close_and_put(m->eng->vm);
|
||||
xe_engine_put(m->eng);
|
||||
xe_vm_close_and_put(m->q->vm);
|
||||
xe_exec_queue_put(m->q);
|
||||
}
|
||||
|
||||
static u64 xe_migrate_vm_addr(u64 slot, u32 level)
|
||||
@ -341,20 +341,20 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
|
||||
if (!hwe)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
m->eng = xe_engine_create(xe, vm,
|
||||
BIT(hwe->logical_instance), 1,
|
||||
hwe, ENGINE_FLAG_KERNEL);
|
||||
m->q = xe_exec_queue_create(xe, vm,
|
||||
BIT(hwe->logical_instance), 1,
|
||||
hwe, EXEC_QUEUE_FLAG_KERNEL);
|
||||
} else {
|
||||
m->eng = xe_engine_create_class(xe, primary_gt, vm,
|
||||
XE_ENGINE_CLASS_COPY,
|
||||
ENGINE_FLAG_KERNEL);
|
||||
m->q = xe_exec_queue_create_class(xe, primary_gt, vm,
|
||||
XE_ENGINE_CLASS_COPY,
|
||||
EXEC_QUEUE_FLAG_KERNEL);
|
||||
}
|
||||
if (IS_ERR(m->eng)) {
|
||||
if (IS_ERR(m->q)) {
|
||||
xe_vm_close_and_put(vm);
|
||||
return ERR_CAST(m->eng);
|
||||
return ERR_CAST(m->q);
|
||||
}
|
||||
if (xe->info.supports_usm)
|
||||
m->eng->priority = XE_ENGINE_PRIORITY_KERNEL;
|
||||
m->q->priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
|
||||
|
||||
mutex_init(&m->job_mutex);
|
||||
|
||||
@ -456,7 +456,7 @@ static void emit_pte(struct xe_migrate *m,
|
||||
addr = xe_res_dma(cur) & PAGE_MASK;
|
||||
if (is_vram) {
|
||||
/* Is this a 64K PTE entry? */
|
||||
if ((m->eng->vm->flags & XE_VM_FLAG_64K) &&
|
||||
if ((m->q->vm->flags & XE_VM_FLAG_64K) &&
|
||||
!(cur_ofs & (16 * 8 - 1))) {
|
||||
XE_WARN_ON(!IS_ALIGNED(addr, SZ_64K));
|
||||
addr |= XE_PTE_PS64;
|
||||
@ -714,7 +714,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
src_L0, ccs_ofs, copy_ccs);

mutex_lock(&m->job_mutex);
job = xe_bb_create_migration_job(m->eng, bb,
job = xe_bb_create_migration_job(m->q, bb,
xe_migrate_batch_base(m, usm),
update_idx);
if (IS_ERR(job)) {
@ -938,7 +938,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
}

mutex_lock(&m->job_mutex);
job = xe_bb_create_migration_job(m->eng, bb,
job = xe_bb_create_migration_job(m->q, bb,
xe_migrate_batch_base(m, usm),
update_idx);
if (IS_ERR(job)) {
@ -1024,7 +1024,7 @@ static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,

struct xe_vm *xe_migrate_get_vm(struct xe_migrate *m)
{
return xe_vm_get(m->eng->vm);
return xe_vm_get(m->q->vm);
}

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
@ -1106,7 +1106,7 @@ static bool no_in_syncs(struct xe_sync_entry *syncs, u32 num_syncs)
* @m: The migrate context.
* @vm: The vm we'll be updating.
* @bo: The bo whose dma-resv we will await before updating, or NULL if userptr.
* @eng: The engine to be used for the update or NULL if the default
* @q: The exec queue to be used for the update or NULL if the default
* migration engine is to be used.
* @updates: An array of update descriptors.
* @num_updates: Number of descriptors in @updates.
@ -1132,7 +1132,7 @@ struct dma_fence *
xe_migrate_update_pgtables(struct xe_migrate *m,
struct xe_vm *vm,
struct xe_bo *bo,
struct xe_engine *eng,
struct xe_exec_queue *q,
const struct xe_vm_pgtable_update *updates,
u32 num_updates,
struct xe_sync_entry *syncs, u32 num_syncs,
@ -1150,13 +1150,13 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
u32 i, batch_size, ppgtt_ofs, update_idx, page_ofs = 0;
u64 addr;
int err = 0;
bool usm = !eng && xe->info.supports_usm;
bool usm = !q && xe->info.supports_usm;
bool first_munmap_rebind = vma &&
vma->gpuva.flags & XE_VMA_FIRST_REBIND;
struct xe_engine *eng_override = !eng ? m->eng : eng;
struct xe_exec_queue *q_override = !q ? m->q : q;

/* Use the CPU if no in syncs and engine is idle */
if (no_in_syncs(syncs, num_syncs) && xe_engine_is_idle(eng_override)) {
if (no_in_syncs(syncs, num_syncs) && xe_exec_queue_is_idle(q_override)) {
fence = xe_migrate_update_pgtables_cpu(m, vm, bo, updates,
num_updates,
first_munmap_rebind,
@ -1186,14 +1186,14 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
*/
XE_WARN_ON(batch_size >= SZ_128K);

bb = xe_bb_new(gt, batch_size, !eng && xe->info.supports_usm);
bb = xe_bb_new(gt, batch_size, !q && xe->info.supports_usm);
if (IS_ERR(bb))
return ERR_CAST(bb);

/* For sysmem PTE's, need to map them in our hole.. */
if (!IS_DGFX(xe)) {
ppgtt_ofs = NUM_KERNEL_PDE - 1;
if (eng) {
if (q) {
XE_WARN_ON(num_updates > NUM_VMUSA_WRITES_PER_UNIT);

sa_bo = drm_suballoc_new(&m->vm_update_sa, 1,
@ -1249,10 +1249,10 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
write_pgtable(tile, bb, 0, &updates[i], pt_update);
}

if (!eng)
if (!q)
mutex_lock(&m->job_mutex);

job = xe_bb_create_migration_job(eng ?: m->eng, bb,
job = xe_bb_create_migration_job(q ?: m->q, bb,
xe_migrate_batch_base(m, usm),
update_idx);
if (IS_ERR(job)) {
@ -1295,7 +1295,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
fence = dma_fence_get(&job->drm.s_fence->finished);
xe_sched_job_push(job);

if (!eng)
if (!q)
mutex_unlock(&m->job_mutex);

xe_bb_free(bb, fence);
@ -1306,7 +1306,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
err_job:
xe_sched_job_put(job);
err_bb:
if (!eng)
if (!q)
mutex_unlock(&m->job_mutex);
xe_bb_free(bb, NULL);
err:

@ -14,7 +14,7 @@ struct ttm_resource;

struct xe_bo;
struct xe_gt;
struct xe_engine;
struct xe_exec_queue;
struct xe_migrate;
struct xe_migrate_pt_update;
struct xe_sync_entry;
@ -97,7 +97,7 @@ struct dma_fence *
xe_migrate_update_pgtables(struct xe_migrate *m,
struct xe_vm *vm,
struct xe_bo *bo,
struct xe_engine *eng,
struct xe_exec_queue *q,
const struct xe_vm_pgtable_update *updates,
u32 num_updates,
struct xe_sync_entry *syncs, u32 num_syncs,
@ -105,5 +105,5 @@ xe_migrate_update_pgtables(struct xe_migrate *m,

void xe_migrate_wait(struct xe_migrate *m);

struct xe_engine *xe_tile_migrate_engine(struct xe_tile *tile);
struct xe_exec_queue *xe_tile_migrate_engine(struct xe_tile *tile);
#endif
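
Editorial aside (not part of the commit): the renamed @q parameter keeps the old NULL semantics, so a caller that passes NULL still ends up on the tile's default migration queue via the q ?: m->q fallback above. A minimal, hypothetical caller sketch in C, assuming entries[], num_entries, syncs/num_syncs and a pt_update wrapper (embedding struct xe_migrate_pt_update, as in the bind path) have already been prepared:

	struct dma_fence *fence;

	/* bo == NULL: userptr case; q == NULL: use the default migration exec queue */
	fence = xe_migrate_update_pgtables(tile->migrate, vm, NULL, NULL,
					   entries, num_entries,
					   syncs, num_syncs,
					   &pt_update.base);
	if (IS_ERR(fence))
		return PTR_ERR(fence);
	dma_fence_put(fence);
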
@ -8,7 +8,7 @@

#include <linux/types.h>

struct xe_engine;
struct xe_exec_queue;
struct xe_gt;

void xe_mocs_init_early(struct xe_gt *gt);

@ -15,19 +15,19 @@ static void preempt_fence_work_func(struct work_struct *w)
bool cookie = dma_fence_begin_signalling();
struct xe_preempt_fence *pfence =
container_of(w, typeof(*pfence), preempt_work);
struct xe_engine *e = pfence->engine;
struct xe_exec_queue *q = pfence->q;

if (pfence->error)
dma_fence_set_error(&pfence->base, pfence->error);
else
e->ops->suspend_wait(e);
q->ops->suspend_wait(q);

dma_fence_signal(&pfence->base);
dma_fence_end_signalling(cookie);

xe_vm_queue_rebind_worker(e->vm);
xe_vm_queue_rebind_worker(q->vm);

xe_engine_put(e);
xe_exec_queue_put(q);
}

static const char *
@ -46,9 +46,9 @@ static bool preempt_fence_enable_signaling(struct dma_fence *fence)
{
struct xe_preempt_fence *pfence =
container_of(fence, typeof(*pfence), base);
struct xe_engine *e = pfence->engine;
struct xe_exec_queue *q = pfence->q;

pfence->error = e->ops->suspend(e);
pfence->error = q->ops->suspend(q);
queue_work(system_unbound_wq, &pfence->preempt_work);
return true;
}
@ -104,43 +104,43 @@ void xe_preempt_fence_free(struct xe_preempt_fence *pfence)
* xe_preempt_fence_alloc().
* @pfence: The struct xe_preempt_fence pointer returned from
* xe_preempt_fence_alloc().
* @e: The struct xe_engine used for arming.
* @q: The struct xe_exec_queue used for arming.
* @context: The dma-fence context used for arming.
* @seqno: The dma-fence seqno used for arming.
*
* Inserts the preempt fence into @context's timeline, takes @link off any
* list, and registers the struct xe_engine as the xe_engine to be preempted.
* list, and registers the struct xe_exec_queue as the xe_engine to be preempted.
*
* Return: A pointer to a struct dma_fence embedded into the preempt fence.
* This function doesn't error.
*/
struct dma_fence *
xe_preempt_fence_arm(struct xe_preempt_fence *pfence, struct xe_engine *e,
xe_preempt_fence_arm(struct xe_preempt_fence *pfence, struct xe_exec_queue *q,
u64 context, u32 seqno)
{
list_del_init(&pfence->link);
pfence->engine = xe_engine_get(e);
pfence->q = xe_exec_queue_get(q);
dma_fence_init(&pfence->base, &preempt_fence_ops,
&e->compute.lock, context, seqno);
&q->compute.lock, context, seqno);

return &pfence->base;
}

/**
* xe_preempt_fence_create() - Helper to create and arm a preempt fence.
* @e: The struct xe_engine used for arming.
* @q: The struct xe_exec_queue used for arming.
* @context: The dma-fence context used for arming.
* @seqno: The dma-fence seqno used for arming.
*
* Allocates and inserts the preempt fence into @context's timeline,
* and registers @e as the struct xe_engine to be preempted.
* and registers @e as the struct xe_exec_queue to be preempted.
*
* Return: A pointer to the resulting struct dma_fence on success. An error
* pointer on error. In particular if allocation fails it returns
* ERR_PTR(-ENOMEM);
*/
struct dma_fence *
xe_preempt_fence_create(struct xe_engine *e,
xe_preempt_fence_create(struct xe_exec_queue *q,
u64 context, u32 seqno)
{
struct xe_preempt_fence *pfence;
@ -149,7 +149,7 @@ xe_preempt_fence_create(struct xe_engine *e,
if (IS_ERR(pfence))
return ERR_CAST(pfence);

return xe_preempt_fence_arm(pfence, e, context, seqno);
return xe_preempt_fence_arm(pfence, q, context, seqno);
}

bool xe_fence_is_xe_preempt(const struct dma_fence *fence)

@ -11,7 +11,7 @@
struct list_head;

struct dma_fence *
xe_preempt_fence_create(struct xe_engine *e,
xe_preempt_fence_create(struct xe_exec_queue *q,
u64 context, u32 seqno);

struct xe_preempt_fence *xe_preempt_fence_alloc(void);
@ -19,7 +19,7 @@ struct xe_preempt_fence *xe_preempt_fence_alloc(void);
void xe_preempt_fence_free(struct xe_preempt_fence *pfence);

struct dma_fence *
xe_preempt_fence_arm(struct xe_preempt_fence *pfence, struct xe_engine *e,
xe_preempt_fence_arm(struct xe_preempt_fence *pfence, struct xe_exec_queue *q,
u64 context, u32 seqno);

static inline struct xe_preempt_fence *

@ -9,12 +9,11 @@
#include <linux/dma-fence.h>
#include <linux/workqueue.h>

struct xe_engine;
struct xe_exec_queue;

/**
* struct xe_preempt_fence - XE preempt fence
*
* A preemption fence which suspends the execution of an xe_engine on the
* hardware and triggers a callback once the xe_engine is complete.
*/
struct xe_preempt_fence {
@ -22,8 +21,8 @@ struct xe_preempt_fence {
struct dma_fence base;
/** @link: link into list of pending preempt fences */
struct list_head link;
/** @engine: xe engine for this preempt fence */
struct xe_engine *engine;
/** @q: exec queue for this preempt fence */
struct xe_exec_queue *q;
/** @preempt_work: work struct which issues preemption */
struct work_struct preempt_work;
/** @error: preempt fence is in error state */
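
Editorial aside (not part of the commit): a hypothetical helper illustrates how the renamed member is reached from a fence, mirroring the container_of() pattern used in preempt_fence_enable_signaling() above:

	static struct xe_exec_queue *
	preempt_fence_to_exec_queue(struct dma_fence *fence)
	{
		struct xe_preempt_fence *pfence =
			container_of(fence, typeof(*pfence), base);

		/* was pfence->engine before this rename */
		return pfence->q;
	}
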
@ -1307,7 +1307,7 @@ static void xe_pt_calc_rfence_interval(struct xe_vma *vma,
* address range.
* @tile: The tile to bind for.
* @vma: The vma to bind.
* @e: The engine with which to do pipelined page-table updates.
* @q: The exec_queue with which to do pipelined page-table updates.
* @syncs: Entries to sync on before binding the built tree to the live vm tree.
* @num_syncs: Number of @sync entries.
* @rebind: Whether we're rebinding this vma to the same address range without
@ -1325,7 +1325,7 @@ static void xe_pt_calc_rfence_interval(struct xe_vma *vma,
* on success, an error pointer on error.
*/
struct dma_fence *
__xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e,
__xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
struct xe_sync_entry *syncs, u32 num_syncs,
bool rebind)
{
@ -1351,7 +1351,7 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e,

vm_dbg(&xe_vma_vm(vma)->xe->drm,
"Preparing bind, with range [%llx...%llx) engine %p.\n",
xe_vma_start(vma), xe_vma_end(vma) - 1, e);
xe_vma_start(vma), xe_vma_end(vma) - 1, q);

err = xe_pt_prepare_bind(tile, vma, entries, &num_entries, rebind);
if (err)
@ -1388,7 +1388,7 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e,
}

fence = xe_migrate_update_pgtables(tile->migrate,
vm, xe_vma_bo(vma), e,
vm, xe_vma_bo(vma), q,
entries, num_entries,
syncs, num_syncs,
&bind_pt_update.base);
@ -1663,7 +1663,7 @@ static const struct xe_migrate_pt_update_ops userptr_unbind_ops = {
* address range.
* @tile: The tile to unbind for.
* @vma: The vma to unbind.
* @e: The engine with which to do pipelined page-table updates.
* @q: The exec_queue with which to do pipelined page-table updates.
* @syncs: Entries to sync on before disconnecting the tree to be destroyed.
* @num_syncs: Number of @sync entries.
*
@ -1679,7 +1679,7 @@ static const struct xe_migrate_pt_update_ops userptr_unbind_ops = {
* on success, an error pointer on error.
*/
struct dma_fence *
__xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e,
__xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
struct xe_sync_entry *syncs, u32 num_syncs)
{
struct xe_vm_pgtable_update entries[XE_VM_MAX_LEVEL * 2 + 1];
@ -1704,7 +1704,7 @@ __xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e

vm_dbg(&xe_vma_vm(vma)->xe->drm,
"Preparing unbind, with range [%llx...%llx) engine %p.\n",
xe_vma_start(vma), xe_vma_end(vma) - 1, e);
xe_vma_start(vma), xe_vma_end(vma) - 1, q);

num_entries = xe_pt_stage_unbind(tile, vma, entries);
XE_WARN_ON(num_entries > ARRAY_SIZE(entries));
@ -1729,8 +1729,8 @@ __xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e
* lower level, because it needs to be more conservative.
*/
fence = xe_migrate_update_pgtables(tile->migrate,
vm, NULL, e ? e :
vm->eng[tile->id],
vm, NULL, q ? q :
vm->q[tile->id],
entries, num_entries,
syncs, num_syncs,
&unbind_pt_update.base);

@ -12,7 +12,7 @@
struct dma_fence;
struct xe_bo;
struct xe_device;
struct xe_engine;
struct xe_exec_queue;
struct xe_sync_entry;
struct xe_tile;
struct xe_vm;
@ -35,12 +35,12 @@ void xe_pt_populate_empty(struct xe_tile *tile, struct xe_vm *vm,
void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred);

struct dma_fence *
__xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e,
__xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
struct xe_sync_entry *syncs, u32 num_syncs,
bool rebind);

struct dma_fence *
__xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e,
__xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
struct xe_sync_entry *syncs, u32 num_syncs);

bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma);
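
Editorial aside (not part of the commit): a hedged sketch of a caller of the renamed bind helper, following the same shape as the VM bind path elsewhere in this commit, where the tile's default queue is substituted when no exec queue was supplied. tile, vma, vm, q, syncs and num_syncs are assumed to be in scope:

	struct dma_fence *fence;

	fence = __xe_pt_bind_vma(tile, vma, q ? q : vm->q[tile->id],
				 syncs, num_syncs,
				 vma->tile_present & BIT(tile->id));
	if (IS_ERR(fence))
		return PTR_ERR(fence);
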
@ -203,7 +203,7 @@ static int query_config(struct xe_device *xe, struct drm_xe_device_query *query)
config->info[XE_QUERY_CONFIG_MEM_REGION_COUNT] =
hweight_long(xe->info.mem_region_mask);
config->info[XE_QUERY_CONFIG_MAX_ENGINE_PRIORITY] =
xe_engine_device_get_max_priority(xe);
xe_exec_queue_device_get_max_priority(xe);

if (copy_to_user(query_ptr, config, size)) {
kfree(config);

@ -10,7 +10,7 @@
|
||||
#include "regs/xe_gt_regs.h"
|
||||
#include "regs/xe_lrc_layout.h"
|
||||
#include "regs/xe_regs.h"
|
||||
#include "xe_engine_types.h"
|
||||
#include "xe_exec_queue_types.h"
|
||||
#include "xe_gt.h"
|
||||
#include "xe_lrc.h"
|
||||
#include "xe_macros.h"
|
||||
@ -156,7 +156,7 @@ static int emit_store_imm_ppgtt_posted(u64 addr, u64 value,
|
||||
|
||||
static int emit_render_cache_flush(struct xe_sched_job *job, u32 *dw, int i)
|
||||
{
|
||||
struct xe_gt *gt = job->engine->gt;
|
||||
struct xe_gt *gt = job->q->gt;
|
||||
bool lacks_render = !(gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK);
|
||||
u32 flags;
|
||||
|
||||
@ -172,7 +172,7 @@ static int emit_render_cache_flush(struct xe_sched_job *job, u32 *dw, int i)
|
||||
|
||||
if (lacks_render)
|
||||
flags &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
|
||||
else if (job->engine->class == XE_ENGINE_CLASS_COMPUTE)
|
||||
else if (job->q->class == XE_ENGINE_CLASS_COMPUTE)
|
||||
flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
|
||||
|
||||
dw[i++] = GFX_OP_PIPE_CONTROL(6) | PIPE_CONTROL0_HDC_PIPELINE_FLUSH;
|
||||
@ -202,7 +202,7 @@ static int emit_pipe_imm_ggtt(u32 addr, u32 value, bool stall_only, u32 *dw,
|
||||
|
||||
static u32 get_ppgtt_flag(struct xe_sched_job *job)
|
||||
{
|
||||
return !(job->engine->flags & ENGINE_FLAG_WA) ? BIT(8) : 0;
|
||||
return !(job->q->flags & EXEC_QUEUE_FLAG_WA) ? BIT(8) : 0;
|
||||
}
|
||||
|
||||
static void __emit_job_gen12_copy(struct xe_sched_job *job, struct xe_lrc *lrc,
|
||||
@ -210,7 +210,7 @@ static void __emit_job_gen12_copy(struct xe_sched_job *job, struct xe_lrc *lrc,
|
||||
{
|
||||
u32 dw[MAX_JOB_SIZE_DW], i = 0;
|
||||
u32 ppgtt_flag = get_ppgtt_flag(job);
|
||||
struct xe_vm *vm = job->engine->vm;
|
||||
struct xe_vm *vm = job->q->vm;
|
||||
|
||||
if (vm->batch_invalidate_tlb) {
|
||||
dw[i++] = preparser_disable(true);
|
||||
@ -255,10 +255,10 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
|
||||
{
|
||||
u32 dw[MAX_JOB_SIZE_DW], i = 0;
|
||||
u32 ppgtt_flag = get_ppgtt_flag(job);
|
||||
struct xe_gt *gt = job->engine->gt;
|
||||
struct xe_gt *gt = job->q->gt;
|
||||
struct xe_device *xe = gt_to_xe(gt);
|
||||
bool decode = job->engine->class == XE_ENGINE_CLASS_VIDEO_DECODE;
|
||||
struct xe_vm *vm = job->engine->vm;
|
||||
bool decode = job->q->class == XE_ENGINE_CLASS_VIDEO_DECODE;
|
||||
struct xe_vm *vm = job->q->vm;
|
||||
|
||||
dw[i++] = preparser_disable(true);
|
||||
|
||||
@ -302,16 +302,16 @@ static void __emit_job_gen12_render_compute(struct xe_sched_job *job,
|
||||
{
|
||||
u32 dw[MAX_JOB_SIZE_DW], i = 0;
|
||||
u32 ppgtt_flag = get_ppgtt_flag(job);
|
||||
struct xe_gt *gt = job->engine->gt;
|
||||
struct xe_gt *gt = job->q->gt;
|
||||
struct xe_device *xe = gt_to_xe(gt);
|
||||
bool lacks_render = !(gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK);
|
||||
struct xe_vm *vm = job->engine->vm;
|
||||
struct xe_vm *vm = job->q->vm;
|
||||
u32 mask_flags = 0;
|
||||
|
||||
dw[i++] = preparser_disable(true);
|
||||
if (lacks_render)
|
||||
mask_flags = PIPE_CONTROL_3D_ARCH_FLAGS;
|
||||
else if (job->engine->class == XE_ENGINE_CLASS_COMPUTE)
|
||||
else if (job->q->class == XE_ENGINE_CLASS_COMPUTE)
|
||||
mask_flags = PIPE_CONTROL_3D_ENGINE_FLAGS;
|
||||
|
||||
/* See __xe_pt_bind_vma() for a discussion on TLB invalidations. */
|
||||
@ -378,14 +378,14 @@ static void emit_job_gen12_copy(struct xe_sched_job *job)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (xe_sched_job_is_migration(job->engine)) {
|
||||
emit_migration_job_gen12(job, job->engine->lrc,
|
||||
if (xe_sched_job_is_migration(job->q)) {
|
||||
emit_migration_job_gen12(job, job->q->lrc,
|
||||
xe_sched_job_seqno(job));
|
||||
return;
|
||||
}
|
||||
|
||||
for (i = 0; i < job->engine->width; ++i)
|
||||
__emit_job_gen12_copy(job, job->engine->lrc + i,
|
||||
for (i = 0; i < job->q->width; ++i)
|
||||
__emit_job_gen12_copy(job, job->q->lrc + i,
|
||||
job->batch_addr[i],
|
||||
xe_sched_job_seqno(job));
|
||||
}
|
||||
@ -395,8 +395,8 @@ static void emit_job_gen12_video(struct xe_sched_job *job)
|
||||
int i;
|
||||
|
||||
/* FIXME: Not doing parallel handshake for now */
|
||||
for (i = 0; i < job->engine->width; ++i)
|
||||
__emit_job_gen12_video(job, job->engine->lrc + i,
|
||||
for (i = 0; i < job->q->width; ++i)
|
||||
__emit_job_gen12_video(job, job->q->lrc + i,
|
||||
job->batch_addr[i],
|
||||
xe_sched_job_seqno(job));
|
||||
}
|
||||
@ -405,8 +405,8 @@ static void emit_job_gen12_render_compute(struct xe_sched_job *job)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < job->engine->width; ++i)
|
||||
__emit_job_gen12_render_compute(job, job->engine->lrc + i,
|
||||
for (i = 0; i < job->q->width; ++i)
|
||||
__emit_job_gen12_render_compute(job, job->q->lrc + i,
|
||||
job->batch_addr[i],
|
||||
xe_sched_job_seqno(job));
|
||||
}
|
||||
|
@ -57,58 +57,58 @@ static struct xe_sched_job *job_alloc(bool parallel)
|
||||
xe_sched_job_slab, GFP_KERNEL);
|
||||
}
|
||||
|
||||
bool xe_sched_job_is_migration(struct xe_engine *e)
|
||||
bool xe_sched_job_is_migration(struct xe_exec_queue *q)
|
||||
{
|
||||
return e->vm && (e->vm->flags & XE_VM_FLAG_MIGRATION) &&
|
||||
!(e->flags & ENGINE_FLAG_WA);
|
||||
return q->vm && (q->vm->flags & XE_VM_FLAG_MIGRATION) &&
|
||||
!(q->flags & EXEC_QUEUE_FLAG_WA);
|
||||
}
|
||||
|
||||
static void job_free(struct xe_sched_job *job)
|
||||
{
|
||||
struct xe_engine *e = job->engine;
|
||||
bool is_migration = xe_sched_job_is_migration(e);
|
||||
struct xe_exec_queue *q = job->q;
|
||||
bool is_migration = xe_sched_job_is_migration(q);
|
||||
|
||||
kmem_cache_free(xe_engine_is_parallel(job->engine) || is_migration ?
|
||||
kmem_cache_free(xe_exec_queue_is_parallel(job->q) || is_migration ?
|
||||
xe_sched_job_parallel_slab : xe_sched_job_slab, job);
|
||||
}
|
||||
|
||||
static struct xe_device *job_to_xe(struct xe_sched_job *job)
|
||||
{
|
||||
return gt_to_xe(job->engine->gt);
|
||||
return gt_to_xe(job->q->gt);
|
||||
}
|
||||
|
||||
struct xe_sched_job *xe_sched_job_create(struct xe_engine *e,
|
||||
struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
|
||||
u64 *batch_addr)
|
||||
{
|
||||
struct xe_sched_job *job;
|
||||
struct dma_fence **fences;
|
||||
bool is_migration = xe_sched_job_is_migration(e);
|
||||
bool is_migration = xe_sched_job_is_migration(q);
|
||||
int err;
|
||||
int i, j;
|
||||
u32 width;
|
||||
|
||||
/* Migration and kernel engines have their own locking */
|
||||
if (!(e->flags & (ENGINE_FLAG_KERNEL | ENGINE_FLAG_VM |
|
||||
ENGINE_FLAG_WA))) {
|
||||
lockdep_assert_held(&e->vm->lock);
|
||||
if (!xe_vm_no_dma_fences(e->vm))
|
||||
xe_vm_assert_held(e->vm);
|
||||
if (!(q->flags & (EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_VM |
|
||||
EXEC_QUEUE_FLAG_WA))) {
|
||||
lockdep_assert_held(&q->vm->lock);
|
||||
if (!xe_vm_no_dma_fences(q->vm))
|
||||
xe_vm_assert_held(q->vm);
|
||||
}
|
||||
|
||||
job = job_alloc(xe_engine_is_parallel(e) || is_migration);
|
||||
job = job_alloc(xe_exec_queue_is_parallel(q) || is_migration);
|
||||
if (!job)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
job->engine = e;
|
||||
job->q = q;
|
||||
kref_init(&job->refcount);
|
||||
xe_engine_get(job->engine);
|
||||
xe_exec_queue_get(job->q);
|
||||
|
||||
err = drm_sched_job_init(&job->drm, e->entity, 1, NULL);
|
||||
err = drm_sched_job_init(&job->drm, q->entity, 1, NULL);
|
||||
if (err)
|
||||
goto err_free;
|
||||
|
||||
if (!xe_engine_is_parallel(e)) {
|
||||
job->fence = xe_lrc_create_seqno_fence(e->lrc);
|
||||
if (!xe_exec_queue_is_parallel(q)) {
|
||||
job->fence = xe_lrc_create_seqno_fence(q->lrc);
|
||||
if (IS_ERR(job->fence)) {
|
||||
err = PTR_ERR(job->fence);
|
||||
goto err_sched_job;
|
||||
@ -116,38 +116,38 @@ struct xe_sched_job *xe_sched_job_create(struct xe_engine *e,
|
||||
} else {
|
||||
struct dma_fence_array *cf;
|
||||
|
||||
fences = kmalloc_array(e->width, sizeof(*fences), GFP_KERNEL);
|
||||
fences = kmalloc_array(q->width, sizeof(*fences), GFP_KERNEL);
|
||||
if (!fences) {
|
||||
err = -ENOMEM;
|
||||
goto err_sched_job;
|
||||
}
|
||||
|
||||
for (j = 0; j < e->width; ++j) {
|
||||
fences[j] = xe_lrc_create_seqno_fence(e->lrc + j);
|
||||
for (j = 0; j < q->width; ++j) {
|
||||
fences[j] = xe_lrc_create_seqno_fence(q->lrc + j);
|
||||
if (IS_ERR(fences[j])) {
|
||||
err = PTR_ERR(fences[j]);
|
||||
goto err_fences;
|
||||
}
|
||||
}
|
||||
|
||||
cf = dma_fence_array_create(e->width, fences,
|
||||
e->parallel.composite_fence_ctx,
|
||||
e->parallel.composite_fence_seqno++,
|
||||
cf = dma_fence_array_create(q->width, fences,
|
||||
q->parallel.composite_fence_ctx,
|
||||
q->parallel.composite_fence_seqno++,
|
||||
false);
|
||||
if (!cf) {
|
||||
--e->parallel.composite_fence_seqno;
|
||||
--q->parallel.composite_fence_seqno;
|
||||
err = -ENOMEM;
|
||||
goto err_fences;
|
||||
}
|
||||
|
||||
/* Sanity check */
|
||||
for (j = 0; j < e->width; ++j)
|
||||
for (j = 0; j < q->width; ++j)
|
||||
XE_WARN_ON(cf->base.seqno != fences[j]->seqno);
|
||||
|
||||
job->fence = &cf->base;
|
||||
}
|
||||
|
||||
width = e->width;
|
||||
width = q->width;
|
||||
if (is_migration)
|
||||
width = 2;
|
||||
|
||||
@ -155,7 +155,7 @@ struct xe_sched_job *xe_sched_job_create(struct xe_engine *e,
|
||||
job->batch_addr[i] = batch_addr[i];
|
||||
|
||||
/* All other jobs require a VM to be open which has a ref */
|
||||
if (unlikely(e->flags & ENGINE_FLAG_KERNEL))
|
||||
if (unlikely(q->flags & EXEC_QUEUE_FLAG_KERNEL))
|
||||
xe_device_mem_access_get(job_to_xe(job));
|
||||
xe_device_assert_mem_access(job_to_xe(job));
|
||||
|
||||
@ -164,14 +164,14 @@ struct xe_sched_job *xe_sched_job_create(struct xe_engine *e,
|
||||
|
||||
err_fences:
|
||||
for (j = j - 1; j >= 0; --j) {
|
||||
--e->lrc[j].fence_ctx.next_seqno;
|
||||
--q->lrc[j].fence_ctx.next_seqno;
|
||||
dma_fence_put(fences[j]);
|
||||
}
|
||||
kfree(fences);
|
||||
err_sched_job:
|
||||
drm_sched_job_cleanup(&job->drm);
|
||||
err_free:
|
||||
xe_engine_put(e);
|
||||
xe_exec_queue_put(q);
|
||||
job_free(job);
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
@ -188,9 +188,9 @@ void xe_sched_job_destroy(struct kref *ref)
|
||||
struct xe_sched_job *job =
|
||||
container_of(ref, struct xe_sched_job, refcount);
|
||||
|
||||
if (unlikely(job->engine->flags & ENGINE_FLAG_KERNEL))
|
||||
if (unlikely(job->q->flags & EXEC_QUEUE_FLAG_KERNEL))
|
||||
xe_device_mem_access_put(job_to_xe(job));
|
||||
xe_engine_put(job->engine);
|
||||
xe_exec_queue_put(job->q);
|
||||
dma_fence_put(job->fence);
|
||||
drm_sched_job_cleanup(&job->drm);
|
||||
job_free(job);
|
||||
@ -222,12 +222,12 @@ void xe_sched_job_set_error(struct xe_sched_job *job, int error)
|
||||
trace_xe_sched_job_set_error(job);
|
||||
|
||||
dma_fence_enable_sw_signaling(job->fence);
|
||||
xe_hw_fence_irq_run(job->engine->fence_irq);
|
||||
xe_hw_fence_irq_run(job->q->fence_irq);
|
||||
}
|
||||
|
||||
bool xe_sched_job_started(struct xe_sched_job *job)
|
||||
{
|
||||
struct xe_lrc *lrc = job->engine->lrc;
|
||||
struct xe_lrc *lrc = job->q->lrc;
|
||||
|
||||
return !__dma_fence_is_later(xe_sched_job_seqno(job),
|
||||
xe_lrc_start_seqno(lrc),
|
||||
@ -236,7 +236,7 @@ bool xe_sched_job_started(struct xe_sched_job *job)
|
||||
|
||||
bool xe_sched_job_completed(struct xe_sched_job *job)
|
||||
{
|
||||
struct xe_lrc *lrc = job->engine->lrc;
|
||||
struct xe_lrc *lrc = job->q->lrc;
|
||||
|
||||
/*
|
||||
* Can safely check just LRC[0] seqno as that is last seqno written when
|
||||
|
@ -14,7 +14,7 @@
|
||||
int xe_sched_job_module_init(void);
|
||||
void xe_sched_job_module_exit(void);
|
||||
|
||||
struct xe_sched_job *xe_sched_job_create(struct xe_engine *e,
|
||||
struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
|
||||
u64 *batch_addr);
|
||||
void xe_sched_job_destroy(struct kref *ref);
|
||||
|
||||
@ -71,6 +71,6 @@ xe_sched_job_add_migrate_flush(struct xe_sched_job *job, u32 flags)
|
||||
job->migrate_flush_flags = flags;
|
||||
}
|
||||
|
||||
bool xe_sched_job_is_migration(struct xe_engine *e);
|
||||
bool xe_sched_job_is_migration(struct xe_exec_queue *q);
|
||||
|
||||
#endif
|
||||
|
@ -10,7 +10,7 @@
|
||||
|
||||
#include <drm/gpu_scheduler.h>
|
||||
|
||||
struct xe_engine;
|
||||
struct xe_exec_queue;
|
||||
|
||||
/**
|
||||
* struct xe_sched_job - XE schedule job (batch buffer tracking)
|
||||
@ -18,8 +18,8 @@ struct xe_engine;
|
||||
struct xe_sched_job {
|
||||
/** @drm: base DRM scheduler job */
|
||||
struct drm_sched_job drm;
|
||||
/** @engine: XE submission engine */
|
||||
struct xe_engine *engine;
|
||||
/** @q: Exec queue */
|
||||
struct xe_exec_queue *q;
|
||||
/** @refcount: ref count of this job */
|
||||
struct kref refcount;
|
||||
/**
|
||||
|
@ -13,11 +13,11 @@
|
||||
#include <linux/types.h>
|
||||
|
||||
#include "xe_bo_types.h"
|
||||
#include "xe_engine_types.h"
|
||||
#include "xe_exec_queue_types.h"
|
||||
#include "xe_gpu_scheduler_types.h"
|
||||
#include "xe_gt_tlb_invalidation_types.h"
|
||||
#include "xe_gt_types.h"
|
||||
#include "xe_guc_engine_types.h"
|
||||
#include "xe_guc_exec_queue_types.h"
|
||||
#include "xe_sched_job.h"
|
||||
#include "xe_vm.h"
|
||||
|
||||
@ -105,9 +105,9 @@ DEFINE_EVENT(xe_bo, xe_bo_move,
|
||||
TP_ARGS(bo)
|
||||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(xe_engine,
|
||||
TP_PROTO(struct xe_engine *e),
|
||||
TP_ARGS(e),
|
||||
DECLARE_EVENT_CLASS(xe_exec_queue,
|
||||
TP_PROTO(struct xe_exec_queue *q),
|
||||
TP_ARGS(q),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(enum xe_engine_class, class)
|
||||
@ -120,13 +120,13 @@ DECLARE_EVENT_CLASS(xe_engine,
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->class = e->class;
|
||||
__entry->logical_mask = e->logical_mask;
|
||||
__entry->gt_id = e->gt->info.id;
|
||||
__entry->width = e->width;
|
||||
__entry->guc_id = e->guc->id;
|
||||
__entry->guc_state = atomic_read(&e->guc->state);
|
||||
__entry->flags = e->flags;
|
||||
__entry->class = q->class;
|
||||
__entry->logical_mask = q->logical_mask;
|
||||
__entry->gt_id = q->gt->info.id;
|
||||
__entry->width = q->width;
|
||||
__entry->guc_id = q->guc->id;
|
||||
__entry->guc_state = atomic_read(&q->guc->state);
|
||||
__entry->flags = q->flags;
|
||||
),
|
||||
|
||||
TP_printk("%d:0x%x, gt=%d, width=%d, guc_id=%d, guc_state=0x%x, flags=0x%x",
|
||||
@ -135,94 +135,94 @@ DECLARE_EVENT_CLASS(xe_engine,
|
||||
__entry->guc_state, __entry->flags)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(xe_engine, xe_engine_create,
|
||||
TP_PROTO(struct xe_engine *e),
|
||||
TP_ARGS(e)
|
||||
DEFINE_EVENT(xe_exec_queue, xe_exec_queue_create,
|
||||
TP_PROTO(struct xe_exec_queue *q),
|
||||
TP_ARGS(q)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(xe_engine, xe_engine_supress_resume,
|
||||
TP_PROTO(struct xe_engine *e),
|
||||
TP_ARGS(e)
|
||||
DEFINE_EVENT(xe_exec_queue, xe_exec_queue_supress_resume,
|
||||
TP_PROTO(struct xe_exec_queue *q),
|
||||
TP_ARGS(q)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(xe_engine, xe_engine_submit,
|
||||
TP_PROTO(struct xe_engine *e),
|
||||
TP_ARGS(e)
|
||||
DEFINE_EVENT(xe_exec_queue, xe_exec_queue_submit,
|
||||
TP_PROTO(struct xe_exec_queue *q),
|
||||
TP_ARGS(q)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(xe_engine, xe_engine_scheduling_enable,
|
||||
TP_PROTO(struct xe_engine *e),
|
||||
TP_ARGS(e)
|
||||
DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_enable,
|
||||
TP_PROTO(struct xe_exec_queue *q),
|
||||
TP_ARGS(q)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(xe_engine, xe_engine_scheduling_disable,
|
||||
TP_PROTO(struct xe_engine *e),
|
||||
TP_ARGS(e)
|
||||
DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_disable,
|
||||
TP_PROTO(struct xe_exec_queue *q),
|
||||
TP_ARGS(q)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(xe_engine, xe_engine_scheduling_done,
|
||||
TP_PROTO(struct xe_engine *e),
|
||||
TP_ARGS(e)
|
||||
DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_done,
|
||||
TP_PROTO(struct xe_exec_queue *q),
|
||||
TP_ARGS(q)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(xe_engine, xe_engine_register,
|
||||
TP_PROTO(struct xe_engine *e),
|
||||
TP_ARGS(e)
|
||||
DEFINE_EVENT(xe_exec_queue, xe_exec_queue_register,
|
||||
TP_PROTO(struct xe_exec_queue *q),
|
||||
TP_ARGS(q)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(xe_engine, xe_engine_deregister,
|
||||
TP_PROTO(struct xe_engine *e),
|
||||
TP_ARGS(e)
|
||||
DEFINE_EVENT(xe_exec_queue, xe_exec_queue_deregister,
|
||||
TP_PROTO(struct xe_exec_queue *q),
|
||||
TP_ARGS(q)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(xe_engine, xe_engine_deregister_done,
|
||||
TP_PROTO(struct xe_engine *e),
|
||||
TP_ARGS(e)
|
||||
DEFINE_EVENT(xe_exec_queue, xe_exec_queue_deregister_done,
|
||||
TP_PROTO(struct xe_exec_queue *q),
|
||||
TP_ARGS(q)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(xe_engine, xe_engine_close,
|
||||
TP_PROTO(struct xe_engine *e),
|
||||
TP_ARGS(e)
|
||||
DEFINE_EVENT(xe_exec_queue, xe_exec_queue_close,
|
||||
TP_PROTO(struct xe_exec_queue *q),
|
||||
TP_ARGS(q)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(xe_engine, xe_engine_kill,
|
||||
TP_PROTO(struct xe_engine *e),
|
||||
TP_ARGS(e)
|
||||
DEFINE_EVENT(xe_exec_queue, xe_exec_queue_kill,
|
||||
TP_PROTO(struct xe_exec_queue *q),
|
||||
TP_ARGS(q)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(xe_engine, xe_engine_cleanup_entity,
|
||||
TP_PROTO(struct xe_engine *e),
|
||||
TP_ARGS(e)
|
||||
DEFINE_EVENT(xe_exec_queue, xe_exec_queue_cleanup_entity,
|
||||
TP_PROTO(struct xe_exec_queue *q),
|
||||
TP_ARGS(q)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(xe_engine, xe_engine_destroy,
|
||||
TP_PROTO(struct xe_engine *e),
|
||||
TP_ARGS(e)
|
||||
DEFINE_EVENT(xe_exec_queue, xe_exec_queue_destroy,
|
||||
TP_PROTO(struct xe_exec_queue *q),
|
||||
TP_ARGS(q)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(xe_engine, xe_engine_reset,
|
||||
TP_PROTO(struct xe_engine *e),
|
||||
TP_ARGS(e)
|
||||
DEFINE_EVENT(xe_exec_queue, xe_exec_queue_reset,
|
||||
TP_PROTO(struct xe_exec_queue *q),
|
||||
TP_ARGS(q)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(xe_engine, xe_engine_memory_cat_error,
|
||||
TP_PROTO(struct xe_engine *e),
|
||||
TP_ARGS(e)
|
||||
DEFINE_EVENT(xe_exec_queue, xe_exec_queue_memory_cat_error,
|
||||
TP_PROTO(struct xe_exec_queue *q),
|
||||
TP_ARGS(q)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(xe_engine, xe_engine_stop,
|
||||
TP_PROTO(struct xe_engine *e),
|
||||
TP_ARGS(e)
|
||||
DEFINE_EVENT(xe_exec_queue, xe_exec_queue_stop,
|
||||
TP_PROTO(struct xe_exec_queue *q),
|
||||
TP_ARGS(q)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(xe_engine, xe_engine_resubmit,
|
||||
TP_PROTO(struct xe_engine *e),
|
||||
TP_ARGS(e)
|
||||
DEFINE_EVENT(xe_exec_queue, xe_exec_queue_resubmit,
|
||||
TP_PROTO(struct xe_exec_queue *q),
|
||||
TP_ARGS(q)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(xe_engine, xe_engine_lr_cleanup,
|
||||
TP_PROTO(struct xe_engine *e),
|
||||
TP_ARGS(e)
|
||||
DEFINE_EVENT(xe_exec_queue, xe_exec_queue_lr_cleanup,
|
||||
TP_PROTO(struct xe_exec_queue *q),
|
||||
TP_ARGS(q)
|
||||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(xe_sched_job,
|
||||
@ -241,10 +241,10 @@ DECLARE_EVENT_CLASS(xe_sched_job,
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->seqno = xe_sched_job_seqno(job);
|
||||
__entry->guc_id = job->engine->guc->id;
|
||||
__entry->guc_id = job->q->guc->id;
|
||||
__entry->guc_state =
|
||||
atomic_read(&job->engine->guc->state);
|
||||
__entry->flags = job->engine->flags;
|
||||
atomic_read(&job->q->guc->state);
|
||||
__entry->flags = job->q->flags;
|
||||
__entry->error = job->fence->error;
|
||||
__entry->fence = (unsigned long)job->fence;
|
||||
__entry->batch_addr = (u64)job->batch_addr[0];
|
||||
@ -303,7 +303,7 @@ DECLARE_EVENT_CLASS(xe_sched_msg,
|
||||
TP_fast_assign(
|
||||
__entry->opcode = msg->opcode;
|
||||
__entry->guc_id =
|
||||
((struct xe_engine *)msg->private_data)->guc->id;
|
||||
((struct xe_exec_queue *)msg->private_data)->guc->id;
|
||||
),
|
||||
|
||||
TP_printk("guc_id=%d, opcode=%u", __entry->guc_id,
|
||||
|
@ -165,15 +165,15 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma)
|
||||
|
||||
static bool preempt_fences_waiting(struct xe_vm *vm)
|
||||
{
|
||||
struct xe_engine *e;
|
||||
struct xe_exec_queue *q;
|
||||
|
||||
lockdep_assert_held(&vm->lock);
|
||||
xe_vm_assert_held(vm);
|
||||
|
||||
list_for_each_entry(e, &vm->preempt.engines, compute.link) {
|
||||
if (!e->compute.pfence || (e->compute.pfence &&
|
||||
test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
|
||||
&e->compute.pfence->flags))) {
|
||||
list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
|
||||
if (!q->compute.pfence ||
|
||||
(q->compute.pfence && test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
|
||||
&q->compute.pfence->flags))) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
@ -195,10 +195,10 @@ static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
|
||||
lockdep_assert_held(&vm->lock);
|
||||
xe_vm_assert_held(vm);
|
||||
|
||||
if (*count >= vm->preempt.num_engines)
|
||||
if (*count >= vm->preempt.num_exec_queues)
|
||||
return 0;
|
||||
|
||||
for (; *count < vm->preempt.num_engines; ++(*count)) {
|
||||
for (; *count < vm->preempt.num_exec_queues; ++(*count)) {
|
||||
struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
|
||||
|
||||
if (IS_ERR(pfence))
|
||||
@ -212,18 +212,18 @@ static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
|
||||
|
||||
static int wait_for_existing_preempt_fences(struct xe_vm *vm)
|
||||
{
|
||||
struct xe_engine *e;
|
||||
struct xe_exec_queue *q;
|
||||
|
||||
xe_vm_assert_held(vm);
|
||||
|
||||
list_for_each_entry(e, &vm->preempt.engines, compute.link) {
|
||||
if (e->compute.pfence) {
|
||||
long timeout = dma_fence_wait(e->compute.pfence, false);
|
||||
list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
|
||||
if (q->compute.pfence) {
|
||||
long timeout = dma_fence_wait(q->compute.pfence, false);
|
||||
|
||||
if (timeout < 0)
|
||||
return -ETIME;
|
||||
dma_fence_put(e->compute.pfence);
|
||||
e->compute.pfence = NULL;
|
||||
dma_fence_put(q->compute.pfence);
|
||||
q->compute.pfence = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
@ -232,11 +232,11 @@ static int wait_for_existing_preempt_fences(struct xe_vm *vm)
|
||||
|
||||
static bool xe_vm_is_idle(struct xe_vm *vm)
|
||||
{
|
||||
struct xe_engine *e;
|
||||
struct xe_exec_queue *q;
|
||||
|
||||
xe_vm_assert_held(vm);
|
||||
list_for_each_entry(e, &vm->preempt.engines, compute.link) {
|
||||
if (!xe_engine_is_idle(e))
|
||||
list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
|
||||
if (!xe_exec_queue_is_idle(q))
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -246,36 +246,36 @@ static bool xe_vm_is_idle(struct xe_vm *vm)
|
||||
static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
|
||||
{
|
||||
struct list_head *link;
|
||||
struct xe_engine *e;
|
||||
struct xe_exec_queue *q;
|
||||
|
||||
list_for_each_entry(e, &vm->preempt.engines, compute.link) {
|
||||
list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
|
||||
struct dma_fence *fence;
|
||||
|
||||
link = list->next;
|
||||
XE_WARN_ON(link == list);
|
||||
|
||||
fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
|
||||
e, e->compute.context,
|
||||
++e->compute.seqno);
|
||||
dma_fence_put(e->compute.pfence);
|
||||
e->compute.pfence = fence;
|
||||
q, q->compute.context,
|
||||
++q->compute.seqno);
|
||||
dma_fence_put(q->compute.pfence);
|
||||
q->compute.pfence = fence;
|
||||
}
|
||||
}
|
||||
|
||||
static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
|
||||
{
|
||||
struct xe_engine *e;
|
||||
struct xe_exec_queue *q;
|
||||
struct ww_acquire_ctx ww;
|
||||
int err;
|
||||
|
||||
err = xe_bo_lock(bo, &ww, vm->preempt.num_engines, true);
|
||||
err = xe_bo_lock(bo, &ww, vm->preempt.num_exec_queues, true);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
list_for_each_entry(e, &vm->preempt.engines, compute.link)
|
||||
if (e->compute.pfence) {
|
||||
list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
|
||||
if (q->compute.pfence) {
|
||||
dma_resv_add_fence(bo->ttm.base.resv,
|
||||
e->compute.pfence,
|
||||
q->compute.pfence,
|
||||
DMA_RESV_USAGE_BOOKKEEP);
|
||||
}
|
||||
|
||||
@ -304,22 +304,22 @@ void xe_vm_fence_all_extobjs(struct xe_vm *vm, struct dma_fence *fence,
|
||||
|
||||
static void resume_and_reinstall_preempt_fences(struct xe_vm *vm)
|
||||
{
|
||||
struct xe_engine *e;
|
||||
struct xe_exec_queue *q;
|
||||
|
||||
lockdep_assert_held(&vm->lock);
|
||||
xe_vm_assert_held(vm);
|
||||
|
||||
list_for_each_entry(e, &vm->preempt.engines, compute.link) {
|
||||
e->ops->resume(e);
|
||||
list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
|
||||
q->ops->resume(q);
|
||||
|
||||
dma_resv_add_fence(xe_vm_resv(vm), e->compute.pfence,
|
||||
dma_resv_add_fence(xe_vm_resv(vm), q->compute.pfence,
|
||||
DMA_RESV_USAGE_BOOKKEEP);
|
||||
xe_vm_fence_all_extobjs(vm, e->compute.pfence,
|
||||
xe_vm_fence_all_extobjs(vm, q->compute.pfence,
|
||||
DMA_RESV_USAGE_BOOKKEEP);
|
||||
}
|
||||
}
|
||||
|
||||
int xe_vm_add_compute_engine(struct xe_vm *vm, struct xe_engine *e)
|
||||
int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
|
||||
{
|
||||
struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
|
||||
struct ttm_validate_buffer *tv;
|
||||
@ -337,16 +337,16 @@ int xe_vm_add_compute_engine(struct xe_vm *vm, struct xe_engine *e)
|
||||
if (err)
|
||||
goto out_unlock_outer;
|
||||
|
||||
pfence = xe_preempt_fence_create(e, e->compute.context,
|
||||
++e->compute.seqno);
|
||||
pfence = xe_preempt_fence_create(q, q->compute.context,
|
||||
++q->compute.seqno);
|
||||
if (!pfence) {
|
||||
err = -ENOMEM;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
list_add(&e->compute.link, &vm->preempt.engines);
|
||||
++vm->preempt.num_engines;
|
||||
e->compute.pfence = pfence;
|
||||
list_add(&q->compute.link, &vm->preempt.exec_queues);
|
||||
++vm->preempt.num_exec_queues;
|
||||
q->compute.pfence = pfence;
|
||||
|
||||
down_read(&vm->userptr.notifier_lock);
|
||||
|
||||
@ -518,7 +518,7 @@ void xe_vm_unlock_dma_resv(struct xe_vm *vm,
|
||||
static void xe_vm_kill(struct xe_vm *vm)
|
||||
{
|
||||
struct ww_acquire_ctx ww;
|
||||
struct xe_engine *e;
|
||||
struct xe_exec_queue *q;
|
||||
|
||||
lockdep_assert_held(&vm->lock);
|
||||
|
||||
@ -526,8 +526,8 @@ static void xe_vm_kill(struct xe_vm *vm)
|
||||
vm->flags |= XE_VM_FLAG_BANNED;
|
||||
trace_xe_vm_kill(vm);
|
||||
|
||||
list_for_each_entry(e, &vm->preempt.engines, compute.link)
|
||||
e->ops->kill(e);
|
||||
list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
|
||||
q->ops->kill(q);
|
||||
xe_vm_unlock(vm, &ww);
|
||||
|
||||
/* TODO: Inform user the VM is banned */
|
||||
@ -584,7 +584,7 @@ static void preempt_rebind_work_func(struct work_struct *w)
|
||||
}
|
||||
|
||||
err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs,
|
||||
false, vm->preempt.num_engines);
|
||||
false, vm->preempt.num_exec_queues);
|
||||
if (err)
|
||||
goto out_unlock_outer;
|
||||
|
||||
@ -833,7 +833,7 @@ int xe_vm_userptr_check_repin(struct xe_vm *vm)
|
||||
}
|
||||
|
||||
static struct dma_fence *
|
||||
xe_vm_bind_vma(struct xe_vma *vma, struct xe_engine *e,
|
||||
xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
|
||||
struct xe_sync_entry *syncs, u32 num_syncs,
|
||||
bool first_op, bool last_op);
|
||||
|
||||
@ -1241,7 +1241,7 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
|
||||
|
||||
INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
|
||||
|
||||
INIT_LIST_HEAD(&vm->preempt.engines);
|
||||
INIT_LIST_HEAD(&vm->preempt.exec_queues);
|
||||
vm->preempt.min_run_period_ms = 10; /* FIXME: Wire up to uAPI */
|
||||
|
||||
for_each_tile(tile, xe, id)
|
||||
@ -1320,21 +1320,21 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
|
||||
for_each_tile(tile, xe, id) {
|
||||
struct xe_gt *gt = tile->primary_gt;
|
||||
struct xe_vm *migrate_vm;
|
||||
struct xe_engine *eng;
|
||||
struct xe_exec_queue *q;
|
||||
|
||||
if (!vm->pt_root[id])
|
||||
continue;
|
||||
|
||||
migrate_vm = xe_migrate_get_vm(tile->migrate);
|
||||
eng = xe_engine_create_class(xe, gt, migrate_vm,
|
||||
XE_ENGINE_CLASS_COPY,
|
||||
ENGINE_FLAG_VM);
|
||||
q = xe_exec_queue_create_class(xe, gt, migrate_vm,
|
||||
XE_ENGINE_CLASS_COPY,
|
||||
EXEC_QUEUE_FLAG_VM);
|
||||
xe_vm_put(migrate_vm);
|
||||
if (IS_ERR(eng)) {
|
||||
err = PTR_ERR(eng);
|
||||
if (IS_ERR(q)) {
|
||||
err = PTR_ERR(q);
|
||||
goto err_close;
|
||||
}
|
||||
vm->eng[id] = eng;
|
||||
vm->q[id] = q;
|
||||
number_tiles++;
|
||||
}
|
||||
}
|
||||
@ -1422,7 +1422,7 @@ void xe_vm_close_and_put(struct xe_vm *vm)
|
||||
struct drm_gpuva *gpuva, *next;
|
||||
u8 id;
|
||||
|
||||
XE_WARN_ON(vm->preempt.num_engines);
|
||||
XE_WARN_ON(vm->preempt.num_exec_queues);
|
||||
|
||||
xe_vm_close(vm);
|
||||
flush_async_ops(vm);
|
||||
@ -1430,10 +1430,10 @@ void xe_vm_close_and_put(struct xe_vm *vm)
|
||||
flush_work(&vm->preempt.rebind_work);
|
||||
|
||||
for_each_tile(tile, xe, id) {
|
||||
if (vm->eng[id]) {
|
||||
xe_engine_kill(vm->eng[id]);
|
||||
xe_engine_put(vm->eng[id]);
|
||||
vm->eng[id] = NULL;
|
||||
if (vm->q[id]) {
|
||||
xe_exec_queue_kill(vm->q[id]);
|
||||
xe_exec_queue_put(vm->q[id]);
|
||||
vm->q[id] = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
@ -1573,7 +1573,7 @@ u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
|
||||
}
|
||||
|
||||
static struct dma_fence *
|
||||
xe_vm_unbind_vma(struct xe_vma *vma, struct xe_engine *e,
|
||||
xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
|
||||
struct xe_sync_entry *syncs, u32 num_syncs,
|
||||
bool first_op, bool last_op)
|
||||
{
|
||||
@ -1600,7 +1600,7 @@ xe_vm_unbind_vma(struct xe_vma *vma, struct xe_engine *e,
|
||||
if (!(vma->tile_present & BIT(id)))
|
||||
goto next;
|
||||
|
||||
fence = __xe_pt_unbind_vma(tile, vma, e, first_op ? syncs : NULL,
|
||||
fence = __xe_pt_unbind_vma(tile, vma, q, first_op ? syncs : NULL,
|
||||
first_op ? num_syncs : 0);
|
||||
if (IS_ERR(fence)) {
|
||||
err = PTR_ERR(fence);
|
||||
@ -1611,8 +1611,8 @@ xe_vm_unbind_vma(struct xe_vma *vma, struct xe_engine *e,
|
||||
fences[cur_fence++] = fence;
|
||||
|
||||
next:
|
||||
if (e && vm->pt_root[id] && !list_empty(&e->multi_gt_list))
|
||||
e = list_next_entry(e, multi_gt_list);
|
||||
if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
|
||||
q = list_next_entry(q, multi_gt_list);
|
||||
}
|
||||
|
||||
if (fences) {
|
||||
@ -1648,7 +1648,7 @@ xe_vm_unbind_vma(struct xe_vma *vma, struct xe_engine *e,
|
||||
}
|
||||
|
||||
static struct dma_fence *
|
||||
xe_vm_bind_vma(struct xe_vma *vma, struct xe_engine *e,
|
||||
xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
|
||||
struct xe_sync_entry *syncs, u32 num_syncs,
|
||||
bool first_op, bool last_op)
|
||||
{
|
||||
@ -1675,7 +1675,7 @@ xe_vm_bind_vma(struct xe_vma *vma, struct xe_engine *e,
|
||||
if (!(vma->tile_mask & BIT(id)))
|
||||
goto next;
|
||||
|
||||
fence = __xe_pt_bind_vma(tile, vma, e ? e : vm->eng[id],
|
||||
fence = __xe_pt_bind_vma(tile, vma, q ? q : vm->q[id],
|
||||
first_op ? syncs : NULL,
|
||||
first_op ? num_syncs : 0,
|
||||
vma->tile_present & BIT(id));
|
||||
@ -1688,8 +1688,8 @@ xe_vm_bind_vma(struct xe_vma *vma, struct xe_engine *e,
|
||||
fences[cur_fence++] = fence;
|
||||
|
||||
next:
|
||||
if (e && vm->pt_root[id] && !list_empty(&e->multi_gt_list))
|
||||
e = list_next_entry(e, multi_gt_list);
|
||||
if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
|
||||
q = list_next_entry(q, multi_gt_list);
|
||||
}
|
||||
|
||||
if (fences) {
|
||||
@ -1805,7 +1805,7 @@ int xe_vm_async_fence_wait_start(struct dma_fence *fence)
|
||||
}
|
||||
|
||||
static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
|
||||
struct xe_engine *e, struct xe_sync_entry *syncs,
|
||||
struct xe_exec_queue *q, struct xe_sync_entry *syncs,
|
||||
u32 num_syncs, struct async_op_fence *afence,
|
||||
bool immediate, bool first_op, bool last_op)
|
||||
{
|
||||
@ -1814,7 +1814,7 @@ static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
|
||||
xe_vm_assert_held(vm);
|
||||
|
||||
if (immediate) {
|
||||
fence = xe_vm_bind_vma(vma, e, syncs, num_syncs, first_op,
|
||||
fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, first_op,
|
||||
last_op);
|
||||
if (IS_ERR(fence))
|
||||
return PTR_ERR(fence);
|
||||
@ -1836,7 +1836,7 @@ static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_engine *e,
|
||||
static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
|
||||
struct xe_bo *bo, struct xe_sync_entry *syncs,
|
||||
u32 num_syncs, struct async_op_fence *afence,
|
||||
bool immediate, bool first_op, bool last_op)
|
||||
@ -1852,12 +1852,12 @@ static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_engine *e,
|
||||
return err;
|
||||
}
|
||||
|
||||
return __xe_vm_bind(vm, vma, e, syncs, num_syncs, afence, immediate,
|
||||
return __xe_vm_bind(vm, vma, q, syncs, num_syncs, afence, immediate,
|
||||
first_op, last_op);
|
||||
}
|
||||
|
||||
static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
|
||||
struct xe_engine *e, struct xe_sync_entry *syncs,
|
||||
struct xe_exec_queue *q, struct xe_sync_entry *syncs,
|
||||
u32 num_syncs, struct async_op_fence *afence,
|
||||
bool first_op, bool last_op)
|
||||
{
|
||||
@ -1866,7 +1866,7 @@ static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
|
||||
xe_vm_assert_held(vm);
|
||||
xe_bo_assert_held(xe_vma_bo(vma));
|
||||
|
||||
fence = xe_vm_unbind_vma(vma, e, syncs, num_syncs, first_op, last_op);
|
||||
fence = xe_vm_unbind_vma(vma, q, syncs, num_syncs, first_op, last_op);
|
||||
if (IS_ERR(fence))
|
||||
return PTR_ERR(fence);
|
||||
if (afence)
|
||||
@ -2074,7 +2074,7 @@ int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
|
||||
vm = xa_load(&xef->vm.xa, args->vm_id);
|
||||
if (XE_IOCTL_DBG(xe, !vm))
|
||||
err = -ENOENT;
|
||||
else if (XE_IOCTL_DBG(xe, vm->preempt.num_engines))
|
||||
else if (XE_IOCTL_DBG(xe, vm->preempt.num_exec_queues))
|
||||
err = -EBUSY;
|
||||
else
|
||||
xa_erase(&xef->vm.xa, args->vm_id);
|
||||
@ -2093,7 +2093,7 @@ static const u32 region_to_mem_type[] = {
|
||||
};
|
||||
|
||||
static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
|
||||
struct xe_engine *e, u32 region,
|
||||
struct xe_exec_queue *q, u32 region,
|
||||
struct xe_sync_entry *syncs, u32 num_syncs,
|
||||
struct async_op_fence *afence, bool first_op,
|
||||
bool last_op)
|
||||
@ -2109,7 +2109,7 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
|
||||
}
|
||||
|
||||
if (vma->tile_mask != (vma->tile_present & ~vma->usm.tile_invalidated)) {
|
||||
return xe_vm_bind(vm, vma, e, xe_vma_bo(vma), syncs, num_syncs,
|
||||
return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
|
||||
afence, true, first_op, last_op);
|
||||
} else {
|
||||
int i;
|
||||
@ -2414,7 +2414,7 @@ static u64 xe_vma_max_pte_size(struct xe_vma *vma)
|
||||
* Parse operations list and create any resources needed for the operations
|
||||
* prior to fully committing to the operations. This setup can fail.
|
||||
*/
|
||||
static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_engine *e,
|
||||
static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
|
||||
struct drm_gpuva_ops **ops, int num_ops_list,
|
||||
struct xe_sync_entry *syncs, u32 num_syncs,
|
||||
struct list_head *ops_list, bool async)
|
||||
@ -2434,9 +2434,9 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_engine *e,
|
||||
if (!fence)
|
||||
return -ENOMEM;
|
||||
|
||||
seqno = e ? ++e->bind.fence_seqno : ++vm->async_ops.fence.seqno;
|
||||
seqno = q ? ++q->bind.fence_seqno : ++vm->async_ops.fence.seqno;
|
||||
dma_fence_init(&fence->fence, &async_op_fence_ops,
|
||||
&vm->async_ops.lock, e ? e->bind.fence_ctx :
|
||||
&vm->async_ops.lock, q ? q->bind.fence_ctx :
|
||||
vm->async_ops.fence.context, seqno);
|
||||
|
||||
if (!xe_vm_no_dma_fences(vm)) {
|
||||
@ -2467,7 +2467,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_engine *e,
|
||||
op->syncs = syncs;
|
||||
}
|
||||
|
||||
op->engine = e;
|
||||
op->q = q;
|
||||
|
||||
switch (op->base.op) {
|
||||
case DRM_GPUVA_OP_MAP:
|
||||
@ -2677,7 +2677,7 @@ static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
|
||||
|
||||
switch (op->base.op) {
|
||||
case DRM_GPUVA_OP_MAP:
|
||||
err = xe_vm_bind(vm, vma, op->engine, xe_vma_bo(vma),
|
||||
err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
|
||||
op->syncs, op->num_syncs, op->fence,
|
||||
op->map.immediate || !xe_vm_in_fault_mode(vm),
|
||||
op->flags & XE_VMA_OP_FIRST,
|
||||
@ -2693,7 +2693,7 @@ static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
|
||||
vm->async_ops.munmap_rebind_inflight = true;
|
||||
vma->gpuva.flags |= XE_VMA_FIRST_REBIND;
|
||||
}
|
||||
err = xe_vm_unbind(vm, vma, op->engine, op->syncs,
|
||||
err = xe_vm_unbind(vm, vma, op->q, op->syncs,
|
||||
op->num_syncs,
|
||||
!prev && !next ? op->fence : NULL,
|
||||
op->flags & XE_VMA_OP_FIRST,
|
||||
@ -2706,7 +2706,7 @@ static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
|
||||
|
||||
if (prev) {
|
||||
op->remap.prev->gpuva.flags |= XE_VMA_LAST_REBIND;
|
||||
err = xe_vm_bind(vm, op->remap.prev, op->engine,
|
||||
err = xe_vm_bind(vm, op->remap.prev, op->q,
|
||||
xe_vma_bo(op->remap.prev), op->syncs,
|
||||
op->num_syncs,
|
||||
!next ? op->fence : NULL, true, false,
|
||||
@ -2719,7 +2719,7 @@ static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
|
||||
|
||||
if (next) {
|
||||
op->remap.next->gpuva.flags |= XE_VMA_LAST_REBIND;
|
||||
err = xe_vm_bind(vm, op->remap.next, op->engine,
|
||||
err = xe_vm_bind(vm, op->remap.next, op->q,
|
||||
xe_vma_bo(op->remap.next),
|
||||
op->syncs, op->num_syncs,
|
||||
op->fence, true, false,
|
||||
@ -2734,13 +2734,13 @@ static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
|
||||
break;
|
||||
}
|
||||
case DRM_GPUVA_OP_UNMAP:
|
||||
err = xe_vm_unbind(vm, vma, op->engine, op->syncs,
|
||||
err = xe_vm_unbind(vm, vma, op->q, op->syncs,
|
||||
op->num_syncs, op->fence,
|
||||
op->flags & XE_VMA_OP_FIRST,
|
||||
op->flags & XE_VMA_OP_LAST);
|
||||
break;
|
||||
case DRM_GPUVA_OP_PREFETCH:
|
||||
err = xe_vm_prefetch(vm, vma, op->engine, op->prefetch.region,
|
||||
err = xe_vm_prefetch(vm, vma, op->q, op->prefetch.region,
|
||||
op->syncs, op->num_syncs, op->fence,
|
||||
op->flags & XE_VMA_OP_FIRST,
|
||||
op->flags & XE_VMA_OP_LAST);
|
||||
@ -2819,8 +2819,8 @@ static void xe_vma_op_cleanup(struct xe_vm *vm, struct xe_vma_op *op)
|
||||
while (op->num_syncs--)
|
||||
xe_sync_entry_cleanup(&op->syncs[op->num_syncs]);
|
||||
kfree(op->syncs);
|
||||
if (op->engine)
|
||||
xe_engine_put(op->engine);
|
||||
if (op->q)
|
||||
xe_exec_queue_put(op->q);
|
||||
if (op->fence)
|
||||
dma_fence_put(&op->fence->fence);
|
||||
}
|
||||
@ -3174,7 +3174,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
|
||||
struct xe_bo **bos = NULL;
|
||||
struct drm_gpuva_ops **ops = NULL;
|
||||
struct xe_vm *vm;
|
||||
struct xe_engine *e = NULL;
|
||||
struct xe_exec_queue *q = NULL;
|
||||
u32 num_syncs;
|
||||
struct xe_sync_entry *syncs = NULL;
|
||||
struct drm_xe_vm_bind_op *bind_ops;
|
||||
@ -3187,23 +3187,23 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (args->engine_id) {
|
||||
e = xe_engine_lookup(xef, args->engine_id);
|
||||
if (XE_IOCTL_DBG(xe, !e)) {
|
||||
if (args->exec_queue_id) {
|
||||
q = xe_exec_queue_lookup(xef, args->exec_queue_id);
|
||||
if (XE_IOCTL_DBG(xe, !q)) {
|
||||
err = -ENOENT;
|
||||
goto free_objs;
|
||||
}
|
||||
|
||||
if (XE_IOCTL_DBG(xe, !(e->flags & ENGINE_FLAG_VM))) {
|
||||
if (XE_IOCTL_DBG(xe, !(q->flags & EXEC_QUEUE_FLAG_VM))) {
|
||||
err = -EINVAL;
|
||||
goto put_engine;
|
||||
goto put_exec_queue;
|
||||
}
|
||||
}
|
||||
|
||||
vm = xe_vm_lookup(xef, args->vm_id);
|
||||
if (XE_IOCTL_DBG(xe, !vm)) {
|
||||
err = -EINVAL;
|
||||
goto put_engine;
|
||||
goto put_exec_queue;
|
||||
}
|
||||
|
||||
err = down_write_killable(&vm->lock);
|
||||
@ -3357,7 +3357,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
|
||||
}
|
||||
}
|
||||
|
||||
err = vm_bind_ioctl_ops_parse(vm, e, ops, args->num_binds,
|
||||
err = vm_bind_ioctl_ops_parse(vm, q, ops, args->num_binds,
|
||||
syncs, num_syncs, &ops_list, async);
|
||||
if (err)
|
||||
goto unwind_ops;
|
||||
@ -3391,9 +3391,9 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
|
||||
up_write(&vm->lock);
|
||||
put_vm:
|
||||
xe_vm_put(vm);
|
||||
put_engine:
|
||||
if (e)
|
||||
xe_engine_put(e);
|
||||
put_exec_queue:
|
||||
if (q)
|
||||
xe_exec_queue_put(q);
|
||||
free_objs:
|
||||
kfree(bos);
|
||||
kfree(ops);
|
||||
|
@ -18,7 +18,7 @@ struct drm_file;
|
||||
struct ttm_buffer_object;
|
||||
struct ttm_validate_buffer;
|
||||
|
||||
struct xe_engine;
|
||||
struct xe_exec_queue;
|
||||
struct xe_file;
|
||||
struct xe_sync_entry;
|
||||
|
||||
@ -164,7 +164,7 @@ static inline bool xe_vm_no_dma_fences(struct xe_vm *vm)
|
||||
return xe_vm_in_compute_mode(vm) || xe_vm_in_fault_mode(vm);
|
||||
}
|
||||
|
||||
int xe_vm_add_compute_engine(struct xe_vm *vm, struct xe_engine *e);
|
||||
int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);
|
||||
|
||||
int xe_vm_userptr_pin(struct xe_vm *vm);
|
||||
|
||||
|
@ -138,8 +138,8 @@ struct xe_vm {
|
||||
|
||||
struct xe_device *xe;
|
||||
|
||||
/* engine used for (un)binding vma's */
|
||||
struct xe_engine *eng[XE_MAX_TILES_PER_DEVICE];
|
||||
/* exec queue used for (un)binding vma's */
|
||||
struct xe_exec_queue *q[XE_MAX_TILES_PER_DEVICE];
|
||||
|
||||
/** @lru_bulk_move: Bulk LRU move list for this VM's BOs */
|
||||
struct ttm_lru_bulk_move lru_bulk_move;
|
||||
@ -278,10 +278,10 @@ struct xe_vm {
|
||||
* an engine again
|
||||
*/
|
||||
s64 min_run_period_ms;
|
||||
/** @engines: list of engines attached to this VM */
|
||||
struct list_head engines;
|
||||
/** @num_engines: number user engines attached to this VM */
|
||||
int num_engines;
|
||||
/** @exec_queues: list of exec queues attached to this VM */
|
||||
struct list_head exec_queues;
|
||||
/** @num_exec_queues: number exec queues attached to this VM */
|
||||
int num_exec_queues;
|
||||
/**
|
||||
* @rebind_deactivated: Whether rebind has been temporarily deactivated
|
||||
* due to no work available. Protected by the vm resv.
|
||||
@ -386,8 +386,8 @@ struct xe_vma_op {
|
||||
* operations is processed
|
||||
*/
|
||||
struct drm_gpuva_ops *ops;
|
||||
/** @engine: engine for this operation */
|
||||
struct xe_engine *engine;
|
||||
/** @q: exec queue for this operation */
|
||||
struct xe_exec_queue *q;
|
||||
/**
|
||||
* @syncs: syncs for this operation, only used on first and last
|
||||
* operation
|
||||
|
@ -103,14 +103,14 @@ struct xe_user_extension {
#define DRM_XE_VM_CREATE 0x03
#define DRM_XE_VM_DESTROY 0x04
#define DRM_XE_VM_BIND 0x05
#define DRM_XE_ENGINE_CREATE 0x06
#define DRM_XE_ENGINE_DESTROY 0x07
#define DRM_XE_EXEC_QUEUE_CREATE 0x06
#define DRM_XE_EXEC_QUEUE_DESTROY 0x07
#define DRM_XE_EXEC 0x08
#define DRM_XE_MMIO 0x09
#define DRM_XE_ENGINE_SET_PROPERTY 0x0a
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY 0x0a
#define DRM_XE_WAIT_USER_FENCE 0x0b
#define DRM_XE_VM_MADVISE 0x0c
#define DRM_XE_ENGINE_GET_PROPERTY 0x0d
#define DRM_XE_EXEC_QUEUE_GET_PROPERTY 0x0d

/* Must be kept compact -- no holes */
#define DRM_IOCTL_XE_DEVICE_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_DEVICE_QUERY, struct drm_xe_device_query)
@ -119,12 +119,12 @@ struct xe_user_extension {
#define DRM_IOCTL_XE_VM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_CREATE, struct drm_xe_vm_create)
#define DRM_IOCTL_XE_VM_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_DESTROY, struct drm_xe_vm_destroy)
#define DRM_IOCTL_XE_VM_BIND DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_BIND, struct drm_xe_vm_bind)
#define DRM_IOCTL_XE_ENGINE_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_ENGINE_CREATE, struct drm_xe_engine_create)
#define DRM_IOCTL_XE_ENGINE_GET_PROPERTY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_ENGINE_GET_PROPERTY, struct drm_xe_engine_get_property)
#define DRM_IOCTL_XE_ENGINE_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_ENGINE_DESTROY, struct drm_xe_engine_destroy)
#define DRM_IOCTL_XE_EXEC_QUEUE_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_CREATE, struct drm_xe_exec_queue_create)
#define DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_GET_PROPERTY, struct drm_xe_exec_queue_get_property)
#define DRM_IOCTL_XE_EXEC_QUEUE_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_DESTROY, struct drm_xe_exec_queue_destroy)
#define DRM_IOCTL_XE_EXEC DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
#define DRM_IOCTL_XE_MMIO DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_MMIO, struct drm_xe_mmio)
#define DRM_IOCTL_XE_ENGINE_SET_PROPERTY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_ENGINE_SET_PROPERTY, struct drm_xe_engine_set_property)
#define DRM_IOCTL_XE_EXEC_QUEUE_SET_PROPERTY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_SET_PROPERTY, struct drm_xe_exec_queue_set_property)
#define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
#define DRM_IOCTL_XE_VM_MADVISE DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_MADVISE, struct drm_xe_vm_madvise)
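Note that the request numbers (0x06, 0x07, 0x0a, 0x0d) are untouched; only the symbolic names move to the EXEC_QUEUE prefix, so the binary interface is unchanged by the rename. A hedged userspace sketch of how one of the renamed requests is issued; the xe_ioctl() helper below is hypothetical (it simply mirrors libdrm's drmIoctl() retry loop) and is not part of this patch:

#include <errno.h>
#include <sys/ioctl.h>

/* Hypothetical helper, not from this patch: retry an ioctl on EINTR/EAGAIN
 * and return -errno on failure. 'request' would be one of the renamed
 * DRM_IOCTL_XE_EXEC_QUEUE_* macros from the updated xe_drm.h.
 */
static int xe_ioctl(int fd, unsigned long request, void *arg)
{
	int ret;

	do {
		ret = ioctl(fd, request, arg);
	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));

	return ret == -1 ? -errno : 0;
}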
@ -649,11 +649,11 @@ struct drm_xe_vm_bind {
__u32 vm_id;

/**
* @engine_id: engine_id, must be of class DRM_XE_ENGINE_CLASS_VM_BIND
* and engine must have same vm_id. If zero, the default VM bind engine
* @exec_queue_id: exec_queue_id, must be of class DRM_XE_ENGINE_CLASS_VM_BIND
* and exec queue must have same vm_id. If zero, the default VM bind engine
* is used.
*/
__u32 engine_id;
__u32 exec_queue_id;

/** @num_binds: number of binds in this IOCTL */
__u32 num_binds;
@ -685,8 +685,8 @@ struct drm_xe_vm_bind {
__u64 reserved[2];
};

/** struct drm_xe_ext_engine_set_property - engine set property extension */
struct drm_xe_ext_engine_set_property {
/** struct drm_xe_ext_exec_queue_set_property - exec queue set property extension */
struct drm_xe_ext_exec_queue_set_property {
/** @base: base user extension */
struct xe_user_extension base;

@ -701,32 +701,32 @@ struct drm_xe_ext_engine_set_property {
};

/**
* struct drm_xe_engine_set_property - engine set property
* struct drm_xe_exec_queue_set_property - exec queue set property
*
* Same namespace for extensions as drm_xe_engine_create
* Same namespace for extensions as drm_xe_exec_queue_create
*/
struct drm_xe_engine_set_property {
struct drm_xe_exec_queue_set_property {
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;

/** @engine_id: Engine ID */
__u32 engine_id;
/** @exec_queue_id: Exec queue ID */
__u32 exec_queue_id;

#define XE_ENGINE_SET_PROPERTY_PRIORITY 0
#define XE_ENGINE_SET_PROPERTY_TIMESLICE 1
#define XE_ENGINE_SET_PROPERTY_PREEMPTION_TIMEOUT 2
#define XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0
#define XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1
#define XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT 2
/*
* Long running or ULLS engine mode. DMA fences not allowed in this
* mode. Must match the value of DRM_XE_VM_CREATE_COMPUTE_MODE, serves
* as a sanity check the UMD knows what it is doing. Can only be set at
* engine create time.
*/
#define XE_ENGINE_SET_PROPERTY_COMPUTE_MODE 3
#define XE_ENGINE_SET_PROPERTY_PERSISTENCE 4
#define XE_ENGINE_SET_PROPERTY_JOB_TIMEOUT 5
#define XE_ENGINE_SET_PROPERTY_ACC_TRIGGER 6
#define XE_ENGINE_SET_PROPERTY_ACC_NOTIFY 7
#define XE_ENGINE_SET_PROPERTY_ACC_GRANULARITY 8
#define XE_EXEC_QUEUE_SET_PROPERTY_COMPUTE_MODE 3
#define XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE 4
#define XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT 5
#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER 6
#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY 7
#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY 8
/** @property: property to set */
__u32 property;
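The property tokens are renamed but keep their numeric values, as the old and new defines above show. A compile-time spot check (illustrative only, assuming the updated uAPI header is on the include path) makes that explicit:

#include <drm/xe_drm.h>	/* include path is an assumption of this sketch */

/* The renamed tokens keep their values, so a UMD recompiled against the new
 * header programs exactly the same property numbers as before.
 */
_Static_assert(XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY == 0,
	       "priority token value unchanged by the rename");
_Static_assert(XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY == 8,
	       "ACC granularity token value unchanged by the rename");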
@ -755,25 +755,25 @@ struct drm_xe_engine_class_instance {
__u16 gt_id;
};

struct drm_xe_engine_create {
#define XE_ENGINE_EXTENSION_SET_PROPERTY 0
struct drm_xe_exec_queue_create {
#define XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY 0
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;

/** @width: submission width (number BB per exec) for this engine */
/** @width: submission width (number BB per exec) for this exec queue */
__u16 width;

/** @num_placements: number of valid placements for this engine */
/** @num_placements: number of valid placements for this exec queue */
__u16 num_placements;

/** @vm_id: VM to use for this engine */
/** @vm_id: VM to use for this exec queue */
__u32 vm_id;

/** @flags: MBZ */
__u32 flags;

/** @engine_id: Returned engine ID */
__u32 engine_id;
/** @exec_queue_id: Returned exec queue ID */
__u32 exec_queue_id;

/**
* @instances: user pointer to a 2-d array of struct
@ -788,14 +788,14 @@ struct drm_xe_engine_create {
__u64 reserved[2];
};
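A hedged userspace sketch of creating an exec queue through the renamed ioctl. It fills only the members visible in this hunk; the engine class/instance members of drm_xe_engine_class_instance are not shown here and are left zero-initialized (an assumption), the user pointer is passed as an integer per the usual uAPI convention, and xe_fd/vm_id are placeholders produced by earlier device and VM setup:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/xe_drm.h>	/* include path is an assumption of this sketch */

/* Hedged sketch: one placement, one batch buffer per exec. */
static int create_exec_queue(int xe_fd, uint32_t vm_id, uint32_t *exec_queue_id)
{
	struct drm_xe_engine_class_instance instance = {
		.gt_id = 0,	/* class/instance members elided in this hunk stay zero */
	};
	struct drm_xe_exec_queue_create create = {
		.width = 1,		/* one BB per exec */
		.num_placements = 1,	/* a single entry behind @instances */
		.vm_id = vm_id,
		.instances = (uintptr_t)&instance,	/* user pointer passed as integer */
	};

	if (ioctl(xe_fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create) == -1)
		return -1;

	*exec_queue_id = create.exec_queue_id;	/* returned ID, formerly engine_id */
	return 0;
}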
struct drm_xe_engine_get_property {
struct drm_xe_exec_queue_get_property {
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;

/** @engine_id: Engine ID */
__u32 engine_id;
/** @exec_queue_id: Exec queue ID */
__u32 exec_queue_id;

#define XE_ENGINE_GET_PROPERTY_BAN 0
#define XE_EXEC_QUEUE_GET_PROPERTY_BAN 0
/** @property: property to get */
__u32 property;

@ -806,9 +806,9 @@ struct drm_xe_engine_get_property {
__u64 reserved[2];
};

struct drm_xe_engine_destroy {
/** @engine_id: Engine ID */
__u32 engine_id;
struct drm_xe_exec_queue_destroy {
/** @exec_queue_id: Exec queue ID */
__u32 exec_queue_id;

/** @pad: MBZ */
__u32 pad;
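And the matching teardown, again using only the members shown in this hunk (exec_queue_id plus the MBZ pad); xe_fd is a placeholder for an open xe device file descriptor:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/xe_drm.h>	/* include path is an assumption of this sketch */

/* Hedged sketch: destroy a queue created earlier; @pad is MBZ and stays
 * zero-initialized.
 */
static int destroy_exec_queue(int xe_fd, uint32_t exec_queue_id)
{
	struct drm_xe_exec_queue_destroy destroy = {
		.exec_queue_id = exec_queue_id,
	};

	return ioctl(xe_fd, DRM_IOCTL_XE_EXEC_QUEUE_DESTROY, &destroy) == -1 ? -1 : 0;
}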
@ -855,8 +855,8 @@ struct drm_xe_exec {
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;

/** @engine_id: Engine ID for the batch buffer */
__u32 engine_id;
/** @exec_queue_id: Exec queue ID for the batch buffer */
__u32 exec_queue_id;

/** @num_syncs: Amount of struct drm_xe_sync in array. */
__u32 num_syncs;