drm/sched: Use struct for drm_sched_init() params
drm_sched_init() has a great many parameters and upcoming new
functionality for the scheduler might add even more. Generally, the
great number of parameters reduces readability and has already caused
one misnaming, addressed in:

commit 6f1cacf4eb ("drm/nouveau: Improve variable name in nouveau_sched_init()")

Introduce a new struct for the scheduler init parameters and port all
users.
Reviewed-by: Liviu Dudau <liviu.dudau@arm.com>
Acked-by: Matthew Brost <matthew.brost@intel.com> # for Xe
Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com> # for Panfrost and Panthor
Reviewed-by: Christian Gmeiner <cgmeiner@igalia.com> # for Etnaviv
Reviewed-by: Frank Binns <frank.binns@imgtec.com> # for Imagination
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com> # for Sched
Reviewed-by: Maíra Canal <mcanal@igalia.com> # for v3d
Reviewed-by: Danilo Krummrich <dakr@kernel.org>
Reviewed-by: Lizhi Hou <lizhi.hou@amd.com> # for amdxdna
Signed-off-by: Philipp Stanner <phasta@kernel.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20250211111422.21235-2-phasta@kernel.org
Parent: 62ae45687e
Commit: 796a9f55a8
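
Every driver conversion in the diff below follows the same pattern: the long positional argument list of drm_sched_init() is replaced by a designated-initializer struct. In sketch form (identifiers such as my_sched_ops and "my-sched" are placeholders, not taken from any one driver; unused optional parameters were previously passed as NULL/0 and are now simply left out of the initializer):

	/* Before: ops, submit_wq, num_rqs, credit_limit, hang_limit,
	 * timeout, timeout_wq, score, name and dev passed positionally. */
	ret = drm_sched_init(&sched, &my_sched_ops, NULL,
			     DRM_SCHED_PRIORITY_COUNT, credit_limit, 0,
			     timeout, NULL, NULL, "my-sched", dev);

	/* After: the same parameters named via struct drm_sched_init_args. */
	const struct drm_sched_init_args args = {
		.ops = &my_sched_ops,
		.num_rqs = DRM_SCHED_PRIORITY_COUNT,
		.credit_limit = credit_limit,
		.timeout = timeout,
		.name = "my-sched",
		.dev = dev,
	};
	ret = drm_sched_init(&sched, &args);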
@@ -516,6 +516,14 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
 {
 	struct amdxdna_client *client = hwctx->client;
 	struct amdxdna_dev *xdna = client->xdna;
+	const struct drm_sched_init_args args = {
+		.ops = &sched_ops,
+		.num_rqs = DRM_SCHED_PRIORITY_COUNT,
+		.credit_limit = HWCTX_MAX_CMDS,
+		.timeout = msecs_to_jiffies(HWCTX_MAX_TIMEOUT),
+		.name = hwctx->name,
+		.dev = xdna->ddev.dev,
+	};
 	struct drm_gpu_scheduler *sched;
 	struct amdxdna_hwctx_priv *priv;
 	struct amdxdna_gem_obj *heap;
@@ -573,9 +581,7 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
 	might_lock(&priv->io_lock);
 	fs_reclaim_release(GFP_KERNEL);
 
-	ret = drm_sched_init(sched, &sched_ops, NULL, DRM_SCHED_PRIORITY_COUNT,
-			     HWCTX_MAX_CMDS, 0, msecs_to_jiffies(HWCTX_MAX_TIMEOUT),
-			     NULL, NULL, hwctx->name, xdna->ddev.dev);
+	ret = drm_sched_init(sched, &args);
 	if (ret) {
 		XDNA_ERR(xdna, "Failed to init DRM scheduler. ret %d", ret);
 		goto free_cmd_bufs;
@@ -2823,6 +2823,12 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
 
 static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
 {
+	struct drm_sched_init_args args = {
+		.ops = &amdgpu_sched_ops,
+		.num_rqs = DRM_SCHED_PRIORITY_COUNT,
+		.timeout_wq = adev->reset_domain->wq,
+		.dev = adev->dev,
+	};
 	long timeout;
 	int r, i;
 
@@ -2848,12 +2854,12 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
 			break;
 		}
 
-		r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, NULL,
-				   DRM_SCHED_PRIORITY_COUNT,
-				   ring->num_hw_submission, 0,
-				   timeout, adev->reset_domain->wq,
-				   ring->sched_score, ring->name,
-				   adev->dev);
+		args.timeout = timeout;
+		args.credit_limit = ring->num_hw_submission;
+		args.score = ring->sched_score;
+		args.name = ring->name;
+
+		r = drm_sched_init(&ring->sched, &args);
 		if (r) {
 			DRM_ERROR("Failed to create scheduler on ring %s.\n",
 				  ring->name);
@@ -144,17 +144,17 @@ int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit)
 
 int etnaviv_sched_init(struct etnaviv_gpu *gpu)
 {
-	int ret;
+	const struct drm_sched_init_args args = {
+		.ops = &etnaviv_sched_ops,
+		.num_rqs = DRM_SCHED_PRIORITY_COUNT,
+		.credit_limit = etnaviv_hw_jobs_limit,
+		.hang_limit = etnaviv_job_hang_limit,
+		.timeout = msecs_to_jiffies(500),
+		.name = dev_name(gpu->dev),
+		.dev = gpu->dev,
+	};
 
-	ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops, NULL,
-			     DRM_SCHED_PRIORITY_COUNT,
-			     etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
-			     msecs_to_jiffies(500), NULL, NULL,
-			     dev_name(gpu->dev), gpu->dev);
-	if (ret)
-		return ret;
-
-	return 0;
+	return drm_sched_init(&gpu->sched, &args);
 }
 
 void etnaviv_sched_fini(struct etnaviv_gpu *gpu)
@@ -1210,6 +1210,17 @@ struct pvr_queue *pvr_queue_create(struct pvr_context *ctx,
 		},
 	};
 	struct pvr_device *pvr_dev = ctx->pvr_dev;
+	const struct drm_sched_init_args sched_args = {
+		.ops = &pvr_queue_sched_ops,
+		.submit_wq = pvr_dev->sched_wq,
+		.num_rqs = 1,
+		.credit_limit = 64 * 1024,
+		.hang_limit = 1,
+		.timeout = msecs_to_jiffies(500),
+		.timeout_wq = pvr_dev->sched_wq,
+		.name = "pvr-queue",
+		.dev = pvr_dev->base.dev,
+	};
 	struct drm_gpu_scheduler *sched;
 	struct pvr_queue *queue;
 	int ctx_state_size, err;
@@ -1282,12 +1293,7 @@ struct pvr_queue *pvr_queue_create(struct pvr_context *ctx,
 
 	queue->timeline_ufo.value = cpu_map;
 
-	err = drm_sched_init(&queue->scheduler,
-			     &pvr_queue_sched_ops,
-			     pvr_dev->sched_wq, 1, 64 * 1024, 1,
-			     msecs_to_jiffies(500),
-			     pvr_dev->sched_wq, NULL, "pvr-queue",
-			     pvr_dev->base.dev);
+	err = drm_sched_init(&queue->scheduler, &sched_args);
 	if (err)
 		goto err_release_ufo;
 
@@ -515,18 +515,22 @@ int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
 {
 	unsigned int timeout = lima_sched_timeout_ms > 0 ?
 			       lima_sched_timeout_ms : 10000;
+	const struct drm_sched_init_args args = {
+		.ops = &lima_sched_ops,
+		.num_rqs = DRM_SCHED_PRIORITY_COUNT,
+		.credit_limit = 1,
+		.hang_limit = lima_job_hang_limit,
+		.timeout = msecs_to_jiffies(timeout),
+		.name = name,
+		.dev = pipe->ldev->dev,
+	};
 
 	pipe->fence_context = dma_fence_context_alloc(1);
 	spin_lock_init(&pipe->fence_lock);
 
 	INIT_WORK(&pipe->recover_work, lima_sched_recover_work);
 
-	return drm_sched_init(&pipe->base, &lima_sched_ops, NULL,
-			      DRM_SCHED_PRIORITY_COUNT,
-			      1,
-			      lima_job_hang_limit,
-			      msecs_to_jiffies(timeout), NULL,
-			      NULL, name, pipe->ldev->dev);
+	return drm_sched_init(&pipe->base, &args);
 }
 
 void lima_sched_pipe_fini(struct lima_sched_pipe *pipe)
@@ -59,8 +59,14 @@ static const struct drm_sched_backend_ops msm_sched_ops = {
 struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
 		void *memptrs, uint64_t memptrs_iova)
 {
+	struct drm_sched_init_args args = {
+		.ops = &msm_sched_ops,
+		.num_rqs = DRM_SCHED_PRIORITY_COUNT,
+		.credit_limit = num_hw_submissions,
+		.timeout = MAX_SCHEDULE_TIMEOUT,
+		.dev = gpu->dev->dev,
+	};
 	struct msm_ringbuffer *ring;
-	long sched_timeout;
 	char name[32];
 	int ret;
 
@@ -87,6 +93,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
 	}
 
 	msm_gem_object_set_name(ring->bo, "ring%d", id);
+	args.name = to_msm_bo(ring->bo)->name,
 
 	ring->end   = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2);
 	ring->next  = ring->start;
@@ -95,13 +102,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
 	ring->memptrs = memptrs;
 	ring->memptrs_iova = memptrs_iova;
 
-	/* currently managing hangcheck ourselves: */
-	sched_timeout = MAX_SCHEDULE_TIMEOUT;
-
-	ret = drm_sched_init(&ring->sched, &msm_sched_ops, NULL,
-			     DRM_SCHED_PRIORITY_COUNT,
-			     num_hw_submissions, 0, sched_timeout,
-			     NULL, NULL, to_msm_bo(ring->bo)->name, gpu->dev->dev);
+	ret = drm_sched_init(&ring->sched, &args);
 	if (ret) {
 		goto fail;
 	}
@@ -404,7 +404,14 @@ nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm,
 {
 	struct drm_gpu_scheduler *drm_sched = &sched->base;
 	struct drm_sched_entity *entity = &sched->entity;
-	const long timeout = msecs_to_jiffies(NOUVEAU_SCHED_JOB_TIMEOUT_MS);
+	struct drm_sched_init_args args = {
+		.ops = &nouveau_sched_ops,
+		.num_rqs = DRM_SCHED_PRIORITY_COUNT,
+		.credit_limit = credit_limit,
+		.timeout = msecs_to_jiffies(NOUVEAU_SCHED_JOB_TIMEOUT_MS),
+		.name = "nouveau_sched",
+		.dev = drm->dev->dev
+	};
 	int ret;
 
 	if (!wq) {
@@ -416,10 +423,9 @@ nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm,
 		sched->wq = wq;
 	}
 
-	ret = drm_sched_init(drm_sched, &nouveau_sched_ops, wq,
-			     NOUVEAU_SCHED_PRIORITY_COUNT,
-			     credit_limit, 0, timeout,
-			     NULL, NULL, "nouveau_sched", drm->dev->dev);
+	args.submit_wq = wq,
+
+	ret = drm_sched_init(drm_sched, &args);
 	if (ret)
 		goto fail_wq;
 
@@ -836,8 +836,16 @@ static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
 
 int panfrost_job_init(struct panfrost_device *pfdev)
 {
+	struct drm_sched_init_args args = {
+		.ops = &panfrost_sched_ops,
+		.num_rqs = DRM_SCHED_PRIORITY_COUNT,
+		.credit_limit = 2,
+		.timeout = msecs_to_jiffies(JOB_TIMEOUT_MS),
+		.timeout_wq = pfdev->reset.wq,
+		.name = "pan_js",
+		.dev = pfdev->dev,
+	};
 	struct panfrost_job_slot *js;
-	unsigned int nentries = 2;
 	int ret, j;
 
 	/* All GPUs have two entries per queue, but without jobchain
@@ -845,7 +853,7 @@ int panfrost_job_init(struct panfrost_device *pfdev)
 	 * so let's just advertise one entry in that case.
 	 */
 	if (!panfrost_has_hw_feature(pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION))
-		nentries = 1;
+		args.credit_limit = 1;
 
 	pfdev->js = js = devm_kzalloc(pfdev->dev, sizeof(*js), GFP_KERNEL);
 	if (!js)
@@ -875,13 +883,7 @@ int panfrost_job_init(struct panfrost_device *pfdev)
 	for (j = 0; j < NUM_JOB_SLOTS; j++) {
 		js->queue[j].fence_context = dma_fence_context_alloc(1);
 
-		ret = drm_sched_init(&js->queue[j].sched,
-				     &panfrost_sched_ops, NULL,
-				     DRM_SCHED_PRIORITY_COUNT,
-				     nentries, 0,
-				     msecs_to_jiffies(JOB_TIMEOUT_MS),
-				     pfdev->reset.wq,
-				     NULL, "pan_js", pfdev->dev);
+		ret = drm_sched_init(&js->queue[j].sched, &args);
 		if (ret) {
 			dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret);
 			goto err_sched;
@@ -2311,6 +2311,16 @@ panthor_vm_create(struct panthor_device *ptdev, bool for_mcu,
 	u64 full_va_range = 1ull << va_bits;
 	struct drm_gem_object *dummy_gem;
 	struct drm_gpu_scheduler *sched;
+	const struct drm_sched_init_args sched_args = {
+		.ops = &panthor_vm_bind_ops,
+		.submit_wq = ptdev->mmu->vm.wq,
+		.num_rqs = 1,
+		.credit_limit = 1,
+		/* Bind operations are synchronous for now, no timeout needed. */
+		.timeout = MAX_SCHEDULE_TIMEOUT,
+		.name = "panthor-vm-bind",
+		.dev = ptdev->base.dev,
+	};
 	struct io_pgtable_cfg pgtbl_cfg;
 	u64 mair, min_va, va_range;
 	struct panthor_vm *vm;
@@ -2368,11 +2378,7 @@ panthor_vm_create(struct panthor_device *ptdev, bool for_mcu,
 		goto err_mm_takedown;
 	}
 
-	/* Bind operations are synchronous for now, no timeout needed. */
-	ret = drm_sched_init(&vm->sched, &panthor_vm_bind_ops, ptdev->mmu->vm.wq,
-			     1, 1, 0,
-			     MAX_SCHEDULE_TIMEOUT, NULL, NULL,
-			     "panthor-vm-bind", ptdev->base.dev);
+	ret = drm_sched_init(&vm->sched, &sched_args);
 	if (ret)
 		goto err_free_io_pgtable;
 
@@ -3289,6 +3289,22 @@ static struct panthor_queue *
 group_create_queue(struct panthor_group *group,
 		   const struct drm_panthor_queue_create *args)
 {
+	const struct drm_sched_init_args sched_args = {
+		.ops = &panthor_queue_sched_ops,
+		.submit_wq = group->ptdev->scheduler->wq,
+		.num_rqs = 1,
+		/*
+		 * The credit limit argument tells us the total number of
+		 * instructions across all CS slots in the ringbuffer, with
+		 * some jobs requiring twice as many as others, depending on
+		 * their profiling status.
+		 */
+		.credit_limit = args->ringbuf_size / sizeof(u64),
+		.timeout = msecs_to_jiffies(JOB_TIMEOUT_MS),
+		.timeout_wq = group->ptdev->reset.wq,
+		.name = "panthor-queue",
+		.dev = group->ptdev->base.dev,
+	};
 	struct drm_gpu_scheduler *drm_sched;
 	struct panthor_queue *queue;
 	int ret;
@@ -3359,17 +3375,7 @@ group_create_queue(struct panthor_group *group,
 	if (ret)
 		goto err_free_queue;
 
-	/*
-	 * Credit limit argument tells us the total number of instructions
-	 * across all CS slots in the ringbuffer, with some jobs requiring
-	 * twice as many as others, depending on their profiling status.
-	 */
-	ret = drm_sched_init(&queue->scheduler, &panthor_queue_sched_ops,
-			     group->ptdev->scheduler->wq, 1,
-			     args->ringbuf_size / sizeof(u64),
-			     0, msecs_to_jiffies(JOB_TIMEOUT_MS),
-			     group->ptdev->reset.wq,
-			     NULL, "panthor-queue", group->ptdev->base.dev);
+	ret = drm_sched_init(&queue->scheduler, &sched_args);
 	if (ret)
 		goto err_free_queue;
 
@@ -1244,40 +1244,24 @@ static void drm_sched_run_job_work(struct work_struct *w)
  * drm_sched_init - Init a gpu scheduler instance
  *
  * @sched: scheduler instance
- * @ops: backend operations for this scheduler
- * @submit_wq: workqueue to use for submission. If NULL, an ordered wq is
- *	       allocated and used
- * @num_rqs: number of runqueues, one for each priority, up to DRM_SCHED_PRIORITY_COUNT
- * @credit_limit: the number of credits this scheduler can hold from all jobs
- * @hang_limit: number of times to allow a job to hang before dropping it
- * @timeout: timeout value in jiffies for the scheduler
- * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is
- *		used
- * @score: optional score atomic shared with other schedulers
- * @name: name used for debugging
- * @dev: target &struct device
+ * @args: scheduler initialization arguments
  *
  * Return 0 on success, otherwise error code.
  */
-int drm_sched_init(struct drm_gpu_scheduler *sched,
-		   const struct drm_sched_backend_ops *ops,
-		   struct workqueue_struct *submit_wq,
-		   u32 num_rqs, u32 credit_limit, unsigned int hang_limit,
-		   long timeout, struct workqueue_struct *timeout_wq,
-		   atomic_t *score, const char *name, struct device *dev)
+int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_init_args *args)
 {
 	int i;
 
-	sched->ops = ops;
-	sched->credit_limit = credit_limit;
-	sched->name = name;
-	sched->timeout = timeout;
-	sched->timeout_wq = timeout_wq ? : system_wq;
-	sched->hang_limit = hang_limit;
-	sched->score = score ? score : &sched->_score;
-	sched->dev = dev;
+	sched->ops = args->ops;
+	sched->credit_limit = args->credit_limit;
+	sched->name = args->name;
+	sched->timeout = args->timeout;
+	sched->hang_limit = args->hang_limit;
+	sched->timeout_wq = args->timeout_wq ? args->timeout_wq : system_wq;
+	sched->score = args->score ? args->score : &sched->_score;
+	sched->dev = args->dev;
 
-	if (num_rqs > DRM_SCHED_PRIORITY_COUNT) {
+	if (args->num_rqs > DRM_SCHED_PRIORITY_COUNT) {
 		/* This is a gross violation--tell drivers what the problem is.
 		 */
 		drm_err(sched, "%s: num_rqs cannot be greater than DRM_SCHED_PRIORITY_COUNT\n",
@@ -1292,16 +1276,16 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 		return 0;
 	}
 
-	if (submit_wq) {
-		sched->submit_wq = submit_wq;
+	if (args->submit_wq) {
+		sched->submit_wq = args->submit_wq;
 		sched->own_submit_wq = false;
 	} else {
 #ifdef CONFIG_LOCKDEP
-		sched->submit_wq = alloc_ordered_workqueue_lockdep_map(name,
+		sched->submit_wq = alloc_ordered_workqueue_lockdep_map(args->name,
 								       WQ_MEM_RECLAIM,
 								       &drm_sched_lockdep_map);
 #else
-		sched->submit_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
+		sched->submit_wq = alloc_ordered_workqueue(args->name, WQ_MEM_RECLAIM);
 #endif
 		if (!sched->submit_wq)
 			return -ENOMEM;
@@ -1309,11 +1293,11 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 		sched->own_submit_wq = true;
 	}
 
-	sched->sched_rq = kmalloc_array(num_rqs, sizeof(*sched->sched_rq),
+	sched->sched_rq = kmalloc_array(args->num_rqs, sizeof(*sched->sched_rq),
 					GFP_KERNEL | __GFP_ZERO);
 	if (!sched->sched_rq)
 		goto Out_check_own;
-	sched->num_rqs = num_rqs;
+	sched->num_rqs = args->num_rqs;
 	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
 		sched->sched_rq[i] = kzalloc(sizeof(*sched->sched_rq[i]), GFP_KERNEL);
 		if (!sched->sched_rq[i])
@@ -820,67 +820,54 @@ static const struct drm_sched_backend_ops v3d_cpu_sched_ops = {
 	.free_job = v3d_cpu_job_free
 };
 
+static int
+v3d_queue_sched_init(struct v3d_dev *v3d, const struct drm_sched_backend_ops *ops,
+		     enum v3d_queue queue, const char *name)
+{
+	struct drm_sched_init_args args = {
+		.num_rqs = DRM_SCHED_PRIORITY_COUNT,
+		.credit_limit = 1,
+		.timeout = msecs_to_jiffies(500),
+		.dev = v3d->drm.dev,
+	};
+
+	args.ops = ops;
+	args.name = name;
+
+	return drm_sched_init(&v3d->queue[queue].sched, &args);
+}
+
 int
 v3d_sched_init(struct v3d_dev *v3d)
 {
-	int hw_jobs_limit = 1;
-	int job_hang_limit = 0;
-	int hang_limit_ms = 500;
 	int ret;
 
-	ret = drm_sched_init(&v3d->queue[V3D_BIN].sched,
-			     &v3d_bin_sched_ops, NULL,
-			     DRM_SCHED_PRIORITY_COUNT,
-			     hw_jobs_limit, job_hang_limit,
-			     msecs_to_jiffies(hang_limit_ms), NULL,
-			     NULL, "v3d_bin", v3d->drm.dev);
+	ret = v3d_queue_sched_init(v3d, &v3d_bin_sched_ops, V3D_BIN, "v3d_bin");
 	if (ret)
 		return ret;
 
-	ret = drm_sched_init(&v3d->queue[V3D_RENDER].sched,
-			     &v3d_render_sched_ops, NULL,
-			     DRM_SCHED_PRIORITY_COUNT,
-			     hw_jobs_limit, job_hang_limit,
-			     msecs_to_jiffies(hang_limit_ms), NULL,
-			     NULL, "v3d_render", v3d->drm.dev);
+	ret = v3d_queue_sched_init(v3d, &v3d_render_sched_ops, V3D_RENDER,
+				   "v3d_render");
 	if (ret)
 		goto fail;
 
-	ret = drm_sched_init(&v3d->queue[V3D_TFU].sched,
-			     &v3d_tfu_sched_ops, NULL,
-			     DRM_SCHED_PRIORITY_COUNT,
-			     hw_jobs_limit, job_hang_limit,
-			     msecs_to_jiffies(hang_limit_ms), NULL,
-			     NULL, "v3d_tfu", v3d->drm.dev);
+	ret = v3d_queue_sched_init(v3d, &v3d_tfu_sched_ops, V3D_TFU, "v3d_tfu");
 	if (ret)
 		goto fail;
 
 	if (v3d_has_csd(v3d)) {
-		ret = drm_sched_init(&v3d->queue[V3D_CSD].sched,
-				     &v3d_csd_sched_ops, NULL,
-				     DRM_SCHED_PRIORITY_COUNT,
-				     hw_jobs_limit, job_hang_limit,
-				     msecs_to_jiffies(hang_limit_ms), NULL,
-				     NULL, "v3d_csd", v3d->drm.dev);
+		ret = v3d_queue_sched_init(v3d, &v3d_csd_sched_ops, V3D_CSD,
+					   "v3d_csd");
 		if (ret)
 			goto fail;
 
-		ret = drm_sched_init(&v3d->queue[V3D_CACHE_CLEAN].sched,
-				     &v3d_cache_clean_sched_ops, NULL,
-				     DRM_SCHED_PRIORITY_COUNT,
-				     hw_jobs_limit, job_hang_limit,
-				     msecs_to_jiffies(hang_limit_ms), NULL,
-				     NULL, "v3d_cache_clean", v3d->drm.dev);
+		ret = v3d_queue_sched_init(v3d, &v3d_cache_clean_sched_ops,
+					   V3D_CACHE_CLEAN, "v3d_cache_clean");
 		if (ret)
 			goto fail;
 	}
 
-	ret = drm_sched_init(&v3d->queue[V3D_CPU].sched,
-			     &v3d_cpu_sched_ops, NULL,
-			     DRM_SCHED_PRIORITY_COUNT,
-			     1, job_hang_limit,
-			     msecs_to_jiffies(hang_limit_ms), NULL,
-			     NULL, "v3d_cpu", v3d->drm.dev);
+	ret = v3d_queue_sched_init(v3d, &v3d_cpu_sched_ops, V3D_CPU, "v3d_cpu");
 	if (ret)
 		goto fail;
 
|
|||||||
static int execlist_exec_queue_init(struct xe_exec_queue *q)
|
static int execlist_exec_queue_init(struct xe_exec_queue *q)
|
||||||
{
|
{
|
||||||
struct drm_gpu_scheduler *sched;
|
struct drm_gpu_scheduler *sched;
|
||||||
|
const struct drm_sched_init_args args = {
|
||||||
|
.ops = &drm_sched_ops,
|
||||||
|
.num_rqs = 1,
|
||||||
|
.credit_limit = q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES,
|
||||||
|
.hang_limit = XE_SCHED_HANG_LIMIT,
|
||||||
|
.timeout = XE_SCHED_JOB_TIMEOUT,
|
||||||
|
.name = q->hwe->name,
|
||||||
|
.dev = gt_to_xe(q->gt)->drm.dev,
|
||||||
|
};
|
||||||
struct xe_execlist_exec_queue *exl;
|
struct xe_execlist_exec_queue *exl;
|
||||||
struct xe_device *xe = gt_to_xe(q->gt);
|
struct xe_device *xe = gt_to_xe(q->gt);
|
||||||
int err;
|
int err;
|
||||||
@ -350,11 +359,7 @@ static int execlist_exec_queue_init(struct xe_exec_queue *q)
|
|||||||
|
|
||||||
exl->q = q;
|
exl->q = q;
|
||||||
|
|
||||||
err = drm_sched_init(&exl->sched, &drm_sched_ops, NULL, 1,
|
err = drm_sched_init(&exl->sched, &args);
|
||||||
q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES,
|
|
||||||
XE_SCHED_HANG_LIMIT, XE_SCHED_JOB_TIMEOUT,
|
|
||||||
NULL, NULL, q->hwe->name,
|
|
||||||
gt_to_xe(q->gt)->drm.dev);
|
|
||||||
if (err)
|
if (err)
|
||||||
goto err_free;
|
goto err_free;
|
||||||
|
|
||||||
|
@@ -63,13 +63,24 @@ int xe_sched_init(struct xe_gpu_scheduler *sched,
 		  atomic_t *score, const char *name,
 		  struct device *dev)
 {
+	const struct drm_sched_init_args args = {
+		.ops = ops,
+		.submit_wq = submit_wq,
+		.num_rqs = 1,
+		.credit_limit = hw_submission,
+		.hang_limit = hang_limit,
+		.timeout = timeout,
+		.timeout_wq = timeout_wq,
+		.score = score,
+		.name = name,
+		.dev = dev,
+	};
+
 	sched->ops = xe_ops;
 	INIT_LIST_HEAD(&sched->msgs);
 	INIT_WORK(&sched->work_process_msg, xe_sched_process_msg_work);
 
-	return drm_sched_init(&sched->base, ops, submit_wq, 1, hw_submission,
-			      hang_limit, timeout, timeout_wq, score, name,
-			      dev);
+	return drm_sched_init(&sched->base, &args);
 }
 
 void xe_sched_fini(struct xe_gpu_scheduler *sched)
@@ -540,12 +540,38 @@ struct drm_gpu_scheduler {
 	struct device		*dev;
 };
 
+/**
+ * struct drm_sched_init_args - parameters for initializing a DRM GPU scheduler
+ *
+ * @ops: backend operations provided by the driver
+ * @submit_wq: workqueue to use for submission. If NULL, an ordered wq is
+ *	allocated and used.
+ * @num_rqs: Number of run-queues. This may be at most DRM_SCHED_PRIORITY_COUNT,
+ *	as there's usually one run-queue per priority, but may be less.
+ * @credit_limit: the number of credits this scheduler can hold from all jobs
+ * @hang_limit: number of times to allow a job to hang before dropping it.
+ *	This mechanism is DEPRECATED. Set it to 0.
+ * @timeout: timeout value in jiffies for submitted jobs.
+ * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is used.
+ * @score: score atomic shared with other schedulers. May be NULL.
+ * @name: name (typically the driver's name). Used for debugging
+ * @dev: associated device. Used for debugging
+ */
+struct drm_sched_init_args {
+	const struct drm_sched_backend_ops *ops;
+	struct workqueue_struct *submit_wq;
+	struct workqueue_struct *timeout_wq;
+	u32 num_rqs;
+	u32 credit_limit;
+	unsigned int hang_limit;
+	long timeout;
+	atomic_t *score;
+	const char *name;
+	struct device *dev;
+};
+
 int drm_sched_init(struct drm_gpu_scheduler *sched,
-		   const struct drm_sched_backend_ops *ops,
-		   struct workqueue_struct *submit_wq,
-		   u32 num_rqs, u32 credit_limit, unsigned int hang_limit,
-		   long timeout, struct workqueue_struct *timeout_wq,
-		   atomic_t *score, const char *name, struct device *dev);
+		   const struct drm_sched_init_args *args);
 
 void drm_sched_fini(struct drm_gpu_scheduler *sched);
 int drm_sched_job_init(struct drm_sched_job *job,
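
Taken together, the new interface boils down to filling in a drm_sched_init_args on the stack and passing it to drm_sched_init(). A minimal sketch for a hypothetical driver (foo_sched_ops, foo->sched and the values shown are illustrative only; .submit_wq, .timeout_wq and .score may be left NULL to get the defaults described in the kerneldoc above):

	const struct drm_sched_init_args args = {
		.ops = &foo_sched_ops,                /* driver's backend ops */
		.num_rqs = DRM_SCHED_PRIORITY_COUNT,  /* one run-queue per priority */
		.credit_limit = 2,                    /* hardware can hold two jobs */
		.timeout = msecs_to_jiffies(500),     /* job timeout in jiffies */
		.name = "foo-sched",                  /* used for debugging */
		.dev = foo->dev,
	};
	int ret = drm_sched_init(&foo->sched, &args);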