
A customer is reporting a really subtle issue where we get random DMAR
faults, hangs and other nasties for kernel migration jobs when stressing
stuff like s2idle/s3/s4. The explosions seem to happen somewhere after
resuming the system, with splats looking something like:

PM: suspend exit
rfkill: input handler disabled
xe 0000:00:02.0: [drm] GT0: Engine reset: engine_class=bcs, logical_mask: 0x2, guc_id=0
xe 0000:00:02.0: [drm] GT0: Timedout job: seqno=24496, lrc_seqno=24496, guc_id=0, flags=0x13 in no process [-1]
xe 0000:00:02.0: [drm] GT0: Kernel-submitted job timed out

The likely cause appears to be a race between suspend cancelling the
worker that processes the free_job()'s, such that we still have pending
jobs to be freed after the cancel. Following from this, on resume the
pending_list will now contain at least one already complete job, but it
looks like we call drm_sched_resubmit_jobs(), which will then call
run_job() on everything still on the pending_list. But if the job was
already complete, then all the resources tied to the job, like the bb
itself, any memory that is being accessed, the iommu mappings etc. might
be long gone since those are usually tied to the fence signalling.

This scenario can be seen in ftrace when running a slightly modified
xe_pm IGT (the kernel was only modified to inject artificial latency
into free_job to make the race easier to hit):

xe_sched_job_run: dev=0000:00:02.0, fence=0xffff888276cc8540, seqno=0, lrc_seqno=0, gt=0, guc_id=0, batch_addr=0x000000146910
...
xe_exec_queue_stop: dev=0000:00:02.0, 3:0x2, gt=0, width=1, guc_id=0, guc_state=0x0, flags=0x13
xe_exec_queue_stop: dev=0000:00:02.0, 3:0x2, gt=0, width=1, guc_id=1, guc_state=0x0, flags=0x4
xe_exec_queue_stop: dev=0000:00:02.0, 4:0x1, gt=1, width=1, guc_id=0, guc_state=0x0, flags=0x3
xe_exec_queue_stop: dev=0000:00:02.0, 1:0x1, gt=1, width=1, guc_id=1, guc_state=0x0, flags=0x3
xe_exec_queue_stop: dev=0000:00:02.0, 4:0x1, gt=1, width=1, guc_id=2, guc_state=0x0, flags=0x3
xe_exec_queue_resubmit: dev=0000:00:02.0, 3:0x2, gt=0, width=1, guc_id=0, guc_state=0x0, flags=0x13
xe_sched_job_run: dev=0000:00:02.0, fence=0xffff888276cc8540, seqno=0, lrc_seqno=0, gt=0, guc_id=0, batch_addr=0x000000146910
...
.....
xe_exec_queue_memory_cat_error: dev=0000:00:02.0, 3:0x2, gt=0, width=1, guc_id=0, guc_state=0x3, flags=0x13

So run_job() is clearly triggered twice for the same job, even though the
first run must have already signalled to completion during suspend. We can
also see a CAT error after the re-submit.

To prevent this, only resubmit jobs on the pending_list that have not yet
signalled.

v2:
  - Make sure to re-arm the fence callbacks with sched_start().
v3 (Matt B):
  - Stop using drm_sched_resubmit_jobs(), which appears to be deprecated,
    and just open-code a simple loop such that we skip calling run_job()
    on anything already signalled.

Link: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/4856
Fixes: dd08ebf6c3 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: William Tseng <william.tseng@intel.com>
Cc: <stable@vger.kernel.org> # v6.8+
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Tejas Upadhyay <tejas.upadhyay@intel.com>
Link: https://lore.kernel.org/r/20250528113328.289392-2-matthew.auld@intel.com
(cherry picked from commit 38fafa9f39)
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
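For context, below is a minimal sketch of how a resume path could drive the
helpers declared in the header that follows, once the fix is in place:
resubmit only the jobs whose hardware fence has not signalled, then restart
submission so the fence callbacks are re-armed (the v2 note above), and
resume the timeout handler. The function name example_sched_resume() and the
bare call sequence are illustrative assumptions, not the driver's actual
resume code.

#include "xe_gpu_scheduler.h"

/*
 * Illustrative sketch only: example_sched_resume() is a hypothetical helper,
 * not part of the xe driver.
 */
static void example_sched_resume(struct xe_gpu_scheduler *sched)
{
	/* Re-run only pending jobs whose hardware fence has not signalled. */
	xe_sched_resubmit_jobs(sched);

	/* Restart submission; per the commit message this re-arms the fence
	 * callbacks via sched_start().
	 */
	xe_sched_submission_start(sched);

	/* Resume the timeout handler for anything still outstanding. */
	xe_sched_submission_resume_tdr(sched);
}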
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

#ifndef _XE_GPU_SCHEDULER_H_
#define _XE_GPU_SCHEDULER_H_

#include "xe_gpu_scheduler_types.h"
#include "xe_sched_job_types.h"

int xe_sched_init(struct xe_gpu_scheduler *sched,
		  const struct drm_sched_backend_ops *ops,
		  const struct xe_sched_backend_ops *xe_ops,
		  struct workqueue_struct *submit_wq,
		  uint32_t hw_submission, unsigned hang_limit,
		  long timeout, struct workqueue_struct *timeout_wq,
		  atomic_t *score, const char *name,
		  struct device *dev);
void xe_sched_fini(struct xe_gpu_scheduler *sched);

void xe_sched_submission_start(struct xe_gpu_scheduler *sched);
void xe_sched_submission_stop(struct xe_gpu_scheduler *sched);

void xe_sched_submission_resume_tdr(struct xe_gpu_scheduler *sched);

void xe_sched_add_msg(struct xe_gpu_scheduler *sched,
		      struct xe_sched_msg *msg);
void xe_sched_add_msg_locked(struct xe_gpu_scheduler *sched,
			     struct xe_sched_msg *msg);

static inline void xe_sched_msg_lock(struct xe_gpu_scheduler *sched)
{
	spin_lock(&sched->base.job_list_lock);
}

static inline void xe_sched_msg_unlock(struct xe_gpu_scheduler *sched)
{
	spin_unlock(&sched->base.job_list_lock);
}

static inline void xe_sched_stop(struct xe_gpu_scheduler *sched)
{
	drm_sched_stop(&sched->base, NULL);
}

static inline void xe_sched_tdr_queue_imm(struct xe_gpu_scheduler *sched)
{
	drm_sched_tdr_queue_imm(&sched->base);
}

/*
 * Re-run only those pending jobs whose hardware fence has not signalled.
 * Jobs that already completed (e.g. while the free_job() worker was
 * cancelled around suspend) may have had their backing resources freed,
 * so they must not be handed to run_job() again.
 */
static inline void xe_sched_resubmit_jobs(struct xe_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job;

	list_for_each_entry(s_job, &sched->base.pending_list, list) {
		struct drm_sched_fence *s_fence = s_job->s_fence;
		struct dma_fence *hw_fence = s_fence->parent;

		if (hw_fence && !dma_fence_is_signaled(hw_fence))
			sched->base.ops->run_job(s_job);
	}
}

static inline bool
xe_sched_invalidate_job(struct xe_sched_job *job, int threshold)
{
	return drm_sched_invalidate_job(&job->drm, threshold);
}

static inline void xe_sched_add_pending_job(struct xe_gpu_scheduler *sched,
					    struct xe_sched_job *job)
{
	spin_lock(&sched->base.job_list_lock);
	list_add(&job->drm.list, &sched->base.pending_list);
	spin_unlock(&sched->base.job_list_lock);
}

static inline
struct xe_sched_job *xe_sched_first_pending_job(struct xe_gpu_scheduler *sched)
{
	struct xe_sched_job *job;

	spin_lock(&sched->base.job_list_lock);
	job = list_first_entry_or_null(&sched->base.pending_list,
				       struct xe_sched_job, drm.list);
	spin_unlock(&sched->base.job_list_lock);

	return job;
}

static inline int
xe_sched_entity_init(struct xe_sched_entity *entity,
		     struct xe_gpu_scheduler *sched)
{
	return drm_sched_entity_init(entity, 0,
				     (struct drm_gpu_scheduler **)&sched,
				     1, NULL);
}

#define xe_sched_entity_fini drm_sched_entity_fini

#endif