accel/ivpu: Use workqueue for IRQ handling
Convert the IRQ bottom half from a threaded IRQ handler to a workqueue. This improves stability in rare scenarios where a driver on a debugging/hardening kernel processes IRQs too slowly and misses some interrupts as a result. The workqueue handler also gives a very minor performance increase.

Signed-off-by: Maciej Falkowski <maciej.falkowski@linux.intel.com>
Reviewed-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20250107173238.381120-6-maciej.falkowski@linux.intel.com
parent 7bfc9fa995
commit bc3e5f48b7
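For context, the pattern being adopted: a threaded IRQ splits handling into a hard handler plus a sleepable IRQ-thread function, while the workqueue variant registers only the hard handler and defers the sleepable part to a work item. Below is a minimal sketch of the resulting shape, using hypothetical names (my_dev, my_hard_irq, my_irq_work_fn) rather than the driver's own code:

#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct my_dev {
	int irq;
	struct work_struct irq_work;
};

/* Bottom half as a work item: runs in process context and may sleep. */
static void my_irq_work_fn(struct work_struct *work)
{
	struct my_dev *mydev = container_of(work, struct my_dev, irq_work);

	/* slow, sleepable interrupt processing goes here */
}

/* Top half: ack the hardware, then defer the heavy lifting. */
static irqreturn_t my_hard_irq(int irq, void *arg)
{
	struct my_dev *mydev = arg;

	queue_work(system_wq, &mydev->irq_work);
	return IRQ_HANDLED;
}

static int my_irq_init(struct device *dev, struct my_dev *mydev)
{
	INIT_WORK(&mydev->irq_work, my_irq_work_fn);

	/* Previously this would have been devm_request_threaded_irq(dev,
	 * mydev->irq, my_hard_irq, my_thread_fn, IRQF_NO_AUTOEN, ...);
	 * now only the hard handler is registered. */
	return devm_request_irq(dev, mydev->irq, my_hard_irq,
				IRQF_NO_AUTOEN, "my-drv", mydev);
}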
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -7,6 +7,7 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/pm_runtime.h>
+#include <linux/workqueue.h>
 #include <generated/utsrelease.h>
 
 #include <drm/drm_accel.h>
@@ -421,6 +422,9 @@ void ivpu_prepare_for_reset(struct ivpu_device *vdev)
 {
 	ivpu_hw_irq_disable(vdev);
 	disable_irq(vdev->irq);
+	cancel_work_sync(&vdev->irq_ipc_work);
+	cancel_work_sync(&vdev->irq_dct_work);
+	cancel_work_sync(&vdev->context_abort_work);
 	ivpu_ipc_disable(vdev);
 	ivpu_mmu_disable(vdev);
 }
@@ -465,31 +469,6 @@ static const struct drm_driver driver = {
 	.major = 1,
 };
 
-static irqreturn_t ivpu_irq_thread_handler(int irq, void *arg)
-{
-	struct ivpu_device *vdev = arg;
-	u8 irq_src;
-
-	if (kfifo_is_empty(&vdev->hw->irq.fifo))
-		return IRQ_NONE;
-
-	while (kfifo_get(&vdev->hw->irq.fifo, &irq_src)) {
-		switch (irq_src) {
-		case IVPU_HW_IRQ_SRC_IPC:
-			ivpu_ipc_irq_thread_handler(vdev);
-			break;
-		case IVPU_HW_IRQ_SRC_DCT:
-			ivpu_pm_dct_irq_thread_handler(vdev);
-			break;
-		default:
-			ivpu_err_ratelimited(vdev, "Unknown IRQ source: %u\n", irq_src);
-			break;
-		}
-	}
-
-	return IRQ_HANDLED;
-}
-
 static int ivpu_irq_init(struct ivpu_device *vdev)
 {
 	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
@@ -501,12 +480,16 @@ static int ivpu_irq_init(struct ivpu_device *vdev)
 		return ret;
 	}
 
+	INIT_WORK(&vdev->irq_ipc_work, ivpu_ipc_irq_work_fn);
+	INIT_WORK(&vdev->irq_dct_work, ivpu_pm_irq_dct_work_fn);
+	INIT_WORK(&vdev->context_abort_work, ivpu_context_abort_work_fn);
+
 	ivpu_irq_handlers_init(vdev);
 
 	vdev->irq = pci_irq_vector(pdev, 0);
 
-	ret = devm_request_threaded_irq(vdev->drm.dev, vdev->irq, ivpu_hw_irq_handler,
-					ivpu_irq_thread_handler, IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
+	ret = devm_request_irq(vdev->drm.dev, vdev->irq, ivpu_hw_irq_handler,
+			       IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
 	if (ret)
 		ivpu_err(vdev, "Failed to request an IRQ %d\n", ret);
 
@@ -599,8 +582,6 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
 	vdev->db_limit.min = IVPU_MIN_DB;
 	vdev->db_limit.max = IVPU_MAX_DB;
 
-	INIT_WORK(&vdev->context_abort_work, ivpu_context_abort_thread_handler);
-
 	ret = drmm_mutex_init(&vdev->drm, &vdev->context_list_lock);
 	if (ret)
 		goto err_xa_destroy;
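Note the ordering in the ivpu_prepare_for_reset() hunk above: the interrupt is disabled first, so the top half can no longer queue new work, and only then is each work item cancelled; cancel_work_sync() also waits for a currently running work item to finish. The same ordering, sketched with the hypothetical my_dev from the example above:

static void my_prepare_for_reset(struct my_dev *mydev)
{
	disable_irq(mydev->irq);            /* no new top-half invocations */
	cancel_work_sync(&mydev->irq_work); /* cancel pending, wait for running */
	/* from here on, nothing touches the hardware concurrently */
}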
diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
--- a/drivers/accel/ivpu/ivpu_drv.h
+++ b/drivers/accel/ivpu/ivpu_drv.h
@@ -137,12 +137,15 @@ struct ivpu_device {
 	struct mutex context_list_lock; /* Protects user context addition/removal */
 	struct xarray context_xa;
 	struct xa_limit context_xa_limit;
-	struct work_struct context_abort_work;
 
 	struct xarray db_xa;
 	struct xa_limit db_limit;
 	u32 db_next;
 
+	struct work_struct irq_ipc_work;
+	struct work_struct irq_dct_work;
+	struct work_struct context_abort_work;
+
 	struct mutex bo_list_lock; /* Protects bo_list */
 	struct list_head bo_list;
 
diff --git a/drivers/accel/ivpu/ivpu_hw.c b/drivers/accel/ivpu/ivpu_hw.c
--- a/drivers/accel/ivpu/ivpu_hw.c
+++ b/drivers/accel/ivpu/ivpu_hw.c
@@ -285,8 +285,6 @@ void ivpu_hw_profiling_freq_drive(struct ivpu_device *vdev, bool enable)
 
 void ivpu_irq_handlers_init(struct ivpu_device *vdev)
 {
-	INIT_KFIFO(vdev->hw->irq.fifo);
-
 	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
 		vdev->hw->irq.ip_irq_handler = ivpu_hw_ip_irq_handler_37xx;
 	else
@@ -300,7 +298,6 @@ void ivpu_irq_handlers_init(struct ivpu_device *vdev)
 
 void ivpu_hw_irq_enable(struct ivpu_device *vdev)
 {
-	kfifo_reset(&vdev->hw->irq.fifo);
 	ivpu_hw_ip_irq_enable(vdev);
 	ivpu_hw_btrs_irq_enable(vdev);
 }
@@ -327,8 +324,6 @@ irqreturn_t ivpu_hw_irq_handler(int irq, void *ptr)
 	/* Re-enable global interrupts to re-trigger MSI for pending interrupts */
 	ivpu_hw_btrs_global_int_enable(vdev);
 
-	if (!kfifo_is_empty(&vdev->hw->irq.fifo))
-		return IRQ_WAKE_THREAD;
 	if (ip_handled || btrs_handled)
 		return IRQ_HANDLED;
 	return IRQ_NONE;
diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h
--- a/drivers/accel/ivpu/ivpu_hw.h
+++ b/drivers/accel/ivpu/ivpu_hw.h
@@ -6,18 +6,10 @@
 #ifndef __IVPU_HW_H__
 #define __IVPU_HW_H__
 
-#include <linux/kfifo.h>
-
 #include "ivpu_drv.h"
 #include "ivpu_hw_btrs.h"
 #include "ivpu_hw_ip.h"
 
-#define IVPU_HW_IRQ_FIFO_LENGTH 1024
-
-#define IVPU_HW_IRQ_SRC_IPC 1
-#define IVPU_HW_IRQ_SRC_MMU_EVTQ 2
-#define IVPU_HW_IRQ_SRC_DCT 3
-
 struct ivpu_addr_range {
 	resource_size_t start;
 	resource_size_t end;
@@ -27,7 +19,6 @@ struct ivpu_hw_info {
 	struct {
 		bool (*btrs_irq_handler)(struct ivpu_device *vdev, int irq);
 		bool (*ip_irq_handler)(struct ivpu_device *vdev, int irq);
-		DECLARE_KFIFO(fifo, u8, IVPU_HW_IRQ_FIFO_LENGTH);
 	} irq;
 	struct {
 		struct ivpu_addr_range global;
diff --git a/drivers/accel/ivpu/ivpu_hw_btrs.c b/drivers/accel/ivpu/ivpu_hw_btrs.c
--- a/drivers/accel/ivpu/ivpu_hw_btrs.c
+++ b/drivers/accel/ivpu/ivpu_hw_btrs.c
@@ -630,8 +630,7 @@ bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq)
 
 	if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR, status)) {
 		ivpu_dbg(vdev, IRQ, "Survivability IRQ\n");
-		if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_DCT))
-			ivpu_err_ratelimited(vdev, "IRQ FIFO full\n");
+		queue_work(system_wq, &vdev->irq_dct_work);
 	}
 
 	if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, FREQ_CHANGE, status))
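A note on why the "IRQ FIFO full" error paths disappear in the hunks above and below: queue_work() on an already-pending work item is a no-op that returns false, so back-to-back interrupts coalesce into a single execution of the work function; unlike kfifo_put(), there is no "full" failure mode left to report. A sketch, reusing the hypothetical my_dev:

static void my_notify_bottom_half(struct my_dev *mydev)
{
	/* a false return here means "already queued", not an error */
	if (!queue_work(system_wq, &mydev->irq_work))
		pr_debug("irq work already pending, coalesced\n");
}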
diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c
--- a/drivers/accel/ivpu/ivpu_ipc.c
+++ b/drivers/accel/ivpu/ivpu_ipc.c
@@ -459,13 +459,12 @@ void ivpu_ipc_irq_handler(struct ivpu_device *vdev)
 		}
 	}
 
-	if (!list_empty(&ipc->cb_msg_list))
-		if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_IPC))
-			ivpu_err_ratelimited(vdev, "IRQ FIFO full\n");
+	queue_work(system_wq, &vdev->irq_ipc_work);
 }
 
-void ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev)
+void ivpu_ipc_irq_work_fn(struct work_struct *work)
 {
+	struct ivpu_device *vdev = container_of(work, struct ivpu_device, irq_ipc_work);
 	struct ivpu_ipc_info *ipc = vdev->ipc;
 	struct ivpu_ipc_rx_msg *rx_msg, *r;
 	struct list_head cb_msg_list;
diff --git a/drivers/accel/ivpu/ivpu_ipc.h b/drivers/accel/ivpu/ivpu_ipc.h
--- a/drivers/accel/ivpu/ivpu_ipc.h
+++ b/drivers/accel/ivpu/ivpu_ipc.h
@@ -90,7 +90,7 @@ void ivpu_ipc_disable(struct ivpu_device *vdev);
 void ivpu_ipc_reset(struct ivpu_device *vdev);
 
 void ivpu_ipc_irq_handler(struct ivpu_device *vdev);
-void ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev);
+void ivpu_ipc_irq_work_fn(struct work_struct *work);
 
 void ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
 			   u32 channel, ivpu_ipc_rx_callback_t callback);
diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
--- a/drivers/accel/ivpu/ivpu_job.c
+++ b/drivers/accel/ivpu/ivpu_job.c
@@ -935,7 +935,7 @@ void ivpu_job_done_consumer_fini(struct ivpu_device *vdev)
 	ivpu_ipc_consumer_del(vdev, &vdev->job_done_consumer);
 }
 
-void ivpu_context_abort_thread_handler(struct work_struct *work)
+void ivpu_context_abort_work_fn(struct work_struct *work)
 {
 	struct ivpu_device *vdev = container_of(work, struct ivpu_device, context_abort_work);
 	struct ivpu_file_priv *file_priv;
diff --git a/drivers/accel/ivpu/ivpu_job.h b/drivers/accel/ivpu/ivpu_job.h
--- a/drivers/accel/ivpu/ivpu_job.h
+++ b/drivers/accel/ivpu/ivpu_job.h
@@ -72,7 +72,7 @@ void ivpu_cmdq_abort_all_jobs(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id)
 
 void ivpu_job_done_consumer_init(struct ivpu_device *vdev);
 void ivpu_job_done_consumer_fini(struct ivpu_device *vdev);
-void ivpu_context_abort_thread_handler(struct work_struct *work);
+void ivpu_context_abort_work_fn(struct work_struct *work);
 
 void ivpu_jobs_abort_all(struct ivpu_device *vdev);
 
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -452,8 +452,9 @@ int ivpu_pm_dct_disable(struct ivpu_device *vdev)
 	return 0;
 }
 
-void ivpu_pm_dct_irq_thread_handler(struct ivpu_device *vdev)
+void ivpu_pm_irq_dct_work_fn(struct work_struct *work)
 {
+	struct ivpu_device *vdev = container_of(work, struct ivpu_device, irq_dct_work);
 	bool enable;
 	int ret;
 
diff --git a/drivers/accel/ivpu/ivpu_pm.h b/drivers/accel/ivpu/ivpu_pm.h
--- a/drivers/accel/ivpu/ivpu_pm.h
+++ b/drivers/accel/ivpu/ivpu_pm.h
@@ -45,6 +45,6 @@ void ivpu_stop_job_timeout_detection(struct ivpu_device *vdev);
 int ivpu_pm_dct_init(struct ivpu_device *vdev);
 int ivpu_pm_dct_enable(struct ivpu_device *vdev, u8 active_percent);
 int ivpu_pm_dct_disable(struct ivpu_device *vdev);
-void ivpu_pm_dct_irq_thread_handler(struct ivpu_device *vdev);
+void ivpu_pm_irq_dct_work_fn(struct work_struct *work);
 
 #endif /* __IVPU_PM_H__ */