Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2025-08-23 09:46:12 +00:00)

To prevent looping infinitely in the MMU event handler, we stop the generation of new events by removing the 'R' (record) bit from the context descriptor. To ensure this change takes effect, the KMD has to perform a configuration invalidation followed by a sync command. Because of that, move the parts of the interrupt handler that can take longer to a thread, so that the interrupt handler is not blocked for too long. This includes:

* disabling the event queue for the time the KMD updates the MMU event queue consumer, to ensure proper synchronization between the MMU and the KMD
* removing the 'R' (record) bit from the context descriptor, to ensure no more faults are recorded until that context is destroyed

Signed-off-by: Karol Wachowski <karol.wachowski@intel.com>
Signed-off-by: Maciej Falkowski <maciej.falkowski@linux.intel.com>
Reviewed-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20250107173238.381120-8-maciej.falkowski@linux.intel.com
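The ordering the commit message describes (clear 'R' so no new faults are recorded, invalidate the cached configuration, then sync before the event queue is drained) can be sketched as follows. This is a minimal, self-contained C sketch, not the driver implementation: the context-descriptor layout, the bit position and every helper name are assumptions chosen only to illustrate the sequence.

/*
 * Illustrative sketch only -- not the driver code. Models the ordering:
 * clear the 'R' (record) bit in the context descriptor, then perform a
 * configuration invalidation followed by a sync so the change is
 * guaranteed to have taken effect before the event queue is drained.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CD_DWORDS	8		/* assumed CD size: 8 x 64-bit words */
#define CD_0_R		(1ULL << 45)	/* 'R' bit in CD word 0, as in SMMUv3 (assumed here) */
#define NUM_SSIDS	64

static uint64_t cdtab[NUM_SSIDS][CD_DWORDS];	/* stand-in for the CD table */

/* Stand-ins for posting commands to the MMU command queue. */
static void cmdq_cfgi_cd(uint32_t ssid)
{
	printf("CMDQ: CFGI_CD ssid=%u (invalidate cached context descriptor)\n", ssid);
}

static void cmdq_sync(void)
{
	printf("CMDQ: SYNC (wait until the invalidation has completed)\n");
}

/*
 * Stop recording fault events for one SSID. After this returns, the MMU
 * no longer generates events for that context, so the event handler
 * cannot loop forever on faults from it.
 */
static int disable_ssid_events(uint32_t ssid)
{
	if (ssid >= NUM_SSIDS)
		return -1;

	cdtab[ssid][0] &= ~CD_0_R;	/* stop recording new faults */
	cmdq_cfgi_cd(ssid);		/* configuration invalidation ... */
	cmdq_sync();			/* ... followed by a sync command */
	return 0;
}

int main(void)
{
	memset(cdtab, 0xff, sizeof(cdtab));	/* pretend all CDs are populated */
	return disable_ssid_events(3);
}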
54 lines
1.3 KiB
C
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#ifndef __IVPU_MMU_H__
#define __IVPU_MMU_H__

struct ivpu_device;

struct ivpu_mmu_cdtab {
	void *base;
	dma_addr_t dma;
};

struct ivpu_mmu_strtab {
	void *base;
	dma_addr_t dma;
	u64 dma_q;
	u32 base_cfg;
};

struct ivpu_mmu_queue {
	void *base;
	dma_addr_t dma;
	u64 dma_q;
	u32 prod;
	u32 cons;
};

struct ivpu_mmu_info {
	struct mutex lock; /* Protects cdtab, strtab, cmdq, on */
	struct ivpu_mmu_cdtab cdtab;
	struct ivpu_mmu_strtab strtab;
	struct ivpu_mmu_queue cmdq;
	struct ivpu_mmu_queue evtq;
	bool on;
};

int ivpu_mmu_init(struct ivpu_device *vdev);
void ivpu_mmu_disable(struct ivpu_device *vdev);
int ivpu_mmu_enable(struct ivpu_device *vdev);
int ivpu_mmu_cd_set(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable);
void ivpu_mmu_cd_clear(struct ivpu_device *vdev, int ssid);
int ivpu_mmu_invalidate_tlb(struct ivpu_device *vdev, u16 ssid);

void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev);
void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev);
void ivpu_mmu_evtq_dump(struct ivpu_device *vdev);
void ivpu_mmu_discard_events(struct ivpu_device *vdev);
int ivpu_mmu_disable_ssid_events(struct ivpu_device *vdev, u32 ssid);

#endif /* __IVPU_MMU_H__ */
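For the "move slow work out of the interrupt handler" part of the commit, the sketch below shows the generic hard-IRQ/threaded-IRQ split using the kernel's devm_request_threaded_irq() mechanism. It is a hedged illustration, not the driver's actual interrupt wiring: the handler names, the registration helper and the IRQ/device parameters are assumptions, and only ivpu_mmu_irq_evtq_handler() comes from the header above.

/* Illustrative sketch only: defer the slow MMU event handling to a thread. */
#include <linux/interrupt.h>

#include "ivpu_mmu.h"

static irqreturn_t example_hw_irq(int irq, void *arg)
{
	/* Hard IRQ context: do nothing slow here, just wake the thread. */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t example_hw_irq_thread(int irq, void *arg)
{
	struct ivpu_device *vdev = arg;

	/*
	 * Thread context: this path may disable the event queue while the
	 * consumer pointer is updated, and may wait for the CFGI + SYNC
	 * issued after clearing the 'R' bit, so it is allowed to take longer.
	 */
	ivpu_mmu_irq_evtq_handler(vdev);
	return IRQ_HANDLED;
}

/* Hypothetical registration helper; irq number and name are placeholders. */
static int example_register_irq(struct device *dev, int irq, struct ivpu_device *vdev)
{
	/* IRQF_ONESHOT keeps the line masked until the thread handler finishes. */
	return devm_request_threaded_irq(dev, irq, example_hw_irq,
					 example_hw_irq_thread, IRQF_ONESHOT,
					 "ivpu-example", vdev);
}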