linux/drivers/accel/ivpu/ivpu_mmu_context.h
Karol Wachowski 0240fa18d2 accel/ivpu: Dump only first MMU fault from single context
Stop dumping consecutive faults from an already faulty context immediately,
rather than continuing until the context abort thread handler (the IRQ
handler's bottom half) aborts the currently executing jobs.

Clear the 'R' (record events) bit in the context descriptor of a faulty
context to prevent it from generating further fault events.
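
A minimal sketch of the approach, assuming hypothetical helpers
(ivpu_mmu_ctx_from_ssid(), ivpu_mmu_dump_event() and
ivpu_mmu_cd_clear_record() are illustrative names, not the driver's
actual API):

static void ivpu_mmu_handle_fault(struct ivpu_device *vdev, u32 ssid, u64 *event)
{
	/* Hypothetical lookup of the context that raised the fault. */
	struct ivpu_mmu_context *ctx = ivpu_mmu_ctx_from_ssid(vdev, ssid);

	/* Drop events from a context that has already faulted. */
	if (!ctx || !ctx->is_cd_valid)
		return;

	ivpu_mmu_dump_event(vdev, event); /* print the first fault only */

	/*
	 * Clear the 'R' (record events) bit in the context descriptor so
	 * the hardware stops generating events for this context; locking
	 * of is_cd_valid is elided for brevity.
	 */
	ivpu_mmu_cd_clear_record(vdev, ssid);
	ctx->is_cd_valid = false;
}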

This change speeds up the IRQ handler by eliminating the need to print the
fault content repeatedly. It also prevents flooding dmesg with errors,
which previously happened because of the delay before the handler's bottom
half stopped the fault-generating jobs.

Signed-off-by: Karol Wachowski <karol.wachowski@intel.com>
Signed-off-by: Maciej Falkowski <maciej.falkowski@linux.intel.com>
Reviewed-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20250107173238.381120-7-maciej.falkowski@linux.intel.com
2025-01-09 09:35:44 +01:00

52 lines
1.7 KiB
C

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#ifndef __IVPU_MMU_CONTEXT_H__
#define __IVPU_MMU_CONTEXT_H__

#include <drm/drm_mm.h>

struct ivpu_device;
struct ivpu_file_priv;
struct ivpu_addr_range;

#define IVPU_MMU_PGTABLE_ENTRIES	512ull
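
/*
 * Host-side mirror of the 4-level VPU page tables: the *_ptrs arrays hold
 * CPU pointers to the lower-level tables, so the PTE page for a given
 * address is reached as pte_ptrs[pgd_idx][pud_idx][pmd_idx] and a single
 * entry as pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx].
 */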
struct ivpu_mmu_pgtable {
	u64		***pte_ptrs[IVPU_MMU_PGTABLE_ENTRIES];
	u64		**pmd_ptrs[IVPU_MMU_PGTABLE_ENTRIES];
	u64		*pud_ptrs[IVPU_MMU_PGTABLE_ENTRIES];
	u64		*pgd_dma_ptr;
	dma_addr_t	pgd_dma;
};

struct ivpu_mmu_context {
	struct mutex lock; /* Protects: mm, pgtable, is_cd_valid */
	struct drm_mm mm;
	struct ivpu_mmu_pgtable pgtable;
	bool is_cd_valid;
	u32 id;
};

void ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id);
void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);
void ivpu_mmu_global_context_init(struct ivpu_device *vdev);
void ivpu_mmu_global_context_fini(struct ivpu_device *vdev);
int ivpu_mmu_reserved_context_init(struct ivpu_device *vdev);
void ivpu_mmu_reserved_context_fini(struct ivpu_device *vdev);

int ivpu_mmu_context_insert_node(struct ivpu_mmu_context *ctx, const struct ivpu_addr_range *range,
				 u64 size, struct drm_mm_node *node);
void ivpu_mmu_context_remove_node(struct ivpu_mmu_context *ctx, struct drm_mm_node *node);

int ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
			     u64 vpu_addr, struct sg_table *sgt, bool llc_coherent);
void ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
				u64 vpu_addr, struct sg_table *sgt);

int ivpu_mmu_context_set_pages_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
				  u64 vpu_addr, size_t size);

#endif /* __IVPU_MMU_CONTEXT_H__ */
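
For orientation, a hedged sketch of how a caller might pair these
declarations (my_bo_map() and its argument layout are illustrative, not
taken from the driver; only the functions declared above are real):

/*
 * Illustrative only: reserve a VPU address range in the context, then map
 * a scatter-gather table at that address, following the insert/map and
 * unmap/remove pairing implied by the API above.
 */
static int my_bo_map(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
		     const struct ivpu_addr_range *range, struct sg_table *sgt,
		     u64 size, struct drm_mm_node *node)
{
	int ret;

	ret = ivpu_mmu_context_insert_node(ctx, range, size, node);
	if (ret)
		return ret;

	ret = ivpu_mmu_context_map_sgt(vdev, ctx, node->start, sgt, false);
	if (ret)
		ivpu_mmu_context_remove_node(ctx, node);

	return ret;
}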