linux-loongson/drivers/gpu/drm/msm/msm_mmu.h
Rob Clark e601ea31d6 drm/msm: Support pgtable preallocation
Introduce a mechanism to count the worst case # of pages required in a
VM_BIND op.

Note that previously we would have had to somehow account for
allocations in unmap, when splitting a block.  This behavior was removed
in commit 33729a5fc0 ("iommu/io-pgtable-arm: Remove split on unmap
behavior)"

Signed-off-by: Rob Clark <robdclark@chromium.org>
Signed-off-by: Rob Clark <robin.clark@oss.qualcomm.com>
Tested-by: Antonino Maniscalco <antomani103@gmail.com>
Reviewed-by: Antonino Maniscalco <antomani103@gmail.com>
Patchwork: https://patchwork.freedesktop.org/patch/661515/
2025-07-04 17:48:37 -07:00
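The worst case is easy to bound: with a multi-level pagetable, mapping [iova, iova + len) can require at most one new table page for every entry the range spans at each intermediate level. Below is a minimal sketch of that bound, assuming a 4K granule and a 48-bit, 4-level ARM long-descriptor walk (illustrative only; the driver's real accounting sits behind the prealloc_count op declared in the header):

static uint32_t example_prealloc_count(uint64_t iova, size_t len)
{
        uint64_t end = iova + len - 1;
        uint32_t count = 0;
        int shift;

        /*
         * The level-0 (root) table already exists; levels 1..3 may each
         * need one new table for every entry the range touches at the
         * level above (VA bits 39, 30 and 21 with a 4K granule).
         */
        for (shift = 39; shift >= 21; shift -= 9)
                count += (end >> shift) - (iova >> shift) + 1;

        return count;
}

Over-counting is cheap: whatever pages are left unused after the operation are simply handed back by the prealloc_cleanup op.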

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#ifndef __MSM_MMU_H__
#define __MSM_MMU_H__

#include <linux/iommu.h>

struct msm_mmu_prealloc;
struct msm_mmu;
struct msm_gpu;

struct msm_mmu_funcs {
        void (*detach)(struct msm_mmu *mmu);
        void (*prealloc_count)(struct msm_mmu *mmu, struct msm_mmu_prealloc *p,
                               uint64_t iova, size_t len);
        int (*prealloc_allocate)(struct msm_mmu *mmu, struct msm_mmu_prealloc *p);
        void (*prealloc_cleanup)(struct msm_mmu *mmu, struct msm_mmu_prealloc *p);
        int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
                   size_t off, size_t len, int prot);
        int (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len);
        void (*destroy)(struct msm_mmu *mmu);
        void (*set_stall)(struct msm_mmu *mmu, bool enable);
};
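
/*
 * A minimal sketch (hypothetical names, not part of this driver) of how a
 * backend might fill in the ops table and initialize its embedded struct
 * msm_mmu via msm_mmu_init() below; the hypothetical struct example_mmu
 * wraps a struct msm_mmu as its 'base' member, and the msm_iommu and
 * gpummu backends follow roughly this pattern:
 *
 *      static const struct msm_mmu_funcs example_funcs = {
 *              .detach = example_detach,
 *              .map = example_map,
 *              .unmap = example_unmap,
 *              .destroy = example_destroy,
 *      };
 *
 *      struct msm_mmu *example_mmu_new(struct device *dev)
 *      {
 *              struct example_mmu *m = kzalloc(sizeof(*m), GFP_KERNEL);
 *
 *              if (!m)
 *                      return ERR_PTR(-ENOMEM);
 *
 *              msm_mmu_init(&m->base, dev, &example_funcs, MSM_MMU_IOMMU);
 *              return &m->base;
 *      }
 */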

enum msm_mmu_type {
        MSM_MMU_GPUMMU,
        MSM_MMU_IOMMU,
        MSM_MMU_IOMMU_PAGETABLE,
};

/**
 * struct msm_mmu_prealloc - Tracking for pre-allocated pages for MMU updates.
 */
struct msm_mmu_prealloc {
        /** @count: Number of pages reserved. */
        uint32_t count;
        /** @ptr: Index of first unused page in @pages */
        uint32_t ptr;
        /**
         * @pages: Array of pages preallocated for MMU table updates.
         *
         * After a VM operation, there might be free pages remaining in this
         * array (since the amount allocated is a worst-case). These are
         * returned to the pt_cache at mmu->prealloc_cleanup().
         */
        void **pages;
};
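
/*
 * A sketch of how a pagetable implementation might consume the reservation
 * while updating tables (hypothetical helper, not this driver's actual
 * allocator): each new table page comes from the next unused slot, so the
 * update itself never has to allocate and cannot fail on OOM.
 *
 *      static void *example_pt_alloc(struct msm_mmu_prealloc *p)
 *      {
 *              if (WARN_ON(p->ptr >= p->count))
 *                      return NULL;
 *
 *              return p->pages[p->ptr++];
 *      }
 */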
struct msm_mmu {
        const struct msm_mmu_funcs *funcs;
        struct device *dev;
        int (*handler)(void *arg, unsigned long iova, int flags, void *data);
        void *arg;
        enum msm_mmu_type type;

        /**
         * @prealloc: pre-allocated pages for pgtable
         *
         * Set while a VM_BIND job is running, serialized under
         * msm_gem_vm::mmu_lock.
         */
        struct msm_mmu_prealloc *prealloc;
};
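
/*
 * Rough lifecycle of @prealloc around a VM_BIND job (sketch only; the real
 * sequencing lives in the VM_BIND code and is serialized under
 * msm_gem_vm::mmu_lock):
 *
 *      struct msm_mmu_prealloc p = {};
 *
 *      mmu->funcs->prealloc_count(mmu, &p, iova, len);       // worst-case # pages
 *      ret = mmu->funcs->prealloc_allocate(mmu, &p);         // may sleep/fail here
 *      mmu->prealloc = &p;
 *      ret = mmu->funcs->map(mmu, iova, sgt, 0, len, prot);  // no allocation failures
 *      mmu->prealloc = NULL;
 *      mmu->funcs->prealloc_cleanup(mmu, &p);                // return unused pages
 */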

static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
                const struct msm_mmu_funcs *funcs, enum msm_mmu_type type)
{
        mmu->dev = dev;
        mmu->funcs = funcs;
        mmu->type = type;
}

struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks);
struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsigned long quirks);
struct msm_mmu *msm_iommu_disp_new(struct device *dev, unsigned long quirks);

static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
                int (*handler)(void *arg, unsigned long iova, int flags, void *data))
{
        mmu->arg = arg;
        mmu->handler = handler;
}
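
/*
 * Example of installing a fault handler (hypothetical callback and names;
 * the GPU code registers its own handler this way to decode and report
 * pagefaults):
 *
 *      static int example_fault(void *arg, unsigned long iova, int flags,
 *                               void *data)
 *      {
 *              struct example_gpu *gpu = arg;
 *
 *              // record iova/flags for the fault report
 *              return 0;
 *      }
 *
 *      msm_mmu_set_fault_handler(mmu, gpu, example_fault);
 */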

struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent, bool kernel_managed);

int msm_iommu_pagetable_params(struct msm_mmu *mmu, phys_addr_t *ttbr,
                int *asid);
int msm_iommu_pagetable_walk(struct msm_mmu *mmu, unsigned long iova, uint64_t ptes[4]);

struct iommu_domain_geometry *msm_iommu_get_geometry(struct msm_mmu *mmu);

#endif /* __MSM_MMU_H__ */