mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git

Add SVM range invalidation vfunc which invalidates PTEs. A new PT layer
function which accepts a SVM range is added to support this. In addition,
add the basic page fault handler which allocates a SVM range which is used
by SVM range invalidation vfunc.

v2:
 - Don't run invalidation if VM is closed
 - Cycle notifier lock in xe_svm_close
 - Drop xe_gt_tlb_invalidation_fence_fini
v3:
 - Better commit message (Thomas)
 - Add lockdep asserts (Thomas)
 - Add kernel doc (Thomas)
 - s/change/changed (Thomas)
 - Use new GPU SVM range / notifier structures
 - Ensure PTEs are zapped / dma mappings are unmapped on VM close (Thomas)
v4:
 - Fix macro (Checkpatch)
v5:
 - Use range start/end helpers (Thomas)
 - Use notifier start/end helpers (Thomas)
v6:
 - Use min/max helpers (Himal)
 - Only compile if CONFIG_DRM_GPUSVM selected (CI, Lucas)

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20250306012657.3505757-13-matthew.brost@intel.com
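
For context, below is a minimal sketch of how an SVM range invalidation path could use the new PT-layer helper described in the commit message. Only xe_pt_zap_ptes_range() comes from the header shown further down; the callback name example_svm_invalidate, the closed-VM check, the tile loop and the TLB-invalidation comment are assumptions made for illustration, not the driver's actual implementation.

/*
 * Illustrative sketch only: an SVM invalidation callback zapping PTEs
 * for a range on each tile via the new PT-layer helper. Names other
 * than xe_pt_zap_ptes_range() are assumptions for this example.
 */
static void example_svm_invalidate(struct xe_vm *vm,
				   struct xe_svm_range *range)
{
	struct xe_tile *tile;
	u8 id;
	bool flush = false;

	/* Per the v2 note above: skip invalidation if the VM is closed. */
	if (xe_vm_is_closed(vm))
		return;

	/* Zap PTEs covering the SVM range on every tile of the device. */
	for_each_tile(tile, vm->xe, id)
		flush |= xe_pt_zap_ptes_range(tile, vm, range);

	if (flush) {
		/* A GT TLB invalidation would be issued here (omitted). */
	}
}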
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */
#ifndef _XE_PT_H_
#define _XE_PT_H_

#include <linux/types.h>

#include "xe_pt_types.h"

struct dma_fence;
struct xe_bo;
struct xe_device;
struct xe_exec_queue;
struct xe_svm_range;
struct xe_sync_entry;
struct xe_tile;
struct xe_vm;
struct xe_vma;
struct xe_vma_ops;

/* Largest huge pte is currently 1GiB. May become device dependent. */
#define MAX_HUGEPTE_LEVEL 2

#define xe_pt_write(xe, map, idx, data) \
	xe_map_wr(xe, map, (idx) * sizeof(u64), u64, data)

unsigned int xe_pt_shift(unsigned int level);

struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
			   unsigned int level);

void xe_pt_populate_empty(struct xe_tile *tile, struct xe_vm *vm,
			  struct xe_pt *pt);

void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred);

void xe_pt_clear(struct xe_device *xe, struct xe_pt *pt);

int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops);
struct dma_fence *xe_pt_update_ops_run(struct xe_tile *tile,
				       struct xe_vma_ops *vops);
void xe_pt_update_ops_fini(struct xe_tile *tile, struct xe_vma_ops *vops);
void xe_pt_update_ops_abort(struct xe_tile *tile, struct xe_vma_ops *vops);

bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma);
bool xe_pt_zap_ptes_range(struct xe_tile *tile, struct xe_vm *vm,
			  struct xe_svm_range *range);

#endif
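
As a usage note, the xe_pt_update_ops_* declarations above suggest a prepare/run/finalize lifecycle for page-table updates. The sketch below is an assumed calling order under that reading, not code taken from the driver; the function name example_apply_vma_ops and the simplified error handling are illustrative assumptions.

/*
 * Assumed calling order for the xe_pt_update_ops_* API; locking and
 * error handling are simplified for illustration.
 */
static int example_apply_vma_ops(struct xe_tile *tile, struct xe_vma_ops *vops)
{
	struct dma_fence *fence;
	int err;

	err = xe_pt_update_ops_prepare(tile, vops);	/* stage the update */
	if (err)
		return err;

	fence = xe_pt_update_ops_run(tile, vops);	/* submit the update */
	if (IS_ERR(fence)) {
		xe_pt_update_ops_abort(tile, vops);	/* undo prepared state */
		return PTR_ERR(fence);
	}

	xe_pt_update_ops_fini(tile, vops);		/* release prepared resources */
	dma_fence_put(fence);				/* drop this example's fence reference */
	return 0;
}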