mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2025-09-01 09:36:29 +00:00

If userptr pages are freed after a call to the xe mmu notifier, the device will not be blocked out from theoretically accessing these pages unless they are also unmapped from the iommu, and this violates some aspects of the iommu-imposed security. Ensure that userptrs are unmapped in the mmu notifier to mitigate this. A naive attempt would try to free the sg table, but the sg table itself may be accessed by a concurrent bind operation, so settle for only unmapping. v3: - Update lockdep asserts. - Fix a typo (Matthew Auld) Fixes: 81e058a3e7
("drm/xe: Introduce helper to populate userptr") Cc: Oak Zeng <oak.zeng@intel.com> Cc: Matthew Auld <matthew.auld@intel.com> Cc: <stable@vger.kernel.org> # v6.10+ Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com> Reviewed-by: Matthew Auld <matthew.auld@intel.com> Acked-by: Matthew Brost <matthew.brost@intel.com> Link: https://patchwork.freedesktop.org/patch/msgid/20250304173342.22009-4-thomas.hellstrom@linux.intel.com (cherry picked from commit ba767b9d01
) Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
19 lines
379 B
C
19 lines
379 B
C
/* SPDX-License-Identifier: MIT
|
|
*
|
|
* Copyright © 2024 Intel Corporation
|
|
*/
|
|
|
|
#ifndef _XE_HMM_H_
|
|
#define _XE_HMM_H_
|
|
|
|
#include <linux/types.h>
|
|
|
|
struct xe_userptr_vma;
|
|
|
|
int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma, bool is_mm_mmap_locked);
|
|
|
|
void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma);
|
|
|
|
void xe_hmm_userptr_unmap(struct xe_userptr_vma *uvma);
|
|
#endif
|