drm/xe/pxp/uapi: Add userspace and LRC support for PXP-using queues

Userspace is required to mark a queue as using PXP to guarantee that the
PXP instructions will work. In addition to managing the PXP sessions,
when a PXP queue is created the driver will set the relevant bits in
its context control register.

On submission of a valid PXP queue, the driver will validate all
encrypted objects mapped to the VM to ensure they were encrypted with
the current key.
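
For illustration, a minimal userspace sketch of creating a protected queue via
the new extension (hypothetical helper; assumes the engine instance was already
chosen via DRM_IOCTL_XE_DEVICE_QUERY and omits error handling):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    static uint32_t create_hwdrm_queue(int fd, uint32_t vm_id,
                                       struct drm_xe_engine_class_instance *eci)
    {
            /* chainable set_property extension selecting a HWDRM PXP session */
            struct drm_xe_ext_set_property pxp = {
                    .base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
                    .property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE,
                    .value = DRM_XE_PXP_TYPE_HWDRM,
            };
            struct drm_xe_exec_queue_create create = {
                    .extensions = (uint64_t)(uintptr_t)&pxp,
                    .width = 1,
                    .num_placements = 1,
                    .vm_id = vm_id,
                    .instances = (uint64_t)(uintptr_t)eci,
            };

            /* fails with -ENODEV if PXP is not supported/enabled on the device */
            ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create);

            return create.exec_queue_id;
    }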

v2: Remove pxp_types include outside of PXP code (Jani), better comments
and code cleanup (John)

v3: Split the internal PXP management into a separate patch for ease of
review. Re-order ioctl checks to always return -EINVAL if parameters are
invalid; rebase on the MSI-X changes.

Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: John Harrison <John.C.Harrison@Intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20250129174140.948829-9-daniele.ceraolospurio@intel.com

drivers/gpu/drm/xe/regs/xe_engine_regs.h

@@ -132,6 +132,7 @@
 #define RING_EXECLIST_STATUS_HI(base)		XE_REG((base) + 0x234 + 4)
 #define RING_CONTEXT_CONTROL(base)		XE_REG((base) + 0x244, XE_REG_OPTION_MASKED)
+#define	  CTX_CTRL_PXP_ENABLE			REG_BIT(10)
 #define	  CTX_CTRL_OAC_CONTEXT_ENABLE		REG_BIT(8)
 #define	  CTX_CTRL_RUN_ALONE			REG_BIT(7)
 #define	  CTX_CTRL_INDIRECT_RING_STATE_ENABLE	REG_BIT(4)

drivers/gpu/drm/xe/xe_exec_queue.c

@@ -25,6 +25,7 @@
 #include "xe_ring_ops_types.h"
 #include "xe_trace.h"
 #include "xe_vm.h"
+#include "xe_pxp.h"

 enum xe_exec_queue_sched_prop {
 	XE_EXEC_QUEUE_JOB_TIMEOUT = 0,
@@ -38,6 +39,8 @@ static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue
 static void __xe_exec_queue_free(struct xe_exec_queue *q)
 {
+	if (xe_exec_queue_uses_pxp(q))
+		xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q);
+
 	if (q->vm)
 		xe_vm_put(q->vm);
@@ -113,6 +116,21 @@ static int __xe_exec_queue_init(struct xe_exec_queue *q)
 {
 	struct xe_vm *vm = q->vm;
 	int i, err;
+	u32 flags = 0;
+
+	/*
+	 * PXP workloads executing on RCS or CCS must run in isolation (i.e. no
+	 * other workload can use the EUs at the same time). On MTL this is done
+	 * by setting the RUNALONE bit in the LRC, while starting on Xe2 there
+	 * is a dedicated bit for it.
+	 */
+	if (xe_exec_queue_uses_pxp(q) &&
+	    (q->class == XE_ENGINE_CLASS_RENDER || q->class == XE_ENGINE_CLASS_COMPUTE)) {
+		if (GRAPHICS_VER(gt_to_xe(q->gt)) >= 20)
+			flags |= XE_LRC_CREATE_PXP;
+		else
+			flags |= XE_LRC_CREATE_RUNALONE;
+	}

 	if (vm) {
 		err = xe_vm_lock(vm, true);
@@ -121,7 +139,7 @@ static int __xe_exec_queue_init(struct xe_exec_queue *q)
 	}

 	for (i = 0; i < q->width; ++i) {
-		q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K, q->msix_vec);
+		q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K, q->msix_vec, flags);
 		if (IS_ERR(q->lrc[i])) {
 			err = PTR_ERR(q->lrc[i]);
 			goto err_unlock;
@@ -166,6 +184,19 @@ struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *v
 	if (err)
 		goto err_post_alloc;

+	/*
+	 * We can only add the queue to the PXP list after the init is complete,
+	 * because the PXP termination can call exec_queue_kill and that will
+	 * go bad if the queue is only half-initialized. This means that we
+	 * can't do it when we handle the PXP extension in __xe_exec_queue_alloc
+	 * and we need to do it here instead.
+	 */
+	if (xe_exec_queue_uses_pxp(q)) {
+		err = xe_pxp_exec_queue_add(xe->pxp, q);
+		if (err)
+			goto err_post_alloc;
+	}
+
 	return q;

 err_post_alloc:
@@ -254,6 +285,9 @@ void xe_exec_queue_destroy(struct kref *ref)
 	struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
 	struct xe_exec_queue *eq, *next;

+	if (xe_exec_queue_uses_pxp(q))
+		xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q);
+
 	xe_exec_queue_last_fence_put_unlocked(q);
 	if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) {
 		list_for_each_entry_safe(eq, next, &q->multi_gt_list,
@@ -409,6 +443,22 @@ static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *
 	return 0;
 }

+static int
+exec_queue_set_pxp_type(struct xe_device *xe, struct xe_exec_queue *q, u64 value)
+{
+	if (value == DRM_XE_PXP_TYPE_NONE)
+		return 0;
+
+	/* we only support HWDRM sessions right now */
+	if (XE_IOCTL_DBG(xe, value != DRM_XE_PXP_TYPE_HWDRM))
+		return -EINVAL;
+
+	if (!xe_pxp_is_enabled(xe->pxp))
+		return -ENODEV;
+
+	return xe_pxp_exec_queue_set_type(xe->pxp, q, DRM_XE_PXP_TYPE_HWDRM);
+}
+
 typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
 					     struct xe_exec_queue *q,
 					     u64 value);
@@ -416,6 +466,7 @@ typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
 static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
 	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
 	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
+	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE] = exec_queue_set_pxp_type,
 };

 static int exec_queue_user_ext_set_property(struct xe_device *xe,
@@ -435,7 +486,8 @@ static int exec_queue_user_ext_set_property(struct xe_device *xe,
 			 ARRAY_SIZE(exec_queue_set_property_funcs)) ||
 	    XE_IOCTL_DBG(xe, ext.pad) ||
 	    XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY &&
-			 ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE))
+			 ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE &&
+			 ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE))
 		return -EINVAL;

 	idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
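
The property handlers above are dispatched through a fixed function table
indexed by a userspace-controlled value, so the index is sanitized with
array_index_nospec() after the bounds check. A condensed sketch of the
pattern (not the actual function; names reused from the code above):

    /* validate the untrusted index first, then clamp it so a mispredicted
     * bounds check cannot speculatively read past the end of the table */
    static int set_property_dispatch(struct xe_device *xe,
                                     struct xe_exec_queue *q,
                                     u32 property, u64 value)
    {
            u32 idx;

            if (property >= ARRAY_SIZE(exec_queue_set_property_funcs))
                    return -EINVAL;

            idx = array_index_nospec(property,
                                     ARRAY_SIZE(exec_queue_set_property_funcs));
            if (!exec_queue_set_property_funcs[idx])
                    return -EINVAL;

            return exec_queue_set_property_funcs[idx](xe, q, value);
    }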

drivers/gpu/drm/xe/xe_exec_queue.h

@@ -57,6 +57,11 @@ static inline bool xe_exec_queue_is_parallel(struct xe_exec_queue *q)
 	return q->width > 1;
 }

+static inline bool xe_exec_queue_uses_pxp(struct xe_exec_queue *q)
+{
+	return q->pxp.type;
+}
+
 bool xe_exec_queue_is_lr(struct xe_exec_queue *q);
 bool xe_exec_queue_ring_full(struct xe_exec_queue *q);

drivers/gpu/drm/xe/xe_exec_queue_types.h

@@ -132,6 +132,8 @@ struct xe_exec_queue {
 	/** @pxp: PXP info tracking */
 	struct {
+		/** @pxp.type: PXP session type used by this queue */
+		u8 type;
 		/** @pxp.link: link into the list of PXP exec queues */
 		struct list_head link;
 	} pxp;

drivers/gpu/drm/xe/xe_execlist.c

@@ -269,7 +269,7 @@ struct xe_execlist_port *xe_execlist_port_create(struct xe_device *xe,
 	port->hwe = hwe;

-	port->lrc = xe_lrc_create(hwe, NULL, SZ_16K, XE_IRQ_DEFAULT_MSIX);
+	port->lrc = xe_lrc_create(hwe, NULL, SZ_16K, XE_IRQ_DEFAULT_MSIX, 0);
 	if (IS_ERR(port->lrc)) {
 		err = PTR_ERR(port->lrc);
 		goto err;

drivers/gpu/drm/xe/xe_lrc.c

@@ -883,7 +883,8 @@ static void xe_lrc_finish(struct xe_lrc *lrc)
 #define PVC_CTX_ACC_CTR_THOLD	(0x2a + 1)

 static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
-		       struct xe_vm *vm, u32 ring_size, u16 msix_vec)
+		       struct xe_vm *vm, u32 ring_size, u16 msix_vec,
+		       u32 init_flags)
 {
 	struct xe_gt *gt = hwe->gt;
 	struct xe_tile *tile = gt_to_tile(gt);
@@ -979,6 +980,16 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
 			     RING_CTL_SIZE(lrc->ring.size) | RING_VALID);
 	}

+	if (init_flags & XE_LRC_CREATE_RUNALONE)
+		xe_lrc_write_ctx_reg(lrc, CTX_CONTEXT_CONTROL,
+				     xe_lrc_read_ctx_reg(lrc, CTX_CONTEXT_CONTROL) |
+				     _MASKED_BIT_ENABLE(CTX_CTRL_RUN_ALONE));
+
+	if (init_flags & XE_LRC_CREATE_PXP)
+		xe_lrc_write_ctx_reg(lrc, CTX_CONTEXT_CONTROL,
+				     xe_lrc_read_ctx_reg(lrc, CTX_CONTEXT_CONTROL) |
+				     _MASKED_BIT_ENABLE(CTX_CTRL_PXP_ENABLE));
+
 	xe_lrc_write_ctx_reg(lrc, CTX_TIMESTAMP, 0);

 	if (xe->info.has_asid && vm)
@@ -1021,6 +1032,7 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
  * @vm: The VM (address space)
  * @ring_size: LRC ring size
  * @msix_vec: MSI-X interrupt vector (for platforms that support it)
+ * @flags: LRC initialization flags
  *
  * Allocate and initialize the Logical Ring Context (LRC).
  *
@@ -1028,7 +1040,7 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
  * upon failure.
  */
 struct xe_lrc *xe_lrc_create(struct xe_hw_engine *hwe, struct xe_vm *vm,
-			     u32 ring_size, u16 msix_vec)
+			     u32 ring_size, u16 msix_vec, u32 flags)
 {
 	struct xe_lrc *lrc;
 	int err;
@@ -1037,7 +1049,7 @@ struct xe_lrc *xe_lrc_create(struct xe_hw_engine *hwe, struct xe_vm *vm,
 	if (!lrc)
 		return ERR_PTR(-ENOMEM);

-	err = xe_lrc_init(lrc, hwe, vm, ring_size, msix_vec);
+	err = xe_lrc_init(lrc, hwe, vm, ring_size, msix_vec, flags);
 	if (err) {
 		kfree(lrc);
 		return ERR_PTR(err);
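
Note that RING_CONTEXT_CONTROL is declared with XE_REG_OPTION_MASKED above:
for masked registers the upper 16 bits of a write select which of the lower
16 bits take effect, which is why these LRC writes go through
_MASKED_BIT_ENABLE() instead of a plain OR. A simplified sketch of the
helpers (their canonical definitions live in the shared i915_reg_defs.h):

    /* a masked write only updates bits whose mask (upper 16 bits) is set,
     * leaving the other control bits untouched when the register is loaded
     * from the context image */
    #define _MASKED_FIELD(mask, value)	(((mask) << 16) | (value))
    #define _MASKED_BIT_ENABLE(a)	_MASKED_FIELD((a), (a))

    /* e.g. _MASKED_BIT_ENABLE(CTX_CTRL_PXP_ENABLE) == 0x04000400:
     * bit 10 of the mask and bit 10 of the value, nothing else */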

drivers/gpu/drm/xe/xe_lrc.h

@@ -42,8 +42,10 @@ struct xe_lrc_snapshot {
 #define LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR (0x34 * 4)
 #define LRC_PPHWSP_PXP_INVAL_SCRATCH_ADDR (0x40 * 4)

+#define XE_LRC_CREATE_RUNALONE 0x1
+#define XE_LRC_CREATE_PXP 0x2
 struct xe_lrc *xe_lrc_create(struct xe_hw_engine *hwe, struct xe_vm *vm,
-			     u32 ring_size, u16 msix_vec);
+			     u32 ring_size, u16 msix_vec, u32 flags);
 void xe_lrc_destroy(struct kref *ref);

 /**
/** /**

drivers/gpu/drm/xe/xe_pxp.c

@@ -6,6 +6,7 @@
 #include "xe_pxp.h"

 #include <drm/drm_managed.h>
+#include <uapi/drm/xe_drm.h>

 #include "xe_device_types.h"
 #include "xe_exec_queue.h"
@@ -47,7 +48,7 @@ bool xe_pxp_is_supported(const struct xe_device *xe)
 	return xe->info.has_pxp && IS_ENABLED(CONFIG_INTEL_MEI_GSC_PROXY);
 }

-static bool pxp_is_enabled(const struct xe_pxp *pxp)
+bool xe_pxp_is_enabled(const struct xe_pxp *pxp)
 {
 	return pxp;
 }
@@ -249,7 +250,7 @@ void xe_pxp_irq_handler(struct xe_device *xe, u16 iir)
 {
 	struct xe_pxp *pxp = xe->pxp;

-	if (!pxp_is_enabled(pxp)) {
+	if (!xe_pxp_is_enabled(pxp)) {
 		drm_err(&xe->drm, "PXP irq 0x%x received with PXP disabled!\n", iir);
 		return;
 	}
@@ -424,6 +425,27 @@ static int __pxp_start_arb_session(struct xe_pxp *pxp)
 	return ret;
 }

+/**
+ * xe_pxp_exec_queue_set_type - Mark a queue as using PXP
+ * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
+ * @q: the queue to mark as using PXP
+ * @type: the type of PXP session this queue will use
+ *
+ * Returns 0 if the selected PXP type is supported, -ENODEV otherwise.
+ */
+int xe_pxp_exec_queue_set_type(struct xe_pxp *pxp, struct xe_exec_queue *q, u8 type)
+{
+	if (!xe_pxp_is_enabled(pxp))
+		return -ENODEV;
+
+	/* we only support HWDRM sessions right now */
+	xe_assert(pxp->xe, type == DRM_XE_PXP_TYPE_HWDRM);
+
+	q->pxp.type = type;
+
+	return 0;
+}
+
 static void __exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q)
 {
 	spin_lock_irq(&pxp->queues.lock);
@@ -449,9 +471,12 @@ int xe_pxp_exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q)
 {
 	int ret = 0;

-	if (!pxp_is_enabled(pxp))
+	if (!xe_pxp_is_enabled(pxp))
 		return -ENODEV;

+	/* we only support HWDRM sessions right now */
+	xe_assert(pxp->xe, q->pxp.type == DRM_XE_PXP_TYPE_HWDRM);
+
 	/*
 	 * Runtime suspend kills PXP, so we take a reference to prevent it from
 	 * happening while we have active queues that use PXP
@@ -589,7 +614,7 @@ void xe_pxp_exec_queue_remove(struct xe_pxp *pxp, struct xe_exec_queue *q)
 {
 	bool need_pm_put = false;

-	if (!pxp_is_enabled(pxp))
+	if (!xe_pxp_is_enabled(pxp))
 		return;

 	spin_lock_irq(&pxp->queues.lock);
@@ -599,6 +624,8 @@ void xe_pxp_exec_queue_remove(struct xe_pxp *pxp, struct xe_exec_queue *q)
 		need_pm_put = true;
 	}

+	q->pxp.type = DRM_XE_PXP_TYPE_NONE;
+
 	spin_unlock_irq(&pxp->queues.lock);

 	if (need_pm_put)

drivers/gpu/drm/xe/xe_pxp.h

@@ -12,13 +12,13 @@ struct xe_device;
 struct xe_exec_queue;
 struct xe_pxp;

-#define DRM_XE_PXP_HWDRM_DEFAULT_SESSION 0xF /* TODO: move to uapi */
-
 bool xe_pxp_is_supported(const struct xe_device *xe);
+bool xe_pxp_is_enabled(const struct xe_pxp *pxp);

 int xe_pxp_init(struct xe_device *xe);
 void xe_pxp_irq_handler(struct xe_device *xe, u16 iir);

+int xe_pxp_exec_queue_set_type(struct xe_pxp *pxp, struct xe_exec_queue *q, u8 type);
 int xe_pxp_exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q);
 void xe_pxp_exec_queue_remove(struct xe_pxp *pxp, struct xe_exec_queue *q);

include/uapi/drm/xe_drm.h

@@ -1114,6 +1114,24 @@ struct drm_xe_vm_bind {
 /**
  * struct drm_xe_exec_queue_create - Input of &DRM_IOCTL_XE_EXEC_QUEUE_CREATE
  *
+ * This ioctl supports setting the following properties via the
+ * %DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY extension, which uses the
+ * generic @drm_xe_ext_set_property struct:
+ *
+ *  - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY - set the queue priority.
+ *    CAP_SYS_NICE is required to set a value above normal.
+ *  - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE - set the queue timeslice
+ *    duration in microseconds.
+ *  - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE - set the type of PXP session
+ *    this queue will be used with. Valid values are listed in enum
+ *    drm_xe_pxp_session_type. %DRM_XE_PXP_TYPE_NONE is the default behavior,
+ *    so there is no need to explicitly set that. When a queue of type
+ *    %DRM_XE_PXP_TYPE_HWDRM is created, the PXP default HWDRM session
+ *    (%DRM_XE_PXP_HWDRM_DEFAULT_SESSION) will be started, if it isn't already
+ *    running. Given that going into a power-saving state kills PXP HWDRM
+ *    sessions, runtime PM will be blocked while queues of this type are
+ *    alive. All PXP queues will be killed if a PXP invalidation event occurs.
+ *
 * The example below shows how to use @drm_xe_exec_queue_create to create
 * a simple exec_queue (no parallel submission) of class
 * &DRM_XE_ENGINE_CLASS_RENDER.
@@ -1137,7 +1155,7 @@ struct drm_xe_exec_queue_create {
 #define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY		0
 #define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY		0
 #define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE		1
+#define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE		2

 	/** @extensions: Pointer to the first extension struct, if any */
 	__u64 extensions;
@@ -1756,6 +1774,26 @@ struct drm_xe_oa_stream_info {
 	__u64 reserved[3];
 };

+/**
+ * enum drm_xe_pxp_session_type - Supported PXP session types.
+ *
+ * We currently only support HWDRM sessions, which are used for protected
+ * content that ends up being displayed, but the HW supports multiple types, so
+ * we might extend support in the future.
+ */
+enum drm_xe_pxp_session_type {
+	/** @DRM_XE_PXP_TYPE_NONE: PXP not used */
+	DRM_XE_PXP_TYPE_NONE = 0,
+	/**
+	 * @DRM_XE_PXP_TYPE_HWDRM: HWDRM sessions are used for content that ends
+	 * up on the display.
+	 */
+	DRM_XE_PXP_TYPE_HWDRM = 1,
+};
+
+/* ID of the protected content session managed by Xe when PXP is active */
+#define DRM_XE_PXP_HWDRM_DEFAULT_SESSION 0xf
+
 #if defined(__cplusplus)
 }
 #endif