linux/drivers/gpu/drm/xe/xe_map.h
Tomasz Lis cef88d1265 drm/xe/vf: Fixup CTB send buffer messages after migration
During post-migration recovery of a VF, it is necessary to update
GGTT references included in messages which are going to be sent
to GuC. GuC will start consuming messages only after the VF KMD
informs it that the fixups are done; before that, the VF KMD is
expected to update any H2G messages which are already in the send
buffer but were not yet consumed by GuC.

Only a small subset of messages allowed for VFs have GGTT references
in them. This patch adds the functionality to parse the CTB send
ring buffer and shift addresses contained within.

While fixing the CTB content, ct->lock is not taken. This means
the only lock held remains the GGTT address lock - which is fine,
because only requests with GGTT addresses matter, but it also means
tail changes can happen while the CTB fixups execute (these can
be ignored, as any new messages will not have anything to fix).
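
As a minimal sketch of a single fixup (the helper name, the message
walk and the one-dword layout here are hypothetical; only the
xe_map_*_ring_u32() helpers below are introduced by this patch):

	/* Shift one GGTT-address dword inside the (possibly wrapped) send ring. */
	static void fixup_ggtt_dword(struct xe_device *xe, struct iosys_map *cmds,
				     u32 index, u32 size, s64 shift)
	{
		u32 lo = xe_map_rd_ring_u32(xe, cmds, index, size);

		xe_map_wr_ring_u32(xe, cmds, index, size, lo + lower_32_bits(shift));
	}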

The GGTT address locking will be introduced in a future series.

v2: removed storing shift as that's now done in VMA nodes patch;
  macros to inlines; warns to asserts; log messages fixes (Michal)
v3: removed inline keywords, enums for offsets in CTB messages,
  less error messages, if return unused then made functs void (Michal)
v4: update the cached head before starting fixups
v5: removed/updated comments, wrapped lines, converted assert into
  error, enums for offsets to separate patch, reused xe_map_rd
v6: define xe_map_*_array() macros, support CTB wrap which divides
  a message, updated comments, moved one function to an earlier patch
v7: renamed a few functions, wider use of previously introduced helper,
  separate cases in parsing messages, documented a static funct
v8: Introduced more helpers, fixed coding style mistakes
v9: Move xe_map*() functs to macros, add asserts, add debug print
v10: Errors in place of some asserts, style fixes
v11: Fixed invalid conditionals, added debug-only local pointer
v12: Removed redundant __maybe_unused

Signed-off-by: Tomasz Lis <tomasz.lis@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Link: https://lore.kernel.org/r/20250512114018.361843-5-tomasz.lis@intel.com
2025-05-12 15:53:38 +02:00


/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */

#ifndef _XE_MAP_H_
#define _XE_MAP_H_

#include <linux/iosys-map.h>

#include <xe_device.h>

/**
 * DOC: Map layer
 *
 * All access to any memory shared with a device (both sysmem and vram) in the
 * XE driver should go through this layer (xe_map). This layer is built on top
 * of :ref:`driver-api/device-io:Generalizing Access to System and I/O Memory`
 * and adds extra hooks into the XE driver that allow adding asserts to memory
 * accesses (e.g. for blocking runtime_pm D3Cold on Discrete Graphics).
 */
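
/*
 * Example usage (illustrative only; &bo->vmap stands in for any
 * device-visible struct iosys_map):
 *
 *	xe_map_memcpy_to(xe, &bo->vmap, 0, data, len);
 *	val = xe_map_rd(xe, &bo->vmap, 0, u32);
 */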

static inline void xe_map_memcpy_to(struct xe_device *xe, struct iosys_map *dst,
				    size_t dst_offset, const void *src,
				    size_t len)
{
	xe_device_assert_mem_access(xe);
	iosys_map_memcpy_to(dst, dst_offset, src, len);
}

static inline void xe_map_memcpy_from(struct xe_device *xe, void *dst,
				      const struct iosys_map *src,
				      size_t src_offset, size_t len)
{
	xe_device_assert_mem_access(xe);
	iosys_map_memcpy_from(dst, src, src_offset, len);
}

static inline void xe_map_memset(struct xe_device *xe,
				 struct iosys_map *dst, size_t offset,
				 int value, size_t len)
{
	xe_device_assert_mem_access(xe);
	iosys_map_memset(dst, offset, value, len);
}

/* FIXME: We likely should kill these two functions sooner or later */
static inline u32 xe_map_read32(struct xe_device *xe, struct iosys_map *map)
{
	xe_device_assert_mem_access(xe);

	if (map->is_iomem)
		return readl(map->vaddr_iomem);
	else
		return READ_ONCE(*(u32 *)map->vaddr);
}

static inline void xe_map_write32(struct xe_device *xe, struct iosys_map *map,
				  u32 val)
{
	xe_device_assert_mem_access(xe);

	if (map->is_iomem)
		writel(val, map->vaddr_iomem);
	else
		*(u32 *)map->vaddr = val;
}

#define xe_map_rd(xe__, map__, offset__, type__) ({			\
	struct xe_device *__xe = xe__;					\
	xe_device_assert_mem_access(__xe);				\
	iosys_map_rd(map__, offset__, type__);				\
})

#define xe_map_wr(xe__, map__, offset__, type__, val__) ({		\
	struct xe_device *__xe = xe__;					\
	xe_device_assert_mem_access(__xe);				\
	iosys_map_wr(map__, offset__, type__, val__);			\
})
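
/*
 * Array variants: address the map as an array of type__ elements and
 * read/write the element at index__.
 */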

#define xe_map_rd_array(xe__, map__, index__, type__) \
	xe_map_rd(xe__, map__, (index__) * sizeof(type__), type__)

#define xe_map_wr_array(xe__, map__, index__, type__, val__) \
	xe_map_wr(xe__, map__, (index__) * sizeof(type__), type__, val__)

#define xe_map_rd_array_u32(xe__, map__, index__) \
	xe_map_rd_array(xe__, map__, index__, u32)

#define xe_map_wr_array_u32(xe__, map__, index__, val__) \
	xe_map_wr_array(xe__, map__, index__, u32, val__)
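
/*
 * Ring variants wrap index__ modulo size__, so callers can walk a message
 * that spans the end of a circular buffer (e.g. the CTB send ring) without
 * handling the wrap-around themselves.
 */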

#define xe_map_rd_ring_u32(xe__, map__, index__, size__) \
	xe_map_rd_array_u32(xe__, map__, (index__) % (size__))

#define xe_map_wr_ring_u32(xe__, map__, index__, size__, val__) \
	xe_map_wr_array_u32(xe__, map__, (index__) % (size__), val__)

#define xe_map_rd_field(xe__, map__, struct_offset__, struct_type__, field__) ({	\
	struct xe_device *__xe = xe__;							\
	xe_device_assert_mem_access(__xe);						\
	iosys_map_rd_field(map__, struct_offset__, struct_type__, field__);		\
})

#define xe_map_wr_field(xe__, map__, struct_offset__, struct_type__, field__, val__) ({	\
	struct xe_device *__xe = xe__;							\
	xe_device_assert_mem_access(__xe);						\
	iosys_map_wr_field(map__, struct_offset__, struct_type__, field__, val__);	\
})

#endif