drm/ttm/xe: drop unused force_alloc flag
This flag used to be used in the old memory tracking code. That code got
migrated into the vmwgfx driver [1] and then removed from the tree [2],
but this piece got left behind. Clean up the dead code.

[1] f07069da6b ("drm/ttm: move memory accounting into vmwgfx v4")
[2] 8aadeb8ad8 ("drm/vmwgfx: Remove the dedicated memory accounting")

Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent b60301774a
commit 55df7c0c62
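For context, a minimal sketch of what a ttm_operation_ctx setup looks like once the flag is gone. The wrapper function and its parameters are invented for illustration; the remaining fields and ttm_bo_validate() are taken from the tree as it looks after this patch. Callers of xe_bo_evict() simply drop the bool argument, as the xe_dma_buf hunk below shows.

/*
 * Illustrative sketch only, not part of the commit: building a TTM
 * operation context without .force_alloc. The helper name and its
 * arguments are made up for the example.
 */
#include <drm/ttm/ttm_bo.h>

static int example_validate(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false,
		/* .force_alloc = true is no longer needed (or possible) */
		.gfp_retry_mayfail = true,
	};

	/* Validate the BO against the given placement with this context. */
	return ttm_bo_validate(bo, placement, &ctx);
}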
@@ -220,7 +220,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 		struct ttm_operation_ctx ctx = {
 			.interruptible = true,
 			.no_wait_gpu = false,
-			.force_alloc = true
 		};

 		ttm = bo->ttm;
@@ -548,7 +548,6 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev,
 	struct ttm_operation_ctx ctx = {
 		.interruptible = false,
 		.no_wait_gpu = false,
-		.force_alloc = true
 	};
 	struct dma_fence *fence;
 	int ret;
@@ -2759,19 +2759,17 @@ int xe_bo_migrate(struct xe_bo *bo, u32 mem_type)
 /**
  * xe_bo_evict - Evict an object to evict placement
  * @bo: The buffer object to migrate.
- * @force_alloc: Set force_alloc in ttm_operation_ctx
  *
  * On successful completion, the object memory will be moved to evict
  * placement. This function blocks until the object has been fully moved.
  *
  * Return: 0 on success. Negative error code on failure.
  */
-int xe_bo_evict(struct xe_bo *bo, bool force_alloc)
+int xe_bo_evict(struct xe_bo *bo)
 {
 	struct ttm_operation_ctx ctx = {
 		.interruptible = false,
 		.no_wait_gpu = false,
-		.force_alloc = force_alloc,
 		.gfp_retry_mayfail = true,
 	};
 	struct ttm_placement placement;
@@ -271,7 +271,7 @@ uint64_t vram_region_gpu_offset(struct ttm_resource *res);
 bool xe_bo_can_migrate(struct xe_bo *bo, u32 mem_type);

 int xe_bo_migrate(struct xe_bo *bo, u32 mem_type);
-int xe_bo_evict(struct xe_bo *bo, bool force_alloc);
+int xe_bo_evict(struct xe_bo *bo);

 int xe_bo_evict_pinned(struct xe_bo *bo);
 int xe_bo_restore_pinned(struct xe_bo *bo);
@@ -236,7 +236,7 @@ static void xe_dma_buf_move_notify(struct dma_buf_attachment *attach)
 	struct drm_gem_object *obj = attach->importer_priv;
 	struct xe_bo *bo = gem_to_xe_bo(obj);

-	XE_WARN_ON(xe_bo_evict(bo, false));
+	XE_WARN_ON(xe_bo_evict(bo));
 }

 static const struct dma_buf_attach_ops xe_dma_buf_attach_ops = {
@@ -172,7 +172,6 @@ struct ttm_bo_kmap_obj {
  * @gfp_retry_mayfail: Set the __GFP_RETRY_MAYFAIL when allocation pages.
  * @allow_res_evict: Allow eviction of reserved BOs. Can be used when multiple
  * BOs share the same reservation object.
- * @force_alloc: Don't check the memory account during suspend or CPU page
  * faults. Should only be used by TTM internally.
  * @resv: Reservation object to allow reserved evictions with.
  * @bytes_moved: Statistics on how many bytes have been moved.
@@ -185,7 +184,6 @@ struct ttm_operation_ctx {
 	bool no_wait_gpu;
 	bool gfp_retry_mayfail;
 	bool allow_res_evict;
-	bool force_alloc;
 	struct dma_resv *resv;
 	uint64_t bytes_moved;
 };