drm/amdgpu: prepare map process for multi-process debug devices
Unlike single-process debug devices, multi-process debug devices allow the debug mode setting to be chosen per VMID rather than device-globally. Because the HWS manages the PASID-VMID mapping, the new MAP_PROCESS API allows the KFD to forward the required SPI debug register write requests. For a debug mode setting change to take effect, the KFD must be able to preempt all queues and then remap them so that MAP_PROCESS is re-issued with the new settings.

Note that by default, trap enablement in non-debug mode must be disabled on multi-process debug devices for performance reasons, due to the setup overhead in FW.

Signed-off-by: Jonathan Kim <jonathan.kim@amd.com>
Reviewed-by: Felix Kuehling <felix.kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
parent 97ae3c8cce
commit 0de4ec9a03
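As context for the diff below, the sequence the commit message describes (cache the new per-VMID SPI debug values, preempt all queues, then remap them so the HWS re-emits MAP_PROCESS with the updated values) could be driven by a caller roughly like this sketch. The function example_set_vmid_debug_mode() and its parameters are hypothetical; only debug_refresh_runlist() and the spi_dbg_override/spi_dbg_launch_mode fields come from this patch.

/*
 * Hypothetical caller, for illustration only (not part of this patch).
 * On per-VMID (multi-process) debug devices the new settings only take
 * effect once the HWS re-sends MAP_PROCESS, so the runlist has to be
 * unmapped and mapped again after the cached values are updated.
 */
static int example_set_vmid_debug_mode(struct kfd_process_device *pdd,
				       uint32_t spi_dbg_cntl,
				       uint32_t launch_mode)
{
	/* Cache the requested per-VMID settings in the process device. */
	pdd->spi_dbg_override = spi_dbg_cntl;
	pdd->spi_dbg_launch_mode = launch_mode;

	/* Preempt all queues, then remap them with the new settings. */
	return debug_refresh_runlist(pdd->dev->dqm);
}

Note the locking split in the new helpers: debug_lock_and_unmap() keeps the DQM lock held on success, and debug_map_and_unlock() drops it after remapping, so the unmap/remap pair is atomic with respect to other scheduler updates.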
drivers/gpu/drm/amd/amdkfd/kfd_debug.h
@@ -29,4 +29,9 @@ int kfd_dbg_trap_disable(struct kfd_process *target);
 int kfd_dbg_trap_enable(struct kfd_process *target, uint32_t fd,
			void __user *runtime_info,
			uint32_t *runtime_info_size);
+
+static inline bool kfd_dbg_is_per_vmid_supported(struct kfd_node *dev)
+{
+	return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2);
+}
 #endif
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -36,6 +36,7 @@
 #include "kfd_kernel_queue.h"
 #include "amdgpu_amdkfd.h"
 #include "mes_api_def.h"
+#include "kfd_debug.h"
 
 /* Size of the per-pipe EOP queue */
 #define CIK_HPD_EOP_BYTES_LOG2		11
@@ -2594,6 +2595,56 @@ int release_debug_trap_vmid(struct device_queue_manager *dqm,
 	return r;
 }
+
+int debug_lock_and_unmap(struct device_queue_manager *dqm)
+{
+	int r;
+
+	if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
+		pr_err("Unsupported on sched_policy: %i\n", dqm->sched_policy);
+		return -EINVAL;
+	}
+
+	if (!kfd_dbg_is_per_vmid_supported(dqm->dev))
+		return 0;
+
+	dqm_lock(dqm);
+
+	r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, 0, false);
+	if (r)
+		dqm_unlock(dqm);
+
+	return r;
+}
+
+int debug_map_and_unlock(struct device_queue_manager *dqm)
+{
+	int r;
+
+	if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
+		pr_err("Unsupported on sched_policy: %i\n", dqm->sched_policy);
+		return -EINVAL;
+	}
+
+	if (!kfd_dbg_is_per_vmid_supported(dqm->dev))
+		return 0;
+
+	r = map_queues_cpsch(dqm);
+
+	dqm_unlock(dqm);
+
+	return r;
+}
+
+int debug_refresh_runlist(struct device_queue_manager *dqm)
+{
+	int r = debug_lock_and_unmap(dqm);
+
+	if (r)
+		return r;
+
+	return debug_map_and_unlock(dqm);
+}
 
 #if defined(CONFIG_DEBUG_FS)
 
 static void seq_reg_dump(struct seq_file *m,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -290,6 +290,9 @@ int reserve_debug_trap_vmid(struct device_queue_manager *dqm,
			    struct qcm_process_device *qpd);
 int release_debug_trap_vmid(struct device_queue_manager *dqm,
			    struct qcm_process_device *qpd);
+int debug_lock_and_unmap(struct device_queue_manager *dqm);
+int debug_map_and_unlock(struct device_queue_manager *dqm);
+int debug_refresh_runlist(struct device_queue_manager *dqm);
 
 static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
 {
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
@@ -88,6 +88,10 @@ static int pm_map_process_aldebaran(struct packet_manager *pm,
 {
	struct pm4_mes_map_process_aldebaran *packet;
	uint64_t vm_page_table_base_addr = qpd->page_table_base;
+	struct kfd_dev *kfd = pm->dqm->dev->kfd;
+	struct kfd_process_device *pdd =
+			container_of(qpd, struct kfd_process_device, qpd);
+	int i;
 
	packet = (struct pm4_mes_map_process_aldebaran *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_map_process_aldebaran));
@@ -102,6 +106,16 @@ static int pm_map_process_aldebaran(struct packet_manager *pm,
	packet->bitfields14.num_oac = qpd->num_oac;
	packet->bitfields14.sdma_enable = 1;
	packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;
+	packet->spi_gdbg_per_vmid_cntl = pdd->spi_dbg_override |
+					 pdd->spi_dbg_launch_mode;
+
+	if (pdd->process->debug_trap_enabled) {
+		for (i = 0; i < kfd->device_info.num_of_watch_points; i++)
+			packet->tcp_watch_cntl[i] = pdd->watch_points[i];
+
+		packet->bitfields2.single_memops =
+			!!(pdd->process->dbg_flags & KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP);
+	}
 
	packet->sh_mem_config = qpd->sh_mem_config;
	packet->sh_mem_bases = qpd->sh_mem_bases;
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -816,6 +816,12 @@ struct kfd_process_device {
	uint64_t faults;
	uint64_t page_in;
	uint64_t page_out;
+
+	/* Tracks debug per-vmid request settings */
+	uint32_t spi_dbg_override;
+	uint32_t spi_dbg_launch_mode;
+	uint32_t watch_points[4];
+
	/*
	 * If this process has been checkpointed before, then the user
	 * application will use the original gpu_id on the
@@ -952,6 +958,9 @@ struct kfd_process {
 
	bool xnack_enabled;
 
+	/* Tracks debug per-vmid request for debug flags */
+	bool dbg_flags;
+
	atomic_t poison;
	/* Queues are in paused stated because we are in the process of doing a CRIU checkpoint */
	bool queues_paused;
drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -1612,6 +1612,11 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
	}
 
	p->pdds[p->n_pdds++] = pdd;
+	if (kfd_dbg_is_per_vmid_supported(pdd->dev))
+		pdd->spi_dbg_override = pdd->dev->kfd2kgd->disable_debug_trap(
+							pdd->dev->adev,
+							false,
+							0);
 
	/* Init idr used for memory handle translation */
	idr_init(&pdd->alloc_idr);