mirror of
https://git.proxmox.com/git/mirror_ubuntu-kernels.git
synced 2025-12-07 16:41:15 +00:00
When switching tasks, the CLOSID and RMID that the new task should use are stored in struct task_struct. For x86 the CLOSID known by resctrl, the value in task_struct, and the value written to the CPU register are all the same thing. MPAM's CPU interface has two different PARTIDs - one for data accesses the other for instruction fetch. Storing resctrl's CLOSID value in struct task_struct implies the arch code knows whether resctrl is using CDP. Move the matching and setting of the struct task_struct properties to use helpers. This allows arm64 to store the hardware format of the register, instead of having to convert it each time. __rdtgroup_move_task()s use of READ_ONCE()/WRITE_ONCE() ensures torn values aren't seen as another CPU may schedule the task being moved while the value is being changed. MPAM has an additional corner-case here as the PMG bits extend the PARTID space. If the scheduler sees a new-CLOSID but old-RMID, the task will dirty an RMID that the limbo code is not watching causing an inaccurate count. x86's RMID are independent values, so the limbo code will still be watching the old-RMID in this circumstance. To avoid this, arm64 needs both the CLOSID/RMID WRITE_ONCE()d together. Both values must be provided together. Because MPAM's RMID values are not unique, the CLOSID must be provided when matching the RMID. Signed-off-by: James Morse <james.morse@arm.com> Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de> Reviewed-by: Shaopeng Tan <tan.shaopeng@fujitsu.com> Reviewed-by: Reinette Chatre <reinette.chatre@intel.com> Reviewed-by: Babu Moger <babu.moger@amd.com> Tested-by: Shaopeng Tan <tan.shaopeng@fujitsu.com> Tested-by: Peter Newman <peternewman@google.com> Tested-by: Babu Moger <babu.moger@amd.com> Tested-by: Carl Worth <carl@os.amperecomputing.com> # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-12-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
149 lines
4.1 KiB
C
149 lines
4.1 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
#ifndef _ASM_X86_RESCTRL_H
|
|
#define _ASM_X86_RESCTRL_H
|
|
|
|
#ifdef CONFIG_X86_CPU_RESCTRL
|
|
|
|
#include <linux/sched.h>
|
|
#include <linux/jump_label.h>
|
|
|
|
/*
|
|
* This value can never be a valid CLOSID, and is used when mapping a
|
|
* (closid, rmid) pair to an index and back. On x86 only the RMID is
|
|
* needed. The index is a software defined value.
|
|
*/
|
|
#define X86_RESCTRL_EMPTY_CLOSID ((u32)~0)
|
|
|
|
/**
 * struct resctrl_pqr_state - State cache for the PQR MSR
 * @cur_rmid:		The cached Resource Monitoring ID
 * @cur_closid:		The cached Class Of Service ID
 * @default_rmid:	The user assigned Resource Monitoring ID
 * @default_closid:	The user assigned cached Class Of Service ID
 *
 * The upper 32 bits of MSR_IA32_PQR_ASSOC contain closid and the
 * lower 10 bits rmid. The update to MSR_IA32_PQR_ASSOC always
 * contains both parts, so we need to cache them. This also
 * stores the user configured per cpu CLOSID and RMID.
 *
 * The cache also helps to avoid pointless updates if the value does
 * not change. One instance exists per CPU (see the DECLARE_PER_CPU()
 * of pqr_state below); it is only ever accessed with preemption
 * disabled, so no locking is needed.
 */
struct resctrl_pqr_state {
	u32			cur_rmid;
	u32			cur_closid;
	u32			default_rmid;
	u32			default_closid;
};
|
|
|
|
DECLARE_PER_CPU(struct resctrl_pqr_state, pqr_state);
|
|
|
|
DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
|
|
DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
|
|
DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);
|
|
|
|
/*
 * __resctrl_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR
 *
 * Following considerations are made so that this has minimal impact
 * on scheduler hot path:
 * - This will stay as no-op unless we are running on a CPU which
 *   supports resource control or monitoring and we enable by
 *   mounting the resctrl file system.
 * - Caches the per cpu CLOSid/RMID values and does the MSR write only
 *   when a task with a different CLOSid/RMID is scheduled in.
 * - We allocate RMIDs/CLOSids globally in order to keep this as
 *   simple as possible.
 *
 * Must be called with preemption disabled.
 */
static inline void __resctrl_sched_in(struct task_struct *tsk)
{
	struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);
	u32 closid = state->default_closid;
	u32 rmid = state->default_rmid;
	u32 tmp;

	/*
	 * If this task has a closid/rmid assigned, use it.
	 * Else use the closid/rmid assigned to this cpu.
	 *
	 * READ_ONCE() pairs with the WRITE_ONCE() in
	 * resctrl_arch_set_closid_rmid(): another CPU may move this
	 * task between groups while it is being scheduled in, and a
	 * plain load could observe a torn value.
	 */
	if (static_branch_likely(&rdt_alloc_enable_key)) {
		tmp = READ_ONCE(tsk->closid);
		if (tmp)
			closid = tmp;
	}

	if (static_branch_likely(&rdt_mon_enable_key)) {
		tmp = READ_ONCE(tsk->rmid);
		if (tmp)
			rmid = tmp;
	}

	/*
	 * Skip the (serializing) MSR write when neither value changed
	 * since the last context switch on this CPU.
	 */
	if (closid != state->cur_closid || rmid != state->cur_rmid) {
		state->cur_closid = closid;
		state->cur_rmid = rmid;
		wrmsr(MSR_IA32_PQR_ASSOC, rmid, closid);
	}
}
|
|
|
|
static inline unsigned int resctrl_arch_round_mon_val(unsigned int val)
|
|
{
|
|
unsigned int scale = boot_cpu_data.x86_cache_occ_scale;
|
|
|
|
/* h/w works in units of "boot_cpu_data.x86_cache_occ_scale" */
|
|
val /= scale;
|
|
return val * scale;
|
|
}
|
|
|
|
/*
 * resctrl_arch_set_closid_rmid() - Record the CLOSID/RMID @tsk should
 * use; picked up by __resctrl_sched_in() on its next context switch.
 *
 * WRITE_ONCE() pairs with the READ_ONCE() readers
 * (__resctrl_sched_in(), resctrl_arch_match_closid(),
 * resctrl_arch_match_rmid()): @tsk may be running on another CPU
 * while it is being moved, and plain stores could be observed torn.
 * Both values are provided together because other architectures
 * (MPAM) must store them as a single unit; x86 stores them as two
 * independent fields.
 */
static inline void resctrl_arch_set_closid_rmid(struct task_struct *tsk,
						u32 closid, u32 rmid)
{
	WRITE_ONCE(tsk->closid, closid);
	WRITE_ONCE(tsk->rmid, rmid);
}
|
|
|
|
/*
 * resctrl_arch_match_closid() - Does @tsk currently use @closid?
 *
 * READ_ONCE() avoids a torn read if the task is concurrently being
 * moved to another group.
 */
static inline bool resctrl_arch_match_closid(struct task_struct *tsk, u32 closid)
{
	u32 cur_closid = READ_ONCE(tsk->closid);

	return cur_closid == closid;
}
|
|
|
|
/*
 * resctrl_arch_match_rmid() - Does @tsk currently use @rmid?
 *
 * On x86 RMID values are unique system-wide, so the CLOSID argument
 * (@ignored) is not needed for the comparison; it exists for
 * architectures where RMIDs are only unique within a CLOSID.
 */
static inline bool resctrl_arch_match_rmid(struct task_struct *tsk, u32 ignored,
					   u32 rmid)
{
	u32 cur_rmid = READ_ONCE(tsk->rmid);

	return cur_rmid == rmid;
}
|
|
|
|
static inline void resctrl_sched_in(struct task_struct *tsk)
|
|
{
|
|
if (static_branch_likely(&rdt_enable_key))
|
|
__resctrl_sched_in(tsk);
|
|
}
|
|
|
|
static inline u32 resctrl_arch_system_num_rmid_idx(void)
|
|
{
|
|
/* RMID are independent numbers for x86. num_rmid_idx == num_rmid */
|
|
return boot_cpu_data.x86_cache_max_rmid + 1;
|
|
}
|
|
|
|
/*
 * resctrl_arch_rmid_idx_decode() - Recover the (closid, rmid) pair
 * from an index.
 *
 * On x86 the index is the RMID itself; the CLOSID is not part of the
 * index, so X86_RESCTRL_EMPTY_CLOSID is returned to signal that no
 * CLOSID is encoded.
 */
static inline void resctrl_arch_rmid_idx_decode(u32 idx, u32 *closid, u32 *rmid)
{
	*rmid = idx;
	*closid = X86_RESCTRL_EMPTY_CLOSID;
}
|
|
|
|
/*
 * resctrl_arch_rmid_idx_encode() - Map a (closid, rmid) pair to an
 * index.
 *
 * On x86 the index is simply the RMID; the CLOSID (@ignored) plays no
 * part. Other architectures combine both values into the index.
 */
static inline u32 resctrl_arch_rmid_idx_encode(u32 ignored, u32 rmid)
{
	u32 idx = rmid;

	return idx;
}
|
|
|
|
void resctrl_cpu_detect(struct cpuinfo_x86 *c);
|
|
|
|
#else

/*
 * CONFIG_X86_CPU_RESCTRL=n: empty stubs so callers (e.g. the context
 * switch path) need no #ifdef guards of their own.
 */
static inline void resctrl_sched_in(struct task_struct *tsk) {}
static inline void resctrl_cpu_detect(struct cpuinfo_x86 *c) {}

#endif /* CONFIG_X86_CPU_RESCTRL */

#endif /* _ASM_X86_RESCTRL_H */
|