mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson
synced 2025-08-29 02:59:13 +00:00
cpufreq/sched: Move cpufreq-specific EAS checks to cpufreq
Doing cpufreq-specific EAS checks that require accessing policy internals directly from sched_is_eas_possible() is a bit unfortunate, so introduce cpufreq_ready_for_eas() in cpufreq, move those checks into that new function and make sched_is_eas_possible() call it. While at it, address a possible race between the EAS governor check and governor change by doing the former under the policy rwsem. Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com> Reviewed-by: Christian Loehle <christian.loehle@arm.com> Tested-by: Christian Loehle <christian.loehle@arm.com> Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com> Link: https://patch.msgid.link/2317800.iZASKD2KPV@rjwysocki.net
This commit is contained in:
parent
f42c8556a0
commit
4854649b1f
@ -3056,6 +3056,38 @@ static int __init cpufreq_core_init(void)
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * cpufreq_policy_is_good_for_eas - Check if a CPU's policy qualifies for EAS.
 * @cpu: Number of the CPU whose cpufreq policy is to be checked.
 *
 * EAS requires the schedutil governor, so a CPU only qualifies if its
 * current cpufreq governor is schedutil.
 *
 * Return: true if @cpu has a cpufreq policy governed by schedutil, false if
 * no policy is set for it or a different governor is in use.
 */
static bool cpufreq_policy_is_good_for_eas(unsigned int cpu)
{
	/* __free(): auto-drop the reference taken below when policy leaves scope. */
	struct cpufreq_policy *policy __free(put_cpufreq_policy);

	policy = cpufreq_cpu_get(cpu);
	if (!policy) {
		pr_debug("cpufreq policy not set for CPU: %d\n", cpu);
		return false;
	}

	/*
	 * Hold the policy rwsem for reading so the governor cannot be changed
	 * concurrently while it is being checked (see the commit rationale on
	 * racing with governor changes).
	 */
	guard(cpufreq_policy_read)(policy);

	return sugov_is_governor(policy);
}
|
||||
|
||||
bool cpufreq_ready_for_eas(const struct cpumask *cpu_mask)
|
||||
{
|
||||
unsigned int cpu;
|
||||
|
||||
/* Do not attempt EAS if schedutil is not being used. */
|
||||
for_each_cpu(cpu, cpu_mask) {
|
||||
if (!cpufreq_policy_is_good_for_eas(cpu)) {
|
||||
pr_debug("rd %*pbl: schedutil is mandatory for EAS\n",
|
||||
cpumask_pr_args(cpu_mask));
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/* NOTE(review): "off" presumably disables the cpufreq core when set on the
 * command line — confirm against its definition earlier in this file. 0444
 * makes both parameters read-only via sysfs. */
module_param(off, int, 0444);
module_param_string(default_governor, default_governor, CPUFREQ_NAME_LEN, 0444);
core_initcall(cpufreq_core_init);
|
@ -1237,6 +1237,8 @@ void cpufreq_generic_init(struct cpufreq_policy *policy,
|
||||
struct cpufreq_frequency_table *table,
|
||||
unsigned int transition_latency);
|
||||
|
||||
/* Returns true if every CPU in @cpu_mask has a schedutil-governed policy. */
bool cpufreq_ready_for_eas(const struct cpumask *cpu_mask);
|
||||
|
||||
static inline void cpufreq_register_em_with_opp(struct cpufreq_policy *policy)
|
||||
{
|
||||
dev_pm_opp_of_register_em(get_cpu_device(policy->cpu),
|
||||
|
@ -212,8 +212,6 @@ static bool sched_energy_update;
|
||||
static bool sched_is_eas_possible(const struct cpumask *cpu_mask)
|
||||
{
|
||||
bool any_asym_capacity = false;
|
||||
struct cpufreq_policy *policy;
|
||||
bool policy_is_ready;
|
||||
int i;
|
||||
|
||||
/* EAS is enabled for asymmetric CPU capacity topologies. */
|
||||
@ -248,25 +246,12 @@ static bool sched_is_eas_possible(const struct cpumask *cpu_mask)
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Do not attempt EAS if schedutil is not being used. */
|
||||
for_each_cpu(i, cpu_mask) {
|
||||
policy = cpufreq_cpu_get(i);
|
||||
if (!policy) {
|
||||
if (sched_debug()) {
|
||||
pr_info("rd %*pbl: Checking EAS, cpufreq policy not set for CPU: %d",
|
||||
cpumask_pr_args(cpu_mask), i);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
policy_is_ready = sugov_is_governor(policy);
|
||||
cpufreq_cpu_put(policy);
|
||||
if (!policy_is_ready) {
|
||||
if (sched_debug()) {
|
||||
pr_info("rd %*pbl: Checking EAS, schedutil is mandatory\n",
|
||||
cpumask_pr_args(cpu_mask));
|
||||
}
|
||||
return false;
|
||||
if (!cpufreq_ready_for_eas(cpu_mask)) {
|
||||
if (sched_debug()) {
|
||||
pr_info("rd %*pbl: Checking EAS: cpufreq is not ready\n",
|
||||
cpumask_pr_args(cpu_mask));
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
|
Loading…
Reference in New Issue
Block a user