diff --git a/arch/x86/kernel/itmt.c b/arch/x86/kernel/itmt.c
index 1afbdd1dd7777bf4e0d06a2dc28ee493febeb757..458d2790d9be2f34bfdf7c8b9a685e5131514bcb 100644
--- a/arch/x86/kernel/itmt.c
+++ b/arch/x86/kernel/itmt.c
@@ -131,6 +131,7 @@ int sched_set_itmt_support(void)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(sched_set_itmt_support);
 
 /**
  * sched_clear_itmt_support() - Revoke platform's support of ITMT
@@ -203,3 +204,4 @@ void sched_set_itmt_core_prio(int prio, int core_cpu)
 		i++;
 	}
 }
+EXPORT_SYMBOL_GPL(sched_set_itmt_core_prio);
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 11b582dcd6a9f8f8c88ffb3f3face825249231ec..eeb4d46b06591860a1ccae3fb13eb6970e0a148b 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -634,6 +634,9 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
 #endif
 
 #ifdef CONFIG_ACPI_CPPC_LIB
+static bool cppc_highest_perf_diff;
+static struct cpumask core_prior_mask;
+
 static u64 get_max_boost_ratio(unsigned int cpu)
 {
 	struct cppc_perf_caps perf_caps;
@@ -643,6 +646,10 @@ static u64 get_max_boost_ratio(unsigned int cpu)
 	if (acpi_pstate_strict)
 		return 0;
 
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR ||
+	    boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN)
+		return 0;
+
 	ret = cppc_get_perf_caps(cpu, &perf_caps);
 	if (ret) {
 		pr_debug("CPU%d: Unable to get performance capabilities (%d)\n",
@@ -669,8 +676,59 @@
 
 	return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
 }
+
+/* The work item is needed to avoid CPU hotplug locking issues */
+static void sched_itmt_work_fn(struct work_struct *work)
+{
+	sched_set_itmt_support();
+}
+
+static DECLARE_WORK(sched_itmt_work, sched_itmt_work_fn);
+
+static void core_set_itmt_prio(int cpu)
+{
+	struct cppc_perf_caps perf_caps;
+	u64 highest_perf = 0;
+	static u32 max_highest_perf = 0, min_highest_perf = U32_MAX;
+	int ret;
+
+	ret = cppc_get_perf_caps(cpu, &perf_caps);
+	if (ret) {
+		pr_debug("CPU%d: Unable to get performance capabilities (%d)\n", cpu, ret);
+		return;
+	}
+
+	highest_perf = perf_caps.highest_perf;
+
+	sched_set_itmt_core_prio(highest_perf, cpu);
+	cpumask_set_cpu(cpu, &core_prior_mask);
+
+	if (max_highest_perf <= min_highest_perf) {
+		if (perf_caps.highest_perf > max_highest_perf)
+			max_highest_perf = perf_caps.highest_perf;
+
+		if (perf_caps.highest_perf < min_highest_perf)
+			min_highest_perf = perf_caps.highest_perf;
+
+		if (max_highest_perf > min_highest_perf) {
+			/*
+			 * This code can be run during CPU online under the
+			 * CPU hotplug locks, so sched_set_itmt_support()
+			 * cannot be called from here. Queue up a work item
+			 * to invoke it.
+			 */
+			cppc_highest_perf_diff = true;
+		}
+	}
+
+	if (cppc_highest_perf_diff && cpumask_equal(&core_prior_mask, cpu_online_mask)) {
+		pr_debug("queue a work to set itmt enabled\n");
+		schedule_work(&sched_itmt_work);
+	}
+}
 #else
 static inline u64 get_max_boost_ratio(unsigned int cpu) { return 0; }
+static inline void core_set_itmt_prio(int cpu) { }
 #endif
 
 static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
@@ -683,7 +741,7 @@
 	unsigned int valid_states = 0;
 	unsigned int result = 0;
 	u64 max_boost_ratio;
-	unsigned int i;
+	unsigned int i, j = 0;
 #ifdef CONFIG_SMP
 	static int blacklisted;
 #endif
@@ -748,6 +806,14 @@
 	}
 #endif
+
+#if defined(CONFIG_X86)
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR ||
+	    boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) {
+		for_each_cpu(j, policy->cpus)
+			core_set_itmt_prio(j);
+	}
+#endif
 
 	/* capability check */
 	if (perf->state_count <= 1) {
 		pr_debug("No P-States\n");
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index f533c6a00f3eb46467f22eac60edf38809bb6a0c..222743da250cc7e75082b6fc4685f0517801870a 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -2280,6 +2280,17 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
 		}
 	}
 
+#ifdef CONFIG_X86
+	if ((boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR ||
+	     boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) &&
+	    (boot_cpu_data.x86 == 7 && boot_cpu_data.x86_model == 0x5b)) {
+		for_each_cpu(i, cpu_map) {
+			for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent)
+				sd->flags |= SD_ASYM_PACKING;
+		}
+	}
+#endif
+
 	/* Calculate CPU capacity for physical packages and nodes */
 	for (i = nr_cpumask_bits-1; i >= 0; i--) {
 		if (!cpumask_test_cpu(i, cpu_map))