Power management fixes for 6.12-rc2
Merge tag 'pm-6.12-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull power management fixes from Rafael Wysocki:
 "These fix two cpufreq issues, one in the core and one in the
  intel_pstate driver:

   - Fix CPU device node reference counting in the cpufreq core (Miquel
     Sabaté Solà)

   - Turn the spinlock used by the intel_pstate driver in hard IRQ
     context into a raw one to prevent the driver from crashing when
     PREEMPT_RT is enabled (Uwe Kleine-König)"

* tag 'pm-6.12-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  cpufreq: Avoid a bad reference count on CPU node
  cpufreq: intel_pstate: Make hwp_notify_lock a raw spinlock
commit 5d18081de2
2 changed files with 9 additions and 13 deletions
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1845,7 +1845,7 @@ static void intel_pstate_notify_work(struct work_struct *work)
 	wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
 }
 
-static DEFINE_SPINLOCK(hwp_notify_lock);
+static DEFINE_RAW_SPINLOCK(hwp_notify_lock);
 static cpumask_t hwp_intr_enable_mask;
 
 #define HWP_GUARANTEED_PERF_CHANGE_STATUS	BIT(0)
@@ -1868,7 +1868,7 @@ void notify_hwp_interrupt(void)
 	if (!(value & status_mask))
 		return;
 
-	spin_lock_irqsave(&hwp_notify_lock, flags);
+	raw_spin_lock_irqsave(&hwp_notify_lock, flags);
 
 	if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask))
 		goto ack_intr;
@@ -1876,13 +1876,13 @@ void notify_hwp_interrupt(void)
 	schedule_delayed_work(&all_cpu_data[this_cpu]->hwp_notify_work,
 			      msecs_to_jiffies(10));
 
-	spin_unlock_irqrestore(&hwp_notify_lock, flags);
+	raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
 
 	return;
 
 ack_intr:
 	wrmsrl_safe(MSR_HWP_STATUS, 0);
-	spin_unlock_irqrestore(&hwp_notify_lock, flags);
+	raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
 }
 
 static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
@@ -1895,9 +1895,9 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
 	/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
 	wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
 
-	spin_lock_irq(&hwp_notify_lock);
+	raw_spin_lock_irq(&hwp_notify_lock);
 	cancel_work = cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask);
-	spin_unlock_irq(&hwp_notify_lock);
+	raw_spin_unlock_irq(&hwp_notify_lock);
 
 	if (cancel_work)
 		cancel_delayed_work_sync(&cpudata->hwp_notify_work);
@@ -1912,10 +1912,10 @@ static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
 	if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) {
 		u64 interrupt_mask = HWP_GUARANTEED_PERF_CHANGE_REQ;
 
-		spin_lock_irq(&hwp_notify_lock);
+		raw_spin_lock_irq(&hwp_notify_lock);
 		INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work);
 		cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask);
-		spin_unlock_irq(&hwp_notify_lock);
+		raw_spin_unlock_irq(&hwp_notify_lock);
 
 		if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE))
 			interrupt_mask |= HWP_HIGHEST_PERF_CHANGE_REQ;
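Why a raw spinlock: notify_hwp_interrupt() is called from the thermal interrupt vector, i.e. in hard IRQ context. On a PREEMPT_RT kernel an ordinary spinlock_t is converted into a sleeping rt_mutex, which must not be acquired there, so taking hwp_notify_lock could crash the driver; a raw_spinlock_t keeps its spinning, IRQ-safe semantics on RT. Below is a minimal sketch of that locking pattern, not from the patch; the handler and data names are hypothetical.

/*
 * Sketch: data shared with a hard-IRQ path is guarded by a
 * raw_spinlock_t, which stays a spinning lock even on PREEMPT_RT.
 */
#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_RAW_SPINLOCK(demo_lock);	/* never becomes a sleeping lock */
static unsigned long demo_events;	/* shared with the IRQ handler */

static irqreturn_t demo_irq_handler(int irq, void *dev_id)
{
	/* Hard IRQ context: sleeping locks are forbidden here on RT. */
	raw_spin_lock(&demo_lock);
	demo_events++;
	raw_spin_unlock(&demo_lock);

	return IRQ_HANDLED;
}

static unsigned long demo_read_events(void)
{
	unsigned long flags, val;

	/* Process context: mask local IRQs while holding the lock. */
	raw_spin_lock_irqsave(&demo_lock, flags);
	val = demo_events;
	raw_spin_unlock_irqrestore(&demo_lock, flags);

	return val;
}

The trade-off is that raw spinlock critical sections add latency on RT, so they should stay as short as the ones in the patch above.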
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -1107,10 +1107,9 @@ static inline int parse_perf_domain(int cpu, const char *list_name,
 				    const char *cell_name,
 				    struct of_phandle_args *args)
 {
-	struct device_node *cpu_np;
 	int ret;
 
-	cpu_np = of_cpu_device_node_get(cpu);
+	struct device_node *cpu_np __free(device_node) = of_cpu_device_node_get(cpu);
 	if (!cpu_np)
 		return -ENODEV;
 
@@ -1118,9 +1117,6 @@ static inline int parse_perf_domain(int cpu, const char *list_name,
 			       args);
 	if (ret < 0)
 		return ret;
 
-	of_node_put(cpu_np);
-
 	return 0;
 }
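Why this fixes the reference count: of_cpu_device_node_get() returns the CPU's device node with an elevated refcount, and the old code dropped it with of_node_put() only on the success path, so the early return after a failed of_parse_phandle_with_args() leaked a reference. Declaring cpu_np with __free(device_node), the scope-based cleanup helper from <linux/cleanup.h> whose device_node hook is defined in <linux/of.h>, makes of_node_put() run automatically whenever the variable goes out of scope, covering every return path. A minimal sketch of the pattern follows; demo_parse() is a hypothetical helper, not kernel API.

/*
 * Sketch of scope-based cleanup. <linux/of.h> provides roughly:
 *   DEFINE_FREE(device_node, struct device_node *,
 *               if (_T) of_node_put(_T))
 * so __free(device_node) queues of_node_put() for scope exit.
 */
#include <linux/cleanup.h>
#include <linux/of.h>

static int demo_parse(int cpu)
{
	struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu);

	if (!np)
		return -ENODEV;		/* cleanup is a no-op for NULL */

	if (!of_device_is_available(np))
		return -ENODEV;		/* of_node_put(np) runs here automatically */

	return 0;			/* ...and here, with no manual put */
}

This is why the patch can delete the manual of_node_put() call entirely while simultaneously curing the leak in the error path.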