diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index aaea9a39eced..b0018f371ea3 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1845,7 +1845,7 @@ static void intel_pstate_notify_work(struct work_struct *work)
 	wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
 }
 
-static DEFINE_SPINLOCK(hwp_notify_lock);
+static DEFINE_RAW_SPINLOCK(hwp_notify_lock);
 static cpumask_t hwp_intr_enable_mask;
 
 #define HWP_GUARANTEED_PERF_CHANGE_STATUS	BIT(0)
@@ -1868,7 +1868,7 @@ void notify_hwp_interrupt(void)
 	if (!(value & status_mask))
 		return;
 
-	spin_lock_irqsave(&hwp_notify_lock, flags);
+	raw_spin_lock_irqsave(&hwp_notify_lock, flags);
 
 	if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask))
 		goto ack_intr;
@@ -1876,13 +1876,13 @@ void notify_hwp_interrupt(void)
 	schedule_delayed_work(&all_cpu_data[this_cpu]->hwp_notify_work,
 			      msecs_to_jiffies(10));
 
-	spin_unlock_irqrestore(&hwp_notify_lock, flags);
+	raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
 
 	return;
 
 ack_intr:
 	wrmsrl_safe(MSR_HWP_STATUS, 0);
-	spin_unlock_irqrestore(&hwp_notify_lock, flags);
+	raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
 }
 
 static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
@@ -1895,9 +1895,9 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
 	/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
 	wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
 
-	spin_lock_irq(&hwp_notify_lock);
+	raw_spin_lock_irq(&hwp_notify_lock);
 	cancel_work = cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask);
-	spin_unlock_irq(&hwp_notify_lock);
+	raw_spin_unlock_irq(&hwp_notify_lock);
 
 	if (cancel_work)
 		cancel_delayed_work_sync(&cpudata->hwp_notify_work);
@@ -1912,10 +1912,10 @@
 	if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) {
 		u64 interrupt_mask = HWP_GUARANTEED_PERF_CHANGE_REQ;
 
-		spin_lock_irq(&hwp_notify_lock);
+		raw_spin_lock_irq(&hwp_notify_lock);
 		INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work);
 		cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask);
-		spin_unlock_irq(&hwp_notify_lock);
+		raw_spin_unlock_irq(&hwp_notify_lock);
 
 		if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE))
 			interrupt_mask |= HWP_HIGHEST_PERF_CHANGE_REQ;
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index e0e19d9c1323..7fe0981a7e46 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -1107,10 +1107,9 @@ static inline int parse_perf_domain(int cpu, const char *list_name,
 				    const char *cell_name,
 				    struct of_phandle_args *args)
 {
-	struct device_node *cpu_np;
 	int ret;
 
-	cpu_np = of_cpu_device_node_get(cpu);
+	struct device_node *cpu_np __free(device_node) = of_cpu_device_node_get(cpu);
 	if (!cpu_np)
 		return -ENODEV;
 
@@ -1118,9 +1117,6 @@ static inline int parse_perf_domain(int cpu, const char *list_name,
 					 args);
 	if (ret < 0)
 		return ret;
-
-	of_node_put(cpu_np);
-
 	return 0;
 }
 
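
A note on the intel_pstate hunks above: on PREEMPT_RT kernels an ordinary
spinlock_t is converted into a sleeping lock, but notify_hwp_interrupt() runs
from the HWP interrupt in hard-IRQ context, where sleeping is not allowed, so
hwp_notify_lock has to become a raw_spinlock_t, which keeps spinning on all
configurations. Below is a minimal sketch of the pattern, not part of the
patch; demo_lock, demo_events and demo_irq_handler are made-up names for
illustration:

#include <linux/spinlock.h>

/*
 * A raw spinlock stays a true spinning lock even on PREEMPT_RT,
 * where plain spinlock_t is converted to a sleeping rtmutex.
 */
static DEFINE_RAW_SPINLOCK(demo_lock);
static unsigned long demo_events;

/* Runs in hard-IRQ context, so only a raw spinlock may be taken here. */
static void demo_irq_handler(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&demo_lock, flags);
	demo_events++;		/* keep the critical section short */
	raw_spin_unlock_irqrestore(&demo_lock, flags);
}

The same constraint is why the patch leaves wrmsrl_on_cpu() outside the lock,
as the in-tree comment notes: the raw critical section must stay short and
must not call anything that can block or trigger cross-CPU work.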
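
And on the cpufreq.h hunks: __free(device_node) uses the kernel's scope-based
cleanup helpers from <linux/cleanup.h> (the device_node cleanup class is
declared with DEFINE_FREE() in <linux/of.h>). It makes of_node_put() run
automatically when cpu_np goes out of scope, on every return path, so the
explicit of_node_put() can be dropped; it also fixes the reference leak the
old code had on the error path where of_parse_phandle_with_args() fails and
returns before the put. A minimal sketch of the same pattern, again not part
of the patch; demo_cpu0_reg() and the "/cpus/cpu@0" path are made up for
illustration:

#include <linux/cleanup.h>
#include <linux/of.h>

static int demo_cpu0_reg(u32 *out)
{
	/*
	 * of_node_put(np) runs automatically when np leaves scope,
	 * including on the early -ENODEV return below.
	 */
	struct device_node *np __free(device_node) =
			of_find_node_by_path("/cpus/cpu@0");

	if (!np)
		return -ENODEV;

	return of_property_read_u32(np, "reg", out);
}

If the node reference ever has to outlive the scope, it must be handed off
explicitly (for example with return_ptr() or no_free_ptr() from
<linux/cleanup.h>); otherwise the automatic put still fires.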