Commit 20582e34 authored by Linus Torvalds
Pull ACPI and power management fixes from Rafael Wysocki:
 "These fix two bugs in the intel_pstate driver, a hibernate bug leading
  to nasty resume failures sometimes and acpi-cpufreq initialization bug
  that causes problems to happen during module unload when intel_pstate
  is in use.

  Specifics:

   - Fix for rounding errors in intel_pstate causing CPU utilization to
     be underestimated from Brennan Shacklett.

   - intel_pstate fix to always use the correct max pstate value when
     computing the min pstate from Dirk Brandewie.

   - Hibernation fix for deadlocking resume in cases when the probing of
     the device containing the image is deferred from Russ Dill.

   - acpi-cpufreq fix to prevent the module from staying in memory when
     the driver cannot be registered and then attempting to unregister
     things that have never been registered on exit"

* tag 'pm+acpi-3.12-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  acpi-cpufreq: Fail initialization if driver cannot be registered
  PM / hibernate: Move software_resume to late_initcall_sync
  intel_pstate: Correct calculation of min pstate value
  intel_pstate: Improve accuracy by not truncating until final result
parents d255c59a 75c07581
drivers/cpufreq/acpi-cpufreq.c
@@ -986,12 +986,12 @@ static int __init acpi_cpufreq_init(void)
 {
 	int ret;
 
+	if (acpi_disabled)
+		return -ENODEV;
+
 	/* don't keep reloading if cpufreq_driver exists */
 	if (cpufreq_get_current_driver())
-		return 0;
-
-	if (acpi_disabled)
-		return 0;
+		return -EEXIST;
 
 	pr_debug("acpi_cpufreq_init\n");
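
For context on the hunk above: a module whose init function returns 0 stays loaded even if it registered nothing, and its exit handler will later try to unregister a driver that was never registered. Returning an error (-ENODEV when ACPI is disabled, -EEXIST when another cpufreq driver is already active) makes the module loader unwind the load instead. Below is a minimal sketch of that pattern using a hypothetical example module, not the acpi-cpufreq code itself.

/* Hypothetical example module (not acpi-cpufreq): fail init with an error
 * instead of returning 0 when the driver cannot be registered, so the
 * module is not kept loaded and exit never has to undo work that was
 * never done. */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>

static bool hw_absent;		/* stand-in for acpi_disabled */
static bool other_driver;	/* stand-in for cpufreq_get_current_driver() */
static bool registered;

static int __init example_init(void)
{
	if (hw_absent)
		return -ENODEV;	/* load fails cleanly, nothing to clean up */

	if (other_driver)
		return -EEXIST;	/* "return 0" here would keep the module loaded */

	registered = true;	/* a real driver would call *_register() here */
	return 0;
}

static void __exit example_exit(void)
{
	/* Only reached if example_init() returned 0, so it never tries to
	 * unregister something that was never registered. */
	if (registered)
		registered = false;
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
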
drivers/cpufreq/intel_pstate.c
@@ -48,7 +48,7 @@ static inline int32_t div_fp(int32_t x, int32_t y)
 }
 
 struct sample {
-	int core_pct_busy;
+	int32_t core_pct_busy;
 	u64 aperf;
 	u64 mperf;
 	int freq;
@@ -68,7 +68,7 @@ struct _pid {
 	int32_t i_gain;
 	int32_t d_gain;
 	int deadband;
-	int last_err;
+	int32_t last_err;
 };
 
 struct cpudata {
@@ -153,16 +153,15 @@ static inline void pid_d_gain_set(struct _pid *pid, int percent)
 	pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
 }
 
-static signed int pid_calc(struct _pid *pid, int busy)
+static signed int pid_calc(struct _pid *pid, int32_t busy)
 {
-	signed int err, result;
+	signed int result;
 	int32_t pterm, dterm, fp_error;
 	int32_t integral_limit;
 
-	err = pid->setpoint - busy;
-	fp_error = int_tofp(err);
+	fp_error = int_tofp(pid->setpoint) - busy;
 
-	if (abs(err) <= pid->deadband)
+	if (abs(fp_error) <= int_tofp(pid->deadband))
 		return 0;
 
 	pterm = mul_fp(pid->p_gain, fp_error);
@@ -176,8 +175,8 @@ static signed int pid_calc(struct _pid *pid, int busy)
 	if (pid->integral < -integral_limit)
 		pid->integral = -integral_limit;
 
-	dterm = mul_fp(pid->d_gain, (err - pid->last_err));
-	pid->last_err = err;
+	dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
+	pid->last_err = fp_error;
 
 	result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
@@ -367,12 +366,13 @@ static int intel_pstate_turbo_pstate(void)
 static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
 {
 	int max_perf = cpu->pstate.turbo_pstate;
+	int max_perf_adj;
 	int min_perf;
 
 	if (limits.no_turbo)
 		max_perf = cpu->pstate.max_pstate;
 
-	max_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
-	*max = clamp_t(int, max_perf,
+	max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
+	*max = clamp_t(int, max_perf_adj,
 			cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
 
 	min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
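
Why the max_perf_adj temporary above matters: the old code overwrote max_perf with the value already scaled by limits.max_perf and then derived min_perf from it, so the minimum P-state ended up scaled by both the max and min limits. A worked example with hypothetical numbers (turbo P-state 32, max_perf limit 80%, min_perf limit 25%):

  old: max_perf becomes fp_toint(32 * 0.80) = 25, so min_perf = fp_toint(25 * 0.25) = 6
  new: max_perf stays 32 for the min calculation, so min_perf = fp_toint(32 * 0.25) = 8
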
@@ -436,8 +436,9 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu,
 					struct sample *sample)
 {
 	u64 core_pct;
-	core_pct = div64_u64(sample->aperf * 100, sample->mperf);
-	sample->freq = cpu->pstate.max_pstate * core_pct * 1000;
+	core_pct = div64_u64(int_tofp(sample->aperf * 100),
+			     sample->mperf);
+	sample->freq = fp_toint(cpu->pstate.max_pstate * core_pct * 1000);
 
 	sample->core_pct_busy = core_pct;
 }
@@ -469,22 +470,19 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
 	mod_timer_pinned(&cpu->timer, jiffies + delay);
 }
 
-static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu)
+static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
 {
-	int32_t busy_scaled;
 	int32_t core_busy, max_pstate, current_pstate;
 
-	core_busy = int_tofp(cpu->samples[cpu->sample_ptr].core_pct_busy);
+	core_busy = cpu->samples[cpu->sample_ptr].core_pct_busy;
 	max_pstate = int_tofp(cpu->pstate.max_pstate);
 	current_pstate = int_tofp(cpu->pstate.current_pstate);
-	busy_scaled = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
-
-	return fp_toint(busy_scaled);
+	return mul_fp(core_busy, div_fp(max_pstate, current_pstate));
 }
 
 static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
 {
-	int busy_scaled;
+	int32_t busy_scaled;
 	struct _pid *pid;
 	signed int ctl = 0;
 	int steps;
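
The intel_pstate hunks above all serve the same goal: keep the busy calculation in fixed point until the final result instead of truncating intermediate values to whole integers, which is what caused CPU utilization to be underestimated. The sketch below is not kernel code; it is a minimal userspace illustration, assuming helpers modeled on the driver's 8-bit-fraction int_tofp()/fp_toint()/mul_fp()/div_fp() macros and made-up sample numbers, of how early truncation loses part of the busy percentage.

/* Userspace sketch only: fixed-point helpers modeled on intel_pstate's
 * 8-bit-fraction macros; the sample numbers below are made up. */
#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

static int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static int32_t div_fp(int32_t x, int32_t y)
{
	return ((int64_t)x << FRAC_BITS) / y;
}

int main(void)
{
	/* Hypothetical sample: APERF/MPERF ratio of 80.9% busy,
	 * max_pstate 28, current_pstate 16 (ratio 1.75). */
	uint64_t aperf = 809, mperf = 1000;
	int32_t max_pstate = 28, current_pstate = 16;
	int32_t ratio = div_fp(int_tofp(max_pstate), int_tofp(current_pstate));

	/* Old style: truncate core_pct to a whole percent, then scale. */
	int32_t core_pct_old = aperf * 100 / mperf;		/* 80, the .9 is lost */
	int32_t busy_old = fp_toint(mul_fp(int_tofp(core_pct_old), ratio));

	/* New style: keep the value in fixed point until the very end. */
	int32_t core_pct_new = int_tofp(aperf * 100) / mperf;	/* ~80.9 in fp */
	int32_t busy_new = mul_fp(core_pct_new, ratio);

	printf("truncate early: %d\n", busy_old);		/* 140 */
	printf("truncate last : %d.%02d\n", (int)fp_toint(busy_new),
	       (int)((busy_new & ((1 << FRAC_BITS) - 1)) * 100 >> FRAC_BITS));
	/* prints 141.57 -- the early-truncated path underestimates busy */
	return 0;
}
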
kernel/power/hibernate.c
@@ -846,7 +846,7 @@ static int software_resume(void)
 	goto Finish;
 }
 
-late_initcall(software_resume);
+late_initcall_sync(software_resume);
 
 static const char * const hibernation_modes[] = {