diff options
author | Todd Poynor | 2013-01-02 15:14:00 -0600 |
---|---|---|
committer | Arve Hjønnevåg | 2013-03-11 17:25:17 -0500 |
commit | dbbcb65d808d59b4a9f7af8b3d54f33ebb4d1bf5 (patch) | |
tree | 042363174282398486cb453dc60b7f20eb5d0fb1 | |
parent | fc81b7b2464ff9a2ad8ea95487a7d00e9b1b6739 (diff) | |
download | kernel-common-dbbcb65d808d59b4a9f7af8b3d54f33ebb4d1bf5.tar.gz kernel-common-dbbcb65d808d59b4a9f7af8b3d54f33ebb4d1bf5.tar.xz kernel-common-dbbcb65d808d59b4a9f7af8b3d54f33ebb4d1bf5.zip |
cpufreq: interactive: fix deadlock on spinlock in timer
Need to use irqsave/restore spinlock calls to avoid a deadlock in calls
from the timer.
Change-Id: I15b6b590045ba1447e34ca7b5ff342723e53a605
Signed-off-by: Todd Poynor <toddpoynor@google.com>
-rw-r--r-- | drivers/cpufreq/cpufreq_interactive.c | 29 |
1 file changed, 17 insertions(+), 12 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c index 51c34bf9c17..e7f26aae186 100644 --- a/drivers/cpufreq/cpufreq_interactive.c +++ b/drivers/cpufreq/cpufreq_interactive.c | |||
@@ -126,6 +126,7 @@ static void cpufreq_interactive_timer_resched( | |||
126 | struct cpufreq_interactive_cpuinfo *pcpu) | 126 | struct cpufreq_interactive_cpuinfo *pcpu) |
127 | { | 127 | { |
128 | unsigned long expires = jiffies + usecs_to_jiffies(timer_rate); | 128 | unsigned long expires = jiffies + usecs_to_jiffies(timer_rate); |
129 | unsigned long flags; | ||
129 | 130 | ||
130 | mod_timer_pinned(&pcpu->cpu_timer, expires); | 131 | mod_timer_pinned(&pcpu->cpu_timer, expires); |
131 | if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) { | 132 | if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) { |
@@ -133,27 +134,28 @@ static void cpufreq_interactive_timer_resched( | |||
133 | mod_timer_pinned(&pcpu->cpu_slack_timer, expires); | 134 | mod_timer_pinned(&pcpu->cpu_slack_timer, expires); |
134 | } | 135 | } |
135 | 136 | ||
136 | spin_lock(&pcpu->load_lock); | 137 | spin_lock_irqsave(&pcpu->load_lock, flags); |
137 | pcpu->time_in_idle = | 138 | pcpu->time_in_idle = |
138 | get_cpu_idle_time_us(smp_processor_id(), | 139 | get_cpu_idle_time_us(smp_processor_id(), |
139 | &pcpu->time_in_idle_timestamp); | 140 | &pcpu->time_in_idle_timestamp); |
140 | pcpu->cputime_speedadj = 0; | 141 | pcpu->cputime_speedadj = 0; |
141 | pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp; | 142 | pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp; |
142 | spin_unlock(&pcpu->load_lock); | 143 | spin_unlock_irqrestore(&pcpu->load_lock, flags); |
143 | } | 144 | } |
144 | 145 | ||
145 | static unsigned int freq_to_targetload(unsigned int freq) | 146 | static unsigned int freq_to_targetload(unsigned int freq) |
146 | { | 147 | { |
147 | int i; | 148 | int i; |
148 | unsigned int ret; | 149 | unsigned int ret; |
150 | unsigned long flags; | ||
149 | 151 | ||
150 | spin_lock(&target_loads_lock); | 152 | spin_lock_irqsave(&target_loads_lock, flags); |
151 | 153 | ||
152 | for (i = 0; i < ntarget_loads - 1 && freq >= target_loads[i+1]; i += 2) | 154 | for (i = 0; i < ntarget_loads - 1 && freq >= target_loads[i+1]; i += 2) |
153 | ; | 155 | ; |
154 | 156 | ||
155 | ret = target_loads[i]; | 157 | ret = target_loads[i]; |
156 | spin_unlock(&target_loads_lock); | 158 | spin_unlock_irqrestore(&target_loads_lock, flags); |
157 | return ret; | 159 | return ret; |
158 | } | 160 | } |
159 | 161 | ||
@@ -284,11 +286,11 @@ static void cpufreq_interactive_timer(unsigned long data) | |||
284 | if (!pcpu->governor_enabled) | 286 | if (!pcpu->governor_enabled) |
285 | goto exit; | 287 | goto exit; |
286 | 288 | ||
287 | spin_lock(&pcpu->load_lock); | 289 | spin_lock_irqsave(&pcpu->load_lock, flags); |
288 | now = update_load(data); | 290 | now = update_load(data); |
289 | delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp); | 291 | delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp); |
290 | cputime_speedadj = pcpu->cputime_speedadj; | 292 | cputime_speedadj = pcpu->cputime_speedadj; |
291 | spin_unlock(&pcpu->load_lock); | 293 | spin_unlock_irqrestore(&pcpu->load_lock, flags); |
292 | 294 | ||
293 | if (WARN_ON_ONCE(!delta_time)) | 295 | if (WARN_ON_ONCE(!delta_time)) |
294 | goto rearm; | 296 | goto rearm; |
@@ -549,6 +551,7 @@ static int cpufreq_interactive_notifier( | |||
549 | struct cpufreq_freqs *freq = data; | 551 | struct cpufreq_freqs *freq = data; |
550 | struct cpufreq_interactive_cpuinfo *pcpu; | 552 | struct cpufreq_interactive_cpuinfo *pcpu; |
551 | int cpu; | 553 | int cpu; |
554 | unsigned long flags; | ||
552 | 555 | ||
553 | if (val == CPUFREQ_POSTCHANGE) { | 556 | if (val == CPUFREQ_POSTCHANGE) { |
554 | pcpu = &per_cpu(cpuinfo, freq->cpu); | 557 | pcpu = &per_cpu(cpuinfo, freq->cpu); |
@@ -562,9 +565,9 @@ static int cpufreq_interactive_notifier( | |||
562 | for_each_cpu(cpu, pcpu->policy->cpus) { | 565 | for_each_cpu(cpu, pcpu->policy->cpus) { |
563 | struct cpufreq_interactive_cpuinfo *pjcpu = | 566 | struct cpufreq_interactive_cpuinfo *pjcpu = |
564 | &per_cpu(cpuinfo, cpu); | 567 | &per_cpu(cpuinfo, cpu); |
565 | spin_lock(&pjcpu->load_lock); | 568 | spin_lock_irqsave(&pjcpu->load_lock, flags); |
566 | update_load(cpu); | 569 | update_load(cpu); |
567 | spin_unlock(&pjcpu->load_lock); | 570 | spin_unlock_irqrestore(&pjcpu->load_lock, flags); |
568 | } | 571 | } |
569 | 572 | ||
570 | up_read(&pcpu->enable_sem); | 573 | up_read(&pcpu->enable_sem); |
@@ -581,15 +584,16 @@ static ssize_t show_target_loads( | |||
581 | { | 584 | { |
582 | int i; | 585 | int i; |
583 | ssize_t ret = 0; | 586 | ssize_t ret = 0; |
587 | unsigned long flags; | ||
584 | 588 | ||
585 | spin_lock(&target_loads_lock); | 589 | spin_lock_irqsave(&target_loads_lock, flags); |
586 | 590 | ||
587 | for (i = 0; i < ntarget_loads; i++) | 591 | for (i = 0; i < ntarget_loads; i++) |
588 | ret += sprintf(buf + ret, "%u%s", target_loads[i], | 592 | ret += sprintf(buf + ret, "%u%s", target_loads[i], |
589 | i & 0x1 ? ":" : " "); | 593 | i & 0x1 ? ":" : " "); |
590 | 594 | ||
591 | ret += sprintf(buf + ret, "\n"); | 595 | ret += sprintf(buf + ret, "\n"); |
592 | spin_unlock(&target_loads_lock); | 596 | spin_unlock_irqrestore(&target_loads_lock, flags); |
593 | return ret; | 597 | return ret; |
594 | } | 598 | } |
595 | 599 | ||
@@ -602,6 +606,7 @@ static ssize_t store_target_loads( | |||
602 | unsigned int *new_target_loads = NULL; | 606 | unsigned int *new_target_loads = NULL; |
603 | int ntokens = 1; | 607 | int ntokens = 1; |
604 | int i; | 608 | int i; |
609 | unsigned long flags; | ||
605 | 610 | ||
606 | cp = buf; | 611 | cp = buf; |
607 | while ((cp = strpbrk(cp + 1, " :"))) | 612 | while ((cp = strpbrk(cp + 1, " :"))) |
@@ -631,12 +636,12 @@ static ssize_t store_target_loads( | |||
631 | if (i != ntokens) | 636 | if (i != ntokens) |
632 | goto err_inval; | 637 | goto err_inval; |
633 | 638 | ||
634 | spin_lock(&target_loads_lock); | 639 | spin_lock_irqsave(&target_loads_lock, flags); |
635 | if (target_loads != default_target_loads) | 640 | if (target_loads != default_target_loads) |
636 | kfree(target_loads); | 641 | kfree(target_loads); |
637 | target_loads = new_target_loads; | 642 | target_loads = new_target_loads; |
638 | ntarget_loads = ntokens; | 643 | ntarget_loads = ntokens; |
639 | spin_unlock(&target_loads_lock); | 644 | spin_unlock_irqrestore(&target_loads_lock, flags); |
640 | return count; | 645 | return count; |
641 | 646 | ||
642 | err_inval: | 647 | err_inval: |