author		Todd Poynor	2013-01-02 15:14:00 -0600
committer	Arve Hjønnevåg	2013-02-19 19:56:13 -0600
commit		e09763675acc17ab96de05e9c905b4f2300fae34 (patch)
tree		fa95bbdbfd1d764b248388e04473a246e5c4caa2
parent		0fb8a55e19f992cfd1bc5c241a0fb8bad791270a (diff)
cpufreq: interactive: fix deadlock on spinlock in timer
Need to use irqsave/restore spinlock calls to avoid a deadlock in calls
from the timer.

Change-Id: I15b6b590045ba1447e34ca7b5ff342723e53a605
Signed-off-by: Todd Poynor <toddpoynor@google.com>
-rw-r--r--	drivers/cpufreq/cpufreq_interactive.c	29
1 file changed, 17 insertions(+), 12 deletions(-)
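For context: the locks touched here (load_lock and target_loads_lock) are taken both in the governor's CPU timer callback and in process-context paths (the cpufreq notifier and the sysfs show/store handlers). The sketch below is illustrative only and is not part of the patch; names such as example_lock, example_counter and example_timer_fn are hypothetical. It shows why a plain spin_lock() in process context can deadlock against the timer on the same CPU, and the irqsave/irqrestore pattern the patch applies.

/*
 * Illustrative sketch only -- not part of the patch.
 *
 * A lock taken both in process context and in a timer callback must be
 * held with local interrupts disabled: if the timer fires on the CPU
 * that already holds the lock, the callback spins on a lock that its
 * own CPU can never release.
 */
#include <linux/spinlock.h>
#include <linux/timer.h>

static DEFINE_SPINLOCK(example_lock);   /* plays the role of load_lock */
static unsigned long example_counter;   /* hypothetical shared state */

/* Process-context path, e.g. a sysfs store handler. */
static void example_store_path(void)
{
        unsigned long flags;

        /* A plain spin_lock() here could deadlock against example_timer_fn(). */
        spin_lock_irqsave(&example_lock, flags);
        example_counter++;
        spin_unlock_irqrestore(&example_lock, flags);
}

/* Timer callback: takes the same lock, so it uses the same variants. */
static void example_timer_fn(unsigned long data)
{
        unsigned long flags;

        spin_lock_irqsave(&example_lock, flags);
        example_counter = 0;
        spin_unlock_irqrestore(&example_lock, flags);
}

The patch applies the irqsave/irqrestore variants consistently in every function that takes these locks, so the critical sections are safe regardless of the context in which the timer runs.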
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
index 286781ff9dd..c70ebf53415 100644
--- a/drivers/cpufreq/cpufreq_interactive.c
+++ b/drivers/cpufreq/cpufreq_interactive.c
@@ -125,6 +125,7 @@ static void cpufreq_interactive_timer_resched(
 	struct cpufreq_interactive_cpuinfo *pcpu)
 {
 	unsigned long expires = jiffies + usecs_to_jiffies(timer_rate);
+	unsigned long flags;
 
 	mod_timer_pinned(&pcpu->cpu_timer, expires);
 	if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
@@ -132,27 +133,28 @@ static void cpufreq_interactive_timer_resched(
 		mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
 	}
 
-	spin_lock(&pcpu->load_lock);
+	spin_lock_irqsave(&pcpu->load_lock, flags);
 	pcpu->time_in_idle =
 		get_cpu_idle_time_us(smp_processor_id(),
 				     &pcpu->time_in_idle_timestamp);
 	pcpu->cputime_speedadj = 0;
 	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
-	spin_unlock(&pcpu->load_lock);
+	spin_unlock_irqrestore(&pcpu->load_lock, flags);
 }
 
 static unsigned int freq_to_targetload(unsigned int freq)
 {
 	int i;
 	unsigned int ret;
+	unsigned long flags;
 
-	spin_lock(&target_loads_lock);
+	spin_lock_irqsave(&target_loads_lock, flags);
 
 	for (i = 0; i < ntarget_loads - 1 && freq >= target_loads[i+1]; i += 2)
 		;
 
 	ret = target_loads[i];
-	spin_unlock(&target_loads_lock);
+	spin_unlock_irqrestore(&target_loads_lock, flags);
 	return ret;
 }
 
@@ -283,11 +285,11 @@ static void cpufreq_interactive_timer(unsigned long data)
 	if (!pcpu->governor_enabled)
 		goto exit;
 
-	spin_lock(&pcpu->load_lock);
+	spin_lock_irqsave(&pcpu->load_lock, flags);
 	now = update_load(data);
 	delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
 	cputime_speedadj = pcpu->cputime_speedadj;
-	spin_unlock(&pcpu->load_lock);
+	spin_unlock_irqrestore(&pcpu->load_lock, flags);
 
 	if (WARN_ON_ONCE(!delta_time))
 		goto rearm;
@@ -548,6 +550,7 @@ static int cpufreq_interactive_notifier(
 	struct cpufreq_freqs *freq = data;
 	struct cpufreq_interactive_cpuinfo *pcpu;
 	int cpu;
+	unsigned long flags;
 
 	if (val == CPUFREQ_POSTCHANGE) {
 		pcpu = &per_cpu(cpuinfo, freq->cpu);
@@ -561,9 +564,9 @@ static int cpufreq_interactive_notifier(
 		for_each_cpu(cpu, pcpu->policy->cpus) {
 			struct cpufreq_interactive_cpuinfo *pjcpu =
 				&per_cpu(cpuinfo, cpu);
-			spin_lock(&pjcpu->load_lock);
+			spin_lock_irqsave(&pjcpu->load_lock, flags);
 			update_load(cpu);
-			spin_unlock(&pjcpu->load_lock);
+			spin_unlock_irqrestore(&pjcpu->load_lock, flags);
 		}
 
 		up_read(&pcpu->enable_sem);
@@ -580,15 +583,16 @@ static ssize_t show_target_loads(
 {
 	int i;
 	ssize_t ret = 0;
+	unsigned long flags;
 
-	spin_lock(&target_loads_lock);
+	spin_lock_irqsave(&target_loads_lock, flags);
 
 	for (i = 0; i < ntarget_loads; i++)
 		ret += sprintf(buf + ret, "%u%s", target_loads[i],
 			       i & 0x1 ? ":" : " ");
 
 	ret += sprintf(buf + ret, "\n");
-	spin_unlock(&target_loads_lock);
+	spin_unlock_irqrestore(&target_loads_lock, flags);
 	return ret;
 }
 
@@ -601,6 +605,7 @@ static ssize_t store_target_loads(
 	unsigned int *new_target_loads = NULL;
 	int ntokens = 1;
 	int i;
+	unsigned long flags;
 
 	cp = buf;
 	while ((cp = strpbrk(cp + 1, " :")))
@@ -630,12 +635,12 @@ static ssize_t store_target_loads(
 	if (i != ntokens)
 		goto err_inval;
 
-	spin_lock(&target_loads_lock);
+	spin_lock_irqsave(&target_loads_lock, flags);
 	if (target_loads != default_target_loads)
 		kfree(target_loads);
 	target_loads = new_target_loads;
 	ntarget_loads = ntokens;
-	spin_unlock(&target_loads_lock);
+	spin_unlock_irqrestore(&target_loads_lock, flags);
 	return count;
 
 err_inval: