aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorTodd Poynor2012-12-18 19:50:10 -0600
committerArve Hjønnevåg2013-02-19 19:56:08 -0600
commit9edf06dc7ef35dc9a0ad8f3bd3cc0f940876046a (patch)
tree0049aa68a3d3ed5b4b05870e4b386b7422a8846c
parent44d0cb81d297c96fecdc71392133c33908feb45d (diff)
downloadkernel-common-9edf06dc7ef35dc9a0ad8f3bd3cc0f940876046a.tar.gz
kernel-common-9edf06dc7ef35dc9a0ad8f3bd3cc0f940876046a.tar.xz
kernel-common-9edf06dc7ef35dc9a0ad8f3bd3cc0f940876046a.zip
cpufreq: interactive: add timer slack to limit idle at speed > min
Always use deferrable timer for load sampling. Set a non-deferrable timer to an additional slack time to allow prior to waking up from idle to drop speed when not at minimum speed. Slack value -1 avoids wakeups to drop speed. Default is 80ms. Remove the governidle module param and its timer management in idle. For platforms on which holding speed above minimum in idle costs power, use the new timer slack to select how long to wait before waking up to drop speed. Change-Id: I270b3980667e2c70a68e5bff534124b4411dbad5 Signed-off-by: Todd Poynor <toddpoynor@google.com>
-rw-r--r--drivers/cpufreq/cpufreq_interactive.c101
1 files changed, 61 insertions, 40 deletions
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
index 604f06af6dc..ddface4d02c 100644
--- a/drivers/cpufreq/cpufreq_interactive.c
+++ b/drivers/cpufreq/cpufreq_interactive.c
@@ -39,7 +39,7 @@ static atomic_t active_count = ATOMIC_INIT(0);
39 39
40struct cpufreq_interactive_cpuinfo { 40struct cpufreq_interactive_cpuinfo {
41 struct timer_list cpu_timer; 41 struct timer_list cpu_timer;
42 int timer_idlecancel; 42 struct timer_list cpu_slack_timer;
43 spinlock_t load_lock; /* protects the next 4 fields */ 43 spinlock_t load_lock; /* protects the next 4 fields */
44 u64 time_in_idle; 44 u64 time_in_idle;
45 u64 time_in_idle_timestamp; 45 u64 time_in_idle_timestamp;
@@ -101,10 +101,12 @@ static int boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
101/* End time of boost pulse in ktime converted to usecs */ 101/* End time of boost pulse in ktime converted to usecs */
102static u64 boostpulse_endtime; 102static u64 boostpulse_endtime;
103 103
104static bool governidle; 104/*
105module_param(governidle, bool, S_IWUSR | S_IRUGO); 105 * Max additional time to wait in idle, beyond timer_rate, at speeds above
106MODULE_PARM_DESC(governidle, 106 * minimum before wakeup to reduce speed, or -1 if unnecessary.
107 "Set to 1 to wake up CPUs from idle to reduce speed (default 0)"); 107 */
108#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
109static int timer_slack_val = DEFAULT_TIMER_SLACK;
108 110
109static int cpufreq_governor_interactive(struct cpufreq_policy *policy, 111static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
110 unsigned int event); 112 unsigned int event);
@@ -122,8 +124,14 @@ struct cpufreq_governor cpufreq_gov_interactive = {
122static void cpufreq_interactive_timer_resched( 124static void cpufreq_interactive_timer_resched(
123 struct cpufreq_interactive_cpuinfo *pcpu) 125 struct cpufreq_interactive_cpuinfo *pcpu)
124{ 126{
125 mod_timer_pinned(&pcpu->cpu_timer, 127 unsigned long expires = jiffies + usecs_to_jiffies(timer_rate);
126 jiffies + usecs_to_jiffies(timer_rate)); 128
129 mod_timer_pinned(&pcpu->cpu_timer, expires);
130 if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
131 expires += usecs_to_jiffies(timer_slack_val);
132 mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
133 }
134
127 spin_lock(&pcpu->load_lock); 135 spin_lock(&pcpu->load_lock);
128 pcpu->time_in_idle = 136 pcpu->time_in_idle =
129 get_cpu_idle_time_us(smp_processor_id(), 137 get_cpu_idle_time_us(smp_processor_id(),
@@ -367,17 +375,8 @@ rearm_if_notmax:
367 goto exit; 375 goto exit;
368 376
369rearm: 377rearm:
370 if (!timer_pending(&pcpu->cpu_timer)) { 378 if (!timer_pending(&pcpu->cpu_timer))
371 /*
372 * If governing speed in idle and already at min, cancel the
373 * timer if that CPU goes idle. We don't need to re-evaluate
374 * speed until the next idle exit.
375 */
376 if (governidle && pcpu->target_freq == pcpu->policy->min)
377 pcpu->timer_idlecancel = 1;
378
379 cpufreq_interactive_timer_resched(pcpu); 379 cpufreq_interactive_timer_resched(pcpu);
380 }
381 380
382exit: 381exit:
383 return; 382 return;
@@ -403,21 +402,8 @@ static void cpufreq_interactive_idle_start(void)
403 * min indefinitely. This should probably be a quirk of 402 * min indefinitely. This should probably be a quirk of
404 * the CPUFreq driver. 403 * the CPUFreq driver.
405 */ 404 */
406 if (!pending) { 405 if (!pending)
407 pcpu->timer_idlecancel = 0;
408 cpufreq_interactive_timer_resched(pcpu); 406 cpufreq_interactive_timer_resched(pcpu);
409 }
410 } else if (governidle) {
411 /*
412 * If at min speed and entering idle after load has
413 * already been evaluated, and a timer has been set just in
414 * case the CPU suddenly goes busy, cancel that timer. The
415 * CPU didn't go busy; we'll recheck things upon idle exit.
416 */
417 if (pending && pcpu->timer_idlecancel) {
418 del_timer(&pcpu->cpu_timer);
419 pcpu->timer_idlecancel = 0;
420 }
421 } 407 }
422 408
423} 409}
@@ -432,11 +418,10 @@ static void cpufreq_interactive_idle_end(void)
432 418
433 /* Arm the timer for 1-2 ticks later if not already. */ 419 /* Arm the timer for 1-2 ticks later if not already. */
434 if (!timer_pending(&pcpu->cpu_timer)) { 420 if (!timer_pending(&pcpu->cpu_timer)) {
435 pcpu->timer_idlecancel = 0;
436 cpufreq_interactive_timer_resched(pcpu); 421 cpufreq_interactive_timer_resched(pcpu);
437 } else if (!governidle && 422 } else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
438 time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
439 del_timer(&pcpu->cpu_timer); 423 del_timer(&pcpu->cpu_timer);
424 del_timer(&pcpu->cpu_slack_timer);
440 cpufreq_interactive_timer(smp_processor_id()); 425 cpufreq_interactive_timer(smp_processor_id());
441 } 426 }
442} 427}
@@ -746,6 +731,29 @@ static ssize_t store_timer_rate(struct kobject *kobj,
746static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644, 731static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
747 show_timer_rate, store_timer_rate); 732 show_timer_rate, store_timer_rate);
748 733
734static ssize_t show_timer_slack(
735 struct kobject *kobj, struct attribute *attr, char *buf)
736{
737 return sprintf(buf, "%d\n", timer_slack_val);
738}
739
740static ssize_t store_timer_slack(
741 struct kobject *kobj, struct attribute *attr, const char *buf,
742 size_t count)
743{
744 int ret;
745 unsigned long val;
746
747 ret = kstrtol(buf, 10, &val);
748 if (ret < 0)
749 return ret;
750
751 timer_slack_val = val;
752 return count;
753}
754
755define_one_global_rw(timer_slack);
756
749static ssize_t show_boost(struct kobject *kobj, struct attribute *attr, 757static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
750 char *buf) 758 char *buf)
751{ 759{
@@ -825,6 +833,7 @@ static struct attribute *interactive_attributes[] = {
825 &above_hispeed_delay.attr, 833 &above_hispeed_delay.attr,
826 &min_sample_time_attr.attr, 834 &min_sample_time_attr.attr,
827 &timer_rate_attr.attr, 835 &timer_rate_attr.attr,
836 &timer_slack.attr,
828 &boost.attr, 837 &boost.attr,
829 &boostpulse.attr, 838 &boostpulse.attr,
830 &boostpulse_duration.attr, 839 &boostpulse_duration.attr,
@@ -875,6 +884,8 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
875 hispeed_freq = policy->max; 884 hispeed_freq = policy->max;
876 885
877 for_each_cpu(j, policy->cpus) { 886 for_each_cpu(j, policy->cpus) {
887 unsigned long expires;
888
878 pcpu = &per_cpu(cpuinfo, j); 889 pcpu = &per_cpu(cpuinfo, j);
879 pcpu->policy = policy; 890 pcpu->policy = policy;
880 pcpu->target_freq = policy->cur; 891 pcpu->target_freq = policy->cur;
@@ -886,9 +897,15 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
886 pcpu->floor_validate_time; 897 pcpu->floor_validate_time;
887 pcpu->governor_enabled = 1; 898 pcpu->governor_enabled = 1;
888 smp_wmb(); 899 smp_wmb();
889 pcpu->cpu_timer.expires = 900 expires = jiffies + usecs_to_jiffies(timer_rate);
890 jiffies + usecs_to_jiffies(timer_rate); 901 pcpu->cpu_timer.expires = expires;
891 add_timer_on(&pcpu->cpu_timer, j); 902 add_timer_on(&pcpu->cpu_timer, j);
903
904 if (timer_slack_val >= 0) {
905 expires += usecs_to_jiffies(timer_slack_val);
906 pcpu->cpu_slack_timer.expires = expires;
907 add_timer_on(&pcpu->cpu_slack_timer, j);
908 }
892 } 909 }
893 910
894 /* 911 /*
@@ -914,6 +931,7 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
914 pcpu->governor_enabled = 0; 931 pcpu->governor_enabled = 0;
915 smp_wmb(); 932 smp_wmb();
916 del_timer_sync(&pcpu->cpu_timer); 933 del_timer_sync(&pcpu->cpu_timer);
934 del_timer_sync(&pcpu->cpu_slack_timer);
917 } 935 }
918 936
919 if (atomic_dec_return(&active_count) > 0) 937 if (atomic_dec_return(&active_count) > 0)
@@ -939,6 +957,10 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
939 return 0; 957 return 0;
940} 958}
941 959
960static void cpufreq_interactive_nop_timer(unsigned long data)
961{
962}
963
942static int __init cpufreq_interactive_init(void) 964static int __init cpufreq_interactive_init(void)
943{ 965{
944 unsigned int i; 966 unsigned int i;
@@ -953,12 +975,11 @@ static int __init cpufreq_interactive_init(void)
953 /* Initalize per-cpu timers */ 975 /* Initalize per-cpu timers */
954 for_each_possible_cpu(i) { 976 for_each_possible_cpu(i) {
955 pcpu = &per_cpu(cpuinfo, i); 977 pcpu = &per_cpu(cpuinfo, i);
956 if (governidle) 978 init_timer_deferrable(&pcpu->cpu_timer);
957 init_timer(&pcpu->cpu_timer);
958 else
959 init_timer_deferrable(&pcpu->cpu_timer);
960 pcpu->cpu_timer.function = cpufreq_interactive_timer; 979 pcpu->cpu_timer.function = cpufreq_interactive_timer;
961 pcpu->cpu_timer.data = i; 980 pcpu->cpu_timer.data = i;
981 init_timer(&pcpu->cpu_slack_timer);
982 pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
962 spin_lock_init(&pcpu->load_lock); 983 spin_lock_init(&pcpu->load_lock);
963 } 984 }
964 985