aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorTodd Poynor2012-12-18 19:50:10 -0600
committerArve Hjønnevåg2013-03-11 17:25:12 -0500
commitbc8ffd82102b65f5c0e323d577d27a8de0eb9897 (patch)
treeb724636ac34f5cddf115eb4832fa6982fd975e54
parentc656cb520b90b78edfcfe680b38f1e80a02da8d6 (diff)
downloadkernel-common-bc8ffd82102b65f5c0e323d577d27a8de0eb9897.tar.gz
kernel-common-bc8ffd82102b65f5c0e323d577d27a8de0eb9897.tar.xz
kernel-common-bc8ffd82102b65f5c0e323d577d27a8de0eb9897.zip
cpufreq: interactive: add timer slack to limit idle at speed > min
Always use deferrable timer for load sampling. Set a non-deferrable timer for an additional slack period, allowing the CPU to remain idle for that long before waking up to drop speed when not at minimum speed. A slack value of -1 avoids wakeups to drop speed. Default is 80ms. Remove the governidle module param and its timer management in idle. For platforms on which holding speed above minimum in idle costs power, use the new timer slack to select how long to wait before waking up to drop speed. Change-Id: I270b3980667e2c70a68e5bff534124b4411dbad5 Signed-off-by: Todd Poynor <toddpoynor@google.com>
-rw-r--r--drivers/cpufreq/cpufreq_interactive.c101
1 files changed, 61 insertions, 40 deletions
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
index 8b44a82aa14..690be16aef8 100644
--- a/drivers/cpufreq/cpufreq_interactive.c
+++ b/drivers/cpufreq/cpufreq_interactive.c
@@ -40,7 +40,7 @@ static atomic_t active_count = ATOMIC_INIT(0);
40 40
41struct cpufreq_interactive_cpuinfo { 41struct cpufreq_interactive_cpuinfo {
42 struct timer_list cpu_timer; 42 struct timer_list cpu_timer;
43 int timer_idlecancel; 43 struct timer_list cpu_slack_timer;
44 spinlock_t load_lock; /* protects the next 4 fields */ 44 spinlock_t load_lock; /* protects the next 4 fields */
45 u64 time_in_idle; 45 u64 time_in_idle;
46 u64 time_in_idle_timestamp; 46 u64 time_in_idle_timestamp;
@@ -102,10 +102,12 @@ static int boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
102/* End time of boost pulse in ktime converted to usecs */ 102/* End time of boost pulse in ktime converted to usecs */
103static u64 boostpulse_endtime; 103static u64 boostpulse_endtime;
104 104
105static bool governidle; 105/*
106module_param(governidle, bool, S_IWUSR | S_IRUGO); 106 * Max additional time to wait in idle, beyond timer_rate, at speeds above
107MODULE_PARM_DESC(governidle, 107 * minimum before wakeup to reduce speed, or -1 if unnecessary.
108 "Set to 1 to wake up CPUs from idle to reduce speed (default 0)"); 108 */
109#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
110static int timer_slack_val = DEFAULT_TIMER_SLACK;
109 111
110static int cpufreq_governor_interactive(struct cpufreq_policy *policy, 112static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
111 unsigned int event); 113 unsigned int event);
@@ -123,8 +125,14 @@ struct cpufreq_governor cpufreq_gov_interactive = {
123static void cpufreq_interactive_timer_resched( 125static void cpufreq_interactive_timer_resched(
124 struct cpufreq_interactive_cpuinfo *pcpu) 126 struct cpufreq_interactive_cpuinfo *pcpu)
125{ 127{
126 mod_timer_pinned(&pcpu->cpu_timer, 128 unsigned long expires = jiffies + usecs_to_jiffies(timer_rate);
127 jiffies + usecs_to_jiffies(timer_rate)); 129
130 mod_timer_pinned(&pcpu->cpu_timer, expires);
131 if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
132 expires += usecs_to_jiffies(timer_slack_val);
133 mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
134 }
135
128 spin_lock(&pcpu->load_lock); 136 spin_lock(&pcpu->load_lock);
129 pcpu->time_in_idle = 137 pcpu->time_in_idle =
130 get_cpu_idle_time_us(smp_processor_id(), 138 get_cpu_idle_time_us(smp_processor_id(),
@@ -368,17 +376,8 @@ rearm_if_notmax:
368 goto exit; 376 goto exit;
369 377
370rearm: 378rearm:
371 if (!timer_pending(&pcpu->cpu_timer)) { 379 if (!timer_pending(&pcpu->cpu_timer))
372 /*
373 * If governing speed in idle and already at min, cancel the
374 * timer if that CPU goes idle. We don't need to re-evaluate
375 * speed until the next idle exit.
376 */
377 if (governidle && pcpu->target_freq == pcpu->policy->min)
378 pcpu->timer_idlecancel = 1;
379
380 cpufreq_interactive_timer_resched(pcpu); 380 cpufreq_interactive_timer_resched(pcpu);
381 }
382 381
383exit: 382exit:
384 return; 383 return;
@@ -404,21 +403,8 @@ static void cpufreq_interactive_idle_start(void)
404 * min indefinitely. This should probably be a quirk of 403 * min indefinitely. This should probably be a quirk of
405 * the CPUFreq driver. 404 * the CPUFreq driver.
406 */ 405 */
407 if (!pending) { 406 if (!pending)
408 pcpu->timer_idlecancel = 0;
409 cpufreq_interactive_timer_resched(pcpu); 407 cpufreq_interactive_timer_resched(pcpu);
410 }
411 } else if (governidle) {
412 /*
413 * If at min speed and entering idle after load has
414 * already been evaluated, and a timer has been set just in
415 * case the CPU suddenly goes busy, cancel that timer. The
416 * CPU didn't go busy; we'll recheck things upon idle exit.
417 */
418 if (pending && pcpu->timer_idlecancel) {
419 del_timer(&pcpu->cpu_timer);
420 pcpu->timer_idlecancel = 0;
421 }
422 } 408 }
423 409
424} 410}
@@ -433,11 +419,10 @@ static void cpufreq_interactive_idle_end(void)
433 419
434 /* Arm the timer for 1-2 ticks later if not already. */ 420 /* Arm the timer for 1-2 ticks later if not already. */
435 if (!timer_pending(&pcpu->cpu_timer)) { 421 if (!timer_pending(&pcpu->cpu_timer)) {
436 pcpu->timer_idlecancel = 0;
437 cpufreq_interactive_timer_resched(pcpu); 422 cpufreq_interactive_timer_resched(pcpu);
438 } else if (!governidle && 423 } else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
439 time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
440 del_timer(&pcpu->cpu_timer); 424 del_timer(&pcpu->cpu_timer);
425 del_timer(&pcpu->cpu_slack_timer);
441 cpufreq_interactive_timer(smp_processor_id()); 426 cpufreq_interactive_timer(smp_processor_id());
442 } 427 }
443} 428}
@@ -747,6 +732,29 @@ static ssize_t store_timer_rate(struct kobject *kobj,
747static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644, 732static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
748 show_timer_rate, store_timer_rate); 733 show_timer_rate, store_timer_rate);
749 734
735static ssize_t show_timer_slack(
736 struct kobject *kobj, struct attribute *attr, char *buf)
737{
738 return sprintf(buf, "%d\n", timer_slack_val);
739}
740
741static ssize_t store_timer_slack(
742 struct kobject *kobj, struct attribute *attr, const char *buf,
743 size_t count)
744{
745 int ret;
746 unsigned long val;
747
748 ret = kstrtol(buf, 10, &val);
749 if (ret < 0)
750 return ret;
751
752 timer_slack_val = val;
753 return count;
754}
755
756define_one_global_rw(timer_slack);
757
750static ssize_t show_boost(struct kobject *kobj, struct attribute *attr, 758static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
751 char *buf) 759 char *buf)
752{ 760{
@@ -826,6 +834,7 @@ static struct attribute *interactive_attributes[] = {
826 &above_hispeed_delay.attr, 834 &above_hispeed_delay.attr,
827 &min_sample_time_attr.attr, 835 &min_sample_time_attr.attr,
828 &timer_rate_attr.attr, 836 &timer_rate_attr.attr,
837 &timer_slack.attr,
829 &boost.attr, 838 &boost.attr,
830 &boostpulse.attr, 839 &boostpulse.attr,
831 &boostpulse_duration.attr, 840 &boostpulse_duration.attr,
@@ -876,6 +885,8 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
876 hispeed_freq = policy->max; 885 hispeed_freq = policy->max;
877 886
878 for_each_cpu(j, policy->cpus) { 887 for_each_cpu(j, policy->cpus) {
888 unsigned long expires;
889
879 pcpu = &per_cpu(cpuinfo, j); 890 pcpu = &per_cpu(cpuinfo, j);
880 pcpu->policy = policy; 891 pcpu->policy = policy;
881 pcpu->target_freq = policy->cur; 892 pcpu->target_freq = policy->cur;
@@ -887,9 +898,15 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
887 pcpu->floor_validate_time; 898 pcpu->floor_validate_time;
888 pcpu->governor_enabled = 1; 899 pcpu->governor_enabled = 1;
889 smp_wmb(); 900 smp_wmb();
890 pcpu->cpu_timer.expires = 901 expires = jiffies + usecs_to_jiffies(timer_rate);
891 jiffies + usecs_to_jiffies(timer_rate); 902 pcpu->cpu_timer.expires = expires;
892 add_timer_on(&pcpu->cpu_timer, j); 903 add_timer_on(&pcpu->cpu_timer, j);
904
905 if (timer_slack_val >= 0) {
906 expires += usecs_to_jiffies(timer_slack_val);
907 pcpu->cpu_slack_timer.expires = expires;
908 add_timer_on(&pcpu->cpu_slack_timer, j);
909 }
893 } 910 }
894 911
895 /* 912 /*
@@ -915,6 +932,7 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
915 pcpu->governor_enabled = 0; 932 pcpu->governor_enabled = 0;
916 smp_wmb(); 933 smp_wmb();
917 del_timer_sync(&pcpu->cpu_timer); 934 del_timer_sync(&pcpu->cpu_timer);
935 del_timer_sync(&pcpu->cpu_slack_timer);
918 } 936 }
919 937
920 if (atomic_dec_return(&active_count) > 0) 938 if (atomic_dec_return(&active_count) > 0)
@@ -940,6 +958,10 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
940 return 0; 958 return 0;
941} 959}
942 960
/*
 * Handler for the per-CPU slack timer.  The slack timer is non-deferrable,
 * so merely firing it wakes the CPU from idle (letting the deferrable load
 * timer then run and drop speed); no work is needed in the handler itself.
 */
static void cpufreq_interactive_nop_timer(unsigned long data)
{
}
964
943static int __init cpufreq_interactive_init(void) 965static int __init cpufreq_interactive_init(void)
944{ 966{
945 unsigned int i; 967 unsigned int i;
@@ -954,12 +976,11 @@ static int __init cpufreq_interactive_init(void)
954 /* Initialize per-cpu timers */ 976
955 for_each_possible_cpu(i) { 977 for_each_possible_cpu(i) {
956 pcpu = &per_cpu(cpuinfo, i); 978 pcpu = &per_cpu(cpuinfo, i);
957 if (governidle) 979 init_timer_deferrable(&pcpu->cpu_timer);
958 init_timer(&pcpu->cpu_timer);
959 else
960 init_timer_deferrable(&pcpu->cpu_timer);
961 pcpu->cpu_timer.function = cpufreq_interactive_timer; 980 pcpu->cpu_timer.function = cpufreq_interactive_timer;
962 pcpu->cpu_timer.data = i; 981 pcpu->cpu_timer.data = i;
982 init_timer(&pcpu->cpu_slack_timer);
983 pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
963 spin_lock_init(&pcpu->load_lock); 984 spin_lock_init(&pcpu->load_lock);
964 } 985 }
965 986