Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup.c                      | 212
-rw-r--r--  kernel/cpu.c                         |  20
-rw-r--r--  kernel/fork.c                        |  18
-rw-r--r--  kernel/futex.c                       |   2
-rw-r--r--  kernel/irq/pm.c                      |   7
-rw-r--r--  kernel/panic.c                       |  13
-rw-r--r--  kernel/pm_qos_params.c               |   6
-rw-r--r--  kernel/power/Kconfig                 |  74
-rw-r--r--  kernel/power/Makefile                |   6
-rw-r--r--  kernel/power/consoleearlysuspend.c   |  78
-rw-r--r--  kernel/power/earlysuspend.c          | 187
-rw-r--r--  kernel/power/fbearlysuspend.c        | 153
-rw-r--r--  kernel/power/main.c                  |  20
-rw-r--r--  kernel/power/power.h                 |  24
-rw-r--r--  kernel/power/process.c               |  27
-rw-r--r--  kernel/power/suspend.c               |   3
-rw-r--r--  kernel/power/suspend_time.c          | 111
-rw-r--r--  kernel/power/userwakelock.c          | 219
-rw-r--r--  kernel/power/wakelock.c              | 634
-rw-r--r--  kernel/printk.c                      |  56
-rw-r--r--  kernel/rtmutex.c                     |   2
-rw-r--r--  kernel/sched.c                       | 101
-rw-r--r--  kernel/sysctl.c                      |   8
-rw-r--r--  kernel/time/Makefile                 |   2
-rw-r--r--  kernel/time/timekeeping.c            |   2
25 files changed, 1885 insertions, 100 deletions
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index b964f9e406f..460aa1b0f50 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -268,6 +268,33 @@ static void cgroup_release_agent(struct work_struct *work);
268static DECLARE_WORK(release_agent_work, cgroup_release_agent); 268static DECLARE_WORK(release_agent_work, cgroup_release_agent);
269static void check_for_release(struct cgroup *cgrp); 269static void check_for_release(struct cgroup *cgrp);
270 270
271/*
272 * A queue for waiters to do rmdir() cgroup. A tasks will sleep when
273 * cgroup->count == 0 && list_empty(&cgroup->children) && subsys has some
274 * reference to css->refcnt. In general, this refcnt is expected to goes down
275 * to zero, soon.
276 *
277 * CGRP_WAIT_ON_RMDIR flag is set under cgroup's inode->i_mutex;
278 */
279DECLARE_WAIT_QUEUE_HEAD(cgroup_rmdir_waitq);
280
281static void cgroup_wakeup_rmdir_waiter(struct cgroup *cgrp)
282{
283 if (unlikely(test_and_clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags)))
284 wake_up_all(&cgroup_rmdir_waitq);
285}
286
287void cgroup_exclude_rmdir(struct cgroup_subsys_state *css)
288{
289 css_get(css);
290}
291
292void cgroup_release_and_wakeup_rmdir(struct cgroup_subsys_state *css)
293{
294 cgroup_wakeup_rmdir_waiter(css->cgroup);
295 css_put(css);
296}
297
271/* Link structure for associating css_set objects with cgroups */ 298/* Link structure for associating css_set objects with cgroups */
272struct cg_cgroup_link { 299struct cg_cgroup_link {
273 /* 300 /*
@@ -327,60 +354,51 @@ static struct hlist_head *css_set_hash(struct cgroup_subsys_state *css[])
327 return &css_set_table[index]; 354 return &css_set_table[index];
328} 355}
329 356
330/* We don't maintain the lists running through each css_set to its 357static void free_css_set_work(struct work_struct *work)
331 * task until after the first call to cgroup_iter_start(). This
332 * reduces the fork()/exit() overhead for people who have cgroups
333 * compiled into their kernel but not actually in use */
334static int use_task_css_set_links __read_mostly;
335
336static void __put_css_set(struct css_set *cg, int taskexit)
337{ 358{
359 struct css_set *cg = container_of(work, struct css_set, work);
338 struct cg_cgroup_link *link; 360 struct cg_cgroup_link *link;
339 struct cg_cgroup_link *saved_link; 361 struct cg_cgroup_link *saved_link;
340 /*
341 * Ensure that the refcount doesn't hit zero while any readers
342 * can see it. Similar to atomic_dec_and_lock(), but for an
343 * rwlock
344 */
345 if (atomic_add_unless(&cg->refcount, -1, 1))
346 return;
347 write_lock(&css_set_lock);
348 if (!atomic_dec_and_test(&cg->refcount)) {
349 write_unlock(&css_set_lock);
350 return;
351 }
352
353 /* This css_set is dead. unlink it and release cgroup refcounts */
354 hlist_del(&cg->hlist);
355 css_set_count--;
356 362
363 write_lock(&css_set_lock);
357 list_for_each_entry_safe(link, saved_link, &cg->cg_links, 364 list_for_each_entry_safe(link, saved_link, &cg->cg_links,
358 cg_link_list) { 365 cg_link_list) {
359 struct cgroup *cgrp = link->cgrp; 366 struct cgroup *cgrp = link->cgrp;
360 list_del(&link->cg_link_list); 367 list_del(&link->cg_link_list);
361 list_del(&link->cgrp_link_list); 368 list_del(&link->cgrp_link_list);
362
363 /* 369 /*
364 * We may not be holding cgroup_mutex, and if cgrp->count is 370 * We may not be holding cgroup_mutex, and if cgrp->count is
365 * dropped to 0 the cgroup can be destroyed at any time, hence 371 * dropped to 0 the cgroup can be destroyed at any time, hence
366 * rcu_read_lock is used to keep it alive. 372 * rcu_read_lock is used to keep it alive.
367 */ 373 */
368 rcu_read_lock(); 374 rcu_read_lock();
369 if (atomic_dec_and_test(&cgrp->count) && 375 if (atomic_dec_and_test(&cgrp->count)) {
370 notify_on_release(cgrp)) {
371 if (taskexit)
372 set_bit(CGRP_RELEASABLE, &cgrp->flags);
373 check_for_release(cgrp); 376 check_for_release(cgrp);
377 cgroup_wakeup_rmdir_waiter(cgrp);
374 } 378 }
375 rcu_read_unlock(); 379 rcu_read_unlock();
376 380
377 kfree(link); 381 kfree(link);
378 } 382 }
379
380 write_unlock(&css_set_lock); 383 write_unlock(&css_set_lock);
381 kfree_rcu(cg, rcu_head); 384
385 kfree(cg);
382} 386}
383 387
388static void free_css_set_rcu(struct rcu_head *obj)
389{
390 struct css_set *cg = container_of(obj, struct css_set, rcu_head);
391
392 INIT_WORK(&cg->work, free_css_set_work);
393 schedule_work(&cg->work);
394}
395
396/* We don't maintain the lists running through each css_set to its
397 * task until after the first call to cgroup_iter_start(). This
398 * reduces the fork()/exit() overhead for people who have cgroups
399 * compiled into their kernel but not actually in use */
400static int use_task_css_set_links __read_mostly;
401
384/* 402/*
385 * refcounted get/put for css_set objects 403 * refcounted get/put for css_set objects
386 */ 404 */
@@ -389,14 +407,26 @@ static inline void get_css_set(struct css_set *cg)
389 atomic_inc(&cg->refcount); 407 atomic_inc(&cg->refcount);
390} 408}
391 409
392static inline void put_css_set(struct css_set *cg) 410static void put_css_set(struct css_set *cg)
393{ 411{
394 __put_css_set(cg, 0); 412 /*
395} 413 * Ensure that the refcount doesn't hit zero while any readers
414 * can see it. Similar to atomic_dec_and_lock(), but for an
415 * rwlock
416 */
417 if (atomic_add_unless(&cg->refcount, -1, 1))
418 return;
419 write_lock(&css_set_lock);
420 if (!atomic_dec_and_test(&cg->refcount)) {
421 write_unlock(&css_set_lock);
422 return;
423 }
396 424
397static inline void put_css_set_taskexit(struct css_set *cg) 425 hlist_del(&cg->hlist);
398{ 426 css_set_count--;
399 __put_css_set(cg, 1); 427
428 write_unlock(&css_set_lock);
429 call_rcu(&cg->rcu_head, free_css_set_rcu);
400} 430}
401 431
402/* 432/*
@@ -728,9 +758,9 @@ static struct cgroup *task_cgroup_from_root(struct task_struct *task,
728 * cgroup_attach_task(), which overwrites one tasks cgroup pointer with 758 * cgroup_attach_task(), which overwrites one tasks cgroup pointer with
729 * another. It does so using cgroup_mutex, however there are 759 * another. It does so using cgroup_mutex, however there are
730 * several performance critical places that need to reference 760 * several performance critical places that need to reference
731 * task->cgroup without the expense of grabbing a system global 761 * task->cgroups without the expense of grabbing a system global
732 * mutex. Therefore except as noted below, when dereferencing or, as 762 * mutex. Therefore except as noted below, when dereferencing or, as
733 * in cgroup_attach_task(), modifying a task'ss cgroup pointer we use 763 * in cgroup_attach_task(), modifying a task's cgroups pointer we use
734 * task_lock(), which acts on a spinlock (task->alloc_lock) already in 764 * task_lock(), which acts on a spinlock (task->alloc_lock) already in
735 * the task_struct routinely used for such matters. 765 * the task_struct routinely used for such matters.
736 * 766 *
@@ -920,33 +950,6 @@ static void cgroup_d_remove_dir(struct dentry *dentry)
920} 950}
921 951
922/* 952/*
923 * A queue for waiters to do rmdir() cgroup. A tasks will sleep when
924 * cgroup->count == 0 && list_empty(&cgroup->children) && subsys has some
925 * reference to css->refcnt. In general, this refcnt is expected to goes down
926 * to zero, soon.
927 *
928 * CGRP_WAIT_ON_RMDIR flag is set under cgroup's inode->i_mutex;
929 */
930DECLARE_WAIT_QUEUE_HEAD(cgroup_rmdir_waitq);
931
932static void cgroup_wakeup_rmdir_waiter(struct cgroup *cgrp)
933{
934 if (unlikely(test_and_clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags)))
935 wake_up_all(&cgroup_rmdir_waitq);
936}
937
938void cgroup_exclude_rmdir(struct cgroup_subsys_state *css)
939{
940 css_get(css);
941}
942
943void cgroup_release_and_wakeup_rmdir(struct cgroup_subsys_state *css)
944{
945 cgroup_wakeup_rmdir_waiter(css->cgroup);
946 css_put(css);
947}
948
949/*
950 * Call with cgroup_mutex held. Drops reference counts on modules, including 953 * Call with cgroup_mutex held. Drops reference counts on modules, including
951 * any duplicate ones that parse_cgroupfs_options took. If this function 954 * any duplicate ones that parse_cgroupfs_options took. If this function
952 * returns an error, no reference counts are touched. 955 * returns an error, no reference counts are touched.
@@ -1827,6 +1830,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
1827 struct cgroup_subsys *ss, *failed_ss = NULL; 1830 struct cgroup_subsys *ss, *failed_ss = NULL;
1828 struct cgroup *oldcgrp; 1831 struct cgroup *oldcgrp;
1829 struct cgroupfs_root *root = cgrp->root; 1832 struct cgroupfs_root *root = cgrp->root;
1833 struct css_set *cg;
1830 1834
1831 /* Nothing to do if the task is already in that cgroup */ 1835 /* Nothing to do if the task is already in that cgroup */
1832 oldcgrp = task_cgroup_from_root(tsk, root); 1836 oldcgrp = task_cgroup_from_root(tsk, root);
@@ -1856,6 +1860,11 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
1856 } 1860 }
1857 } 1861 }
1858 1862
1863 task_lock(tsk);
1864 cg = tsk->cgroups;
1865 get_css_set(cg);
1866 task_unlock(tsk);
1867
1859 retval = cgroup_task_migrate(cgrp, oldcgrp, tsk, false); 1868 retval = cgroup_task_migrate(cgrp, oldcgrp, tsk, false);
1860 if (retval) 1869 if (retval)
1861 goto out; 1870 goto out;
@@ -1868,8 +1877,9 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
1868 if (ss->attach) 1877 if (ss->attach)
1869 ss->attach(ss, cgrp, oldcgrp, tsk); 1878 ss->attach(ss, cgrp, oldcgrp, tsk);
1870 } 1879 }
1871 1880 set_bit(CGRP_RELEASABLE, &cgrp->flags);
1872 synchronize_rcu(); 1881 /* put_css_set will not destroy cg until after an RCU grace period */
1882 put_css_set(cg);
1873 1883
1874 /* 1884 /*
1875 * wake up rmdir() waiter. the rmdir should fail since the cgroup 1885 * wake up rmdir() waiter. the rmdir should fail since the cgroup
@@ -2191,6 +2201,24 @@ out_free_group_list:
2191 return retval; 2201 return retval;
2192} 2202}
2193 2203
2204static int cgroup_allow_attach(struct cgroup *cgrp, struct task_struct *tsk)
2205{
2206 struct cgroup_subsys *ss;
2207 int ret;
2208
2209 for_each_subsys(cgrp->root, ss) {
2210 if (ss->allow_attach) {
2211 ret = ss->allow_attach(cgrp, tsk);
2212 if (ret)
2213 return ret;
2214 } else {
2215 return -EACCES;
2216 }
2217 }
2218
2219 return 0;
2220}
2221
2194/* 2222/*
2195 * Find the task_struct of the task to attach by vpid and pass it along to the 2223 * Find the task_struct of the task to attach by vpid and pass it along to the
2196 * function to attach either it or all tasks in its threadgroup. Will take 2224 * function to attach either it or all tasks in its threadgroup. Will take
@@ -2236,9 +2264,16 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
2236 if (cred->euid && 2264 if (cred->euid &&
2237 cred->euid != tcred->uid && 2265 cred->euid != tcred->uid &&
2238 cred->euid != tcred->suid) { 2266 cred->euid != tcred->suid) {
2239 rcu_read_unlock(); 2267 /*
2240 cgroup_unlock(); 2268 * if the default permission check fails, give each
2241 return -EACCES; 2269 * cgroup a chance to extend the permission check
2270 */
2271 ret = cgroup_allow_attach(cgrp, tsk);
2272 if (ret) {
2273 rcu_read_unlock();
2274 cgroup_unlock();
2275 return ret;
2276 }
2242 } 2277 }
2243 get_task_struct(tsk); 2278 get_task_struct(tsk);
2244 rcu_read_unlock(); 2279 rcu_read_unlock();
@@ -3810,6 +3845,8 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
3810 if (err < 0) 3845 if (err < 0)
3811 goto err_remove; 3846 goto err_remove;
3812 3847
3848 set_bit(CGRP_RELEASABLE, &parent->flags);
3849
3813 /* The cgroup directory was pre-locked for us */ 3850 /* The cgroup directory was pre-locked for us */
3814 BUG_ON(!mutex_is_locked(&cgrp->dentry->d_inode->i_mutex)); 3851 BUG_ON(!mutex_is_locked(&cgrp->dentry->d_inode->i_mutex));
3815 3852
@@ -3941,6 +3978,21 @@ static int cgroup_clear_css_refs(struct cgroup *cgrp)
3941 return !failed; 3978 return !failed;
3942} 3979}
3943 3980
3981/* checks if all of the css_sets attached to a cgroup have a refcount of 0.
3982 * Must be called with css_set_lock held */
3983static int cgroup_css_sets_empty(struct cgroup *cgrp)
3984{
3985 struct cg_cgroup_link *link;
3986
3987 list_for_each_entry(link, &cgrp->css_sets, cgrp_link_list) {
3988 struct css_set *cg = link->cg;
3989 if (atomic_read(&cg->refcount) > 0)
3990 return 0;
3991 }
3992
3993 return 1;
3994}
3995
3944static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry) 3996static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
3945{ 3997{
3946 struct cgroup *cgrp = dentry->d_fsdata; 3998 struct cgroup *cgrp = dentry->d_fsdata;
@@ -3953,7 +4005,7 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
3953 /* the vfs holds both inode->i_mutex already */ 4005 /* the vfs holds both inode->i_mutex already */
3954again: 4006again:
3955 mutex_lock(&cgroup_mutex); 4007 mutex_lock(&cgroup_mutex);
3956 if (atomic_read(&cgrp->count) != 0) { 4008 if (!cgroup_css_sets_empty(cgrp)) {
3957 mutex_unlock(&cgroup_mutex); 4009 mutex_unlock(&cgroup_mutex);
3958 return -EBUSY; 4010 return -EBUSY;
3959 } 4011 }
@@ -3986,7 +4038,7 @@ again:
3986 4038
3987 mutex_lock(&cgroup_mutex); 4039 mutex_lock(&cgroup_mutex);
3988 parent = cgrp->parent; 4040 parent = cgrp->parent;
3989 if (atomic_read(&cgrp->count) || !list_empty(&cgrp->children)) { 4041 if (!cgroup_css_sets_empty(cgrp) || !list_empty(&cgrp->children)) {
3990 clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags); 4042 clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
3991 mutex_unlock(&cgroup_mutex); 4043 mutex_unlock(&cgroup_mutex);
3992 return -EBUSY; 4044 return -EBUSY;
@@ -4026,7 +4078,6 @@ again:
4026 cgroup_d_remove_dir(d); 4078 cgroup_d_remove_dir(d);
4027 dput(d); 4079 dput(d);
4028 4080
4029 set_bit(CGRP_RELEASABLE, &parent->flags);
4030 check_for_release(parent); 4081 check_for_release(parent);
4031 4082
4032 /* 4083 /*
@@ -4626,7 +4677,7 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)
4626 task_unlock(tsk); 4677 task_unlock(tsk);
4627 4678
4628 if (cg) 4679 if (cg)
4629 put_css_set_taskexit(cg); 4680 put_css_set(cg);
4630} 4681}
4631 4682
4632/** 4683/**
@@ -4680,6 +4731,14 @@ static void check_for_release(struct cgroup *cgrp)
4680} 4731}
4681 4732
4682/* Caller must verify that the css is not for root cgroup */ 4733/* Caller must verify that the css is not for root cgroup */
4734void __css_get(struct cgroup_subsys_state *css, int count)
4735{
4736 atomic_add(count, &css->refcnt);
4737 set_bit(CGRP_RELEASABLE, &css->cgroup->flags);
4738}
4739EXPORT_SYMBOL_GPL(__css_get);
4740
4741/* Caller must verify that the css is not for root cgroup */
4683void __css_put(struct cgroup_subsys_state *css, int count) 4742void __css_put(struct cgroup_subsys_state *css, int count)
4684{ 4743{
4685 struct cgroup *cgrp = css->cgroup; 4744 struct cgroup *cgrp = css->cgroup;
@@ -4687,10 +4746,7 @@ void __css_put(struct cgroup_subsys_state *css, int count)
4687 rcu_read_lock(); 4746 rcu_read_lock();
4688 val = atomic_sub_return(count, &css->refcnt); 4747 val = atomic_sub_return(count, &css->refcnt);
4689 if (val == 1) { 4748 if (val == 1) {
4690 if (notify_on_release(cgrp)) { 4749 check_for_release(cgrp);
4691 set_bit(CGRP_RELEASABLE, &cgrp->flags);
4692 check_for_release(cgrp);
4693 }
4694 cgroup_wakeup_rmdir_waiter(cgrp); 4750 cgroup_wakeup_rmdir_waiter(cgrp);
4695 } 4751 }
4696 rcu_read_unlock(); 4752 rcu_read_unlock();
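The cgroup_allow_attach() helper added above gives each subsystem a chance to extend permissions when the default euid check in attach_task_by_pid() fails: every subsystem on the hierarchy must provide an allow_attach callback and return 0 for the attach to proceed, otherwise -EACCES is returned. The sketch below shows how a subsystem might opt in; only the callback shape, ss->allow_attach(cgrp, tsk), comes from this diff (the field itself is added to struct cgroup_subsys in a header outside kernel/), and the same-uid policy and subsystem name are illustrative assumptions, not part of the patch.

/*
 * Hypothetical subsystem using the new hook. The allow_attach field is
 * assumed to be declared in include/linux/cgroup.h by the same patch set;
 * the policy shown (permit attach when caller and target share a uid) is
 * only an example.
 */
#include <linux/cgroup.h>
#include <linux/cred.h>

static int example_allow_attach(struct cgroup *cgrp, struct task_struct *tsk)
{
	const struct cred *cred = current_cred(), *tcred;
	int ret = -EACCES;

	rcu_read_lock();
	tcred = __task_cred(tsk);
	if (cred->uid == tcred->uid)
		ret = 0;	/* extend permission beyond the euid check */
	rcu_read_unlock();
	return ret;
}

struct cgroup_subsys example_subsys = {
	.name		= "example",
	.allow_attach	= example_allow_attach,
	/* ... the subsystem's other callbacks ... */
};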
diff --git a/kernel/cpu.c b/kernel/cpu.c
index aa39dd7a384..eae3d9b3957 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -668,3 +668,23 @@ void init_cpu_online(const struct cpumask *src)
668{ 668{
669 cpumask_copy(to_cpumask(cpu_online_bits), src); 669 cpumask_copy(to_cpumask(cpu_online_bits), src);
670} 670}
671
672static ATOMIC_NOTIFIER_HEAD(idle_notifier);
673
674void idle_notifier_register(struct notifier_block *n)
675{
676 atomic_notifier_chain_register(&idle_notifier, n);
677}
678EXPORT_SYMBOL_GPL(idle_notifier_register);
679
680void idle_notifier_unregister(struct notifier_block *n)
681{
682 atomic_notifier_chain_unregister(&idle_notifier, n);
683}
684EXPORT_SYMBOL_GPL(idle_notifier_unregister);
685
686void idle_notifier_call_chain(unsigned long val)
687{
688 atomic_notifier_call_chain(&idle_notifier, val, NULL);
689}
690EXPORT_SYMBOL_GPL(idle_notifier_call_chain);
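The three idle_notifier_* helpers appended to kernel/cpu.c are thin wrappers around an atomic notifier chain; architecture idle code is expected to call idle_notifier_call_chain() on idle entry and exit so that drivers can react. A minimal client sketch follows: the register/unregister/call-chain API comes from the diff, but the callback body is illustrative and the meaning of the val argument (e.g. idle start vs. idle end) is an assumption, since the enum defining it lives outside this diff.

#include <linux/module.h>
#include <linux/notifier.h>

/* Illustrative consumer of the idle notifier chain exported above. */
static int my_idle_notify(struct notifier_block *nb, unsigned long val,
			  void *data)
{
	pr_debug("idle notifier event %lu\n", val);
	return NOTIFY_OK;
}

static struct notifier_block my_idle_nb = {
	.notifier_call = my_idle_notify,
};

static int __init my_idle_client_init(void)
{
	idle_notifier_register(&my_idle_nb);
	return 0;
}
module_init(my_idle_client_init);

static void __exit my_idle_client_exit(void)
{
	idle_notifier_unregister(&my_idle_nb);
}
module_exit(my_idle_client_exit);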
diff --git a/kernel/fork.c b/kernel/fork.c
index 3d42aa3dad3..158ca4f026c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -156,6 +156,9 @@ struct kmem_cache *vm_area_cachep;
156/* SLAB cache for mm_struct structures (tsk->mm) */ 156/* SLAB cache for mm_struct structures (tsk->mm) */
157static struct kmem_cache *mm_cachep; 157static struct kmem_cache *mm_cachep;
158 158
159/* Notifier list called when a task struct is freed */
160static ATOMIC_NOTIFIER_HEAD(task_free_notifier);
161
159static void account_kernel_stack(struct thread_info *ti, int account) 162static void account_kernel_stack(struct thread_info *ti, int account)
160{ 163{
161 struct zone *zone = page_zone(virt_to_page(ti)); 164 struct zone *zone = page_zone(virt_to_page(ti));
@@ -187,6 +190,18 @@ static inline void put_signal_struct(struct signal_struct *sig)
187 free_signal_struct(sig); 190 free_signal_struct(sig);
188} 191}
189 192
193int task_free_register(struct notifier_block *n)
194{
195 return atomic_notifier_chain_register(&task_free_notifier, n);
196}
197EXPORT_SYMBOL(task_free_register);
198
199int task_free_unregister(struct notifier_block *n)
200{
201 return atomic_notifier_chain_unregister(&task_free_notifier, n);
202}
203EXPORT_SYMBOL(task_free_unregister);
204
190void __put_task_struct(struct task_struct *tsk) 205void __put_task_struct(struct task_struct *tsk)
191{ 206{
192 WARN_ON(!tsk->exit_state); 207 WARN_ON(!tsk->exit_state);
@@ -197,6 +212,7 @@ void __put_task_struct(struct task_struct *tsk)
197 delayacct_tsk_free(tsk); 212 delayacct_tsk_free(tsk);
198 put_signal_struct(tsk->signal); 213 put_signal_struct(tsk->signal);
199 214
215 atomic_notifier_call_chain(&task_free_notifier, 0, tsk);
200 if (!profile_handoff_task(tsk)) 216 if (!profile_handoff_task(tsk))
201 free_task(tsk); 217 free_task(tsk);
202} 218}
@@ -1020,7 +1036,7 @@ static void rt_mutex_init_task(struct task_struct *p)
1020{ 1036{
1021 raw_spin_lock_init(&p->pi_lock); 1037 raw_spin_lock_init(&p->pi_lock);
1022#ifdef CONFIG_RT_MUTEXES 1038#ifdef CONFIG_RT_MUTEXES
1023 plist_head_init_raw(&p->pi_waiters, &p->pi_lock); 1039 plist_head_init(&p->pi_waiters);
1024 p->pi_blocked_on = NULL; 1040 p->pi_blocked_on = NULL;
1025#endif 1041#endif
1026} 1042}
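With this change __put_task_struct() runs an atomic notifier chain just before the task is freed, passing the task_struct as the data pointer, and task_free_register()/task_free_unregister() are exported for modules. A hedged sketch of a listener is shown here; the callback body is illustrative, and because this is an atomic notifier called from the final put, the handler must not sleep.

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/sched.h>

static int my_task_free_notify(struct notifier_block *nb,
			       unsigned long action, void *data)
{
	struct task_struct *tsk = data;	/* per the call site above */

	pr_debug("task %d is about to be freed\n", tsk->pid);
	return NOTIFY_OK;
}

static struct notifier_block my_task_free_nb = {
	.notifier_call = my_task_free_notify,
};

/* task_free_register(&my_task_free_nb) at init time,
 * task_free_unregister(&my_task_free_nb) on teardown. */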
diff --git a/kernel/futex.c b/kernel/futex.c
index 91691e9daab..61e554e4125 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -2758,7 +2758,7 @@ static int __init futex_init(void)
2758 futex_cmpxchg_enabled = 1; 2758 futex_cmpxchg_enabled = 1;
2759 2759
2760 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) { 2760 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
2761 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock); 2761 plist_head_init(&futex_queues[i].chain);
2762 spin_lock_init(&futex_queues[i].lock); 2762 spin_lock_init(&futex_queues[i].lock);
2763 } 2763 }
2764 2764
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index 15e53b1766a..fe4b09cf829 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -104,8 +104,13 @@ int check_wakeup_irqs(void)
104 104
105 for_each_irq_desc(irq, desc) { 105 for_each_irq_desc(irq, desc) {
106 if (irqd_is_wakeup_set(&desc->irq_data)) { 106 if (irqd_is_wakeup_set(&desc->irq_data)) {
107 if (desc->istate & IRQS_PENDING) 107 if (desc->istate & IRQS_PENDING) {
108 pr_info("Wakeup IRQ %d %s pending, suspend aborted\n",
109 irq,
110 desc->action && desc->action->name ?
111 desc->action->name : "");
108 return -EBUSY; 112 return -EBUSY;
113 }
109 continue; 114 continue;
110 } 115 }
111 /* 116 /*
diff --git a/kernel/panic.c b/kernel/panic.c
index 8e48cf6ab56..564c7bc6ecb 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -27,13 +27,19 @@
27#define PANIC_TIMER_STEP 100 27#define PANIC_TIMER_STEP 100
28#define PANIC_BLINK_SPD 18 28#define PANIC_BLINK_SPD 18
29 29
30/* Machine specific panic information string */
31char *mach_panic_string;
32
30int panic_on_oops; 33int panic_on_oops;
31static unsigned long tainted_mask; 34static unsigned long tainted_mask;
32static int pause_on_oops; 35static int pause_on_oops;
33static int pause_on_oops_flag; 36static int pause_on_oops_flag;
34static DEFINE_SPINLOCK(pause_on_oops_lock); 37static DEFINE_SPINLOCK(pause_on_oops_lock);
35 38
36int panic_timeout; 39#ifndef CONFIG_PANIC_TIMEOUT
40#define CONFIG_PANIC_TIMEOUT 0
41#endif
42int panic_timeout = CONFIG_PANIC_TIMEOUT;
37EXPORT_SYMBOL_GPL(panic_timeout); 43EXPORT_SYMBOL_GPL(panic_timeout);
38 44
39ATOMIC_NOTIFIER_HEAD(panic_notifier_list); 45ATOMIC_NOTIFIER_HEAD(panic_notifier_list);
@@ -350,6 +356,11 @@ late_initcall(init_oops_id);
350void print_oops_end_marker(void) 356void print_oops_end_marker(void)
351{ 357{
352 init_oops_id(); 358 init_oops_id();
359
360 if (mach_panic_string)
361 printk(KERN_WARNING "Board Information: %s\n",
362 mach_panic_string);
363
353 printk(KERN_WARNING "---[ end trace %016llx ]---\n", 364 printk(KERN_WARNING "---[ end trace %016llx ]---\n",
354 (unsigned long long)oops_id); 365 (unsigned long long)oops_id);
355} 366}
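mach_panic_string is a new global that machine or board code can point at a short descriptive string; print_oops_end_marker() then emits it as a "Board Information:" line ahead of the end-of-trace marker. A minimal producer sketch, assuming the extern declaration is provided by a header outside kernel/ in the same patch set; the string contents and initcall are examples.

#include <linux/init.h>

extern char *mach_panic_string;	/* assumed to be declared elsewhere by this patch set */

static int __init my_board_panic_info_init(void)
{
	mach_panic_string = "my-board rev A, bootloader 1.2";
	return 0;
}
device_initcall(my_board_panic_info_init);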
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index 6824ca7d4d0..37f05d0f079 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -74,7 +74,7 @@ static DEFINE_SPINLOCK(pm_qos_lock);
74static struct pm_qos_object null_pm_qos; 74static struct pm_qos_object null_pm_qos;
75static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier); 75static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
76static struct pm_qos_object cpu_dma_pm_qos = { 76static struct pm_qos_object cpu_dma_pm_qos = {
77 .requests = PLIST_HEAD_INIT(cpu_dma_pm_qos.requests, pm_qos_lock), 77 .requests = PLIST_HEAD_INIT(cpu_dma_pm_qos.requests),
78 .notifiers = &cpu_dma_lat_notifier, 78 .notifiers = &cpu_dma_lat_notifier,
79 .name = "cpu_dma_latency", 79 .name = "cpu_dma_latency",
80 .target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE, 80 .target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
@@ -84,7 +84,7 @@ static struct pm_qos_object cpu_dma_pm_qos = {
84 84
85static BLOCKING_NOTIFIER_HEAD(network_lat_notifier); 85static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
86static struct pm_qos_object network_lat_pm_qos = { 86static struct pm_qos_object network_lat_pm_qos = {
87 .requests = PLIST_HEAD_INIT(network_lat_pm_qos.requests, pm_qos_lock), 87 .requests = PLIST_HEAD_INIT(network_lat_pm_qos.requests),
88 .notifiers = &network_lat_notifier, 88 .notifiers = &network_lat_notifier,
89 .name = "network_latency", 89 .name = "network_latency",
90 .target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE, 90 .target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
@@ -95,7 +95,7 @@ static struct pm_qos_object network_lat_pm_qos = {
95 95
96static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier); 96static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
97static struct pm_qos_object network_throughput_pm_qos = { 97static struct pm_qos_object network_throughput_pm_qos = {
98 .requests = PLIST_HEAD_INIT(network_throughput_pm_qos.requests, pm_qos_lock), 98 .requests = PLIST_HEAD_INIT(network_throughput_pm_qos.requests),
99 .notifiers = &network_throughput_notifier, 99 .notifiers = &network_throughput_notifier,
100 .name = "network_throughput", 100 .name = "network_throughput",
101 .target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE, 101 .target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 87f4d24b55b..b90fb99fe45 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -18,6 +18,73 @@ config SUSPEND_FREEZER
18 18
19 Turning OFF this setting is NOT recommended! If in doubt, say Y. 19 Turning OFF this setting is NOT recommended! If in doubt, say Y.
20 20
21config HAS_WAKELOCK
22 bool
23
24config HAS_EARLYSUSPEND
25 bool
26
27config WAKELOCK
28 bool "Wake lock"
29 depends on PM && RTC_CLASS
30 default n
31 select HAS_WAKELOCK
32 ---help---
33 Enable wakelocks. When user space request a sleep state the
34 sleep request will be delayed until no wake locks are held.
35
36config WAKELOCK_STAT
37 bool "Wake lock stats"
38 depends on WAKELOCK
39 default y
40 ---help---
41 Report wake lock stats in /proc/wakelocks
42
43config USER_WAKELOCK
44 bool "Userspace wake locks"
45 depends on WAKELOCK
46 default y
47 ---help---
48 User-space wake lock api. Write "lockname" or "lockname timeout"
49 to /sys/power/wake_lock lock and if needed create a wake lock.
50 Write "lockname" to /sys/power/wake_unlock to unlock a user wake
51 lock.
52
53config EARLYSUSPEND
54 bool "Early suspend"
55 depends on WAKELOCK
56 default y
57 select HAS_EARLYSUSPEND
58 ---help---
59 Call early suspend handlers when the user requested sleep state
60 changes.
61
62choice
63 prompt "User-space screen access"
64 default FB_EARLYSUSPEND if !FRAMEBUFFER_CONSOLE
65 default CONSOLE_EARLYSUSPEND
66 depends on HAS_EARLYSUSPEND
67
68 config NO_USER_SPACE_SCREEN_ACCESS_CONTROL
69 bool "None"
70
71 config CONSOLE_EARLYSUSPEND
72 bool "Console switch on early-suspend"
73 depends on HAS_EARLYSUSPEND && VT
74 ---help---
75 Register early suspend handler to perform a console switch to
76 when user-space should stop drawing to the screen and a switch
77 back when it should resume.
78
79 config FB_EARLYSUSPEND
80 bool "Sysfs interface"
81 depends on HAS_EARLYSUSPEND
82 ---help---
83 Register early suspend handler that notifies and waits for
84 user-space through sysfs when user-space should stop drawing
85 to the screen and notifies user-space when it should resume.
86endchoice
87
21config HIBERNATE_CALLBACKS 88config HIBERNATE_CALLBACKS
22 bool 89 bool
23 90
@@ -227,3 +294,10 @@ config PM_OPP
227config PM_RUNTIME_CLK 294config PM_RUNTIME_CLK
228 def_bool y 295 def_bool y
229 depends on PM_RUNTIME && HAVE_CLK 296 depends on PM_RUNTIME && HAVE_CLK
297
298config SUSPEND_TIME
299 bool "Log time spent in suspend"
300 ---help---
301 Prints the time spent in suspend in the kernel log, and
302 keeps statistics on the time spent in suspend in
303 /sys/kernel/debug/suspend_time
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index c5ebc6a9064..9b224e16b19 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -8,5 +8,11 @@ obj-$(CONFIG_SUSPEND) += suspend.o
8obj-$(CONFIG_PM_TEST_SUSPEND) += suspend_test.o 8obj-$(CONFIG_PM_TEST_SUSPEND) += suspend_test.o
9obj-$(CONFIG_HIBERNATION) += hibernate.o snapshot.o swap.o user.o \ 9obj-$(CONFIG_HIBERNATION) += hibernate.o snapshot.o swap.o user.o \
10 block_io.o 10 block_io.o
11obj-$(CONFIG_WAKELOCK) += wakelock.o
12obj-$(CONFIG_USER_WAKELOCK) += userwakelock.o
13obj-$(CONFIG_EARLYSUSPEND) += earlysuspend.o
14obj-$(CONFIG_CONSOLE_EARLYSUSPEND) += consoleearlysuspend.o
15obj-$(CONFIG_FB_EARLYSUSPEND) += fbearlysuspend.o
16obj-$(CONFIG_SUSPEND_TIME) += suspend_time.o
11 17
12obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o 18obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o
diff --git a/kernel/power/consoleearlysuspend.c b/kernel/power/consoleearlysuspend.c
new file mode 100644
index 00000000000..a3edcb26738
--- /dev/null
+++ b/kernel/power/consoleearlysuspend.c
@@ -0,0 +1,78 @@
1/* kernel/power/consoleearlysuspend.c
2 *
3 * Copyright (C) 2005-2008 Google, Inc.
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 */
15
16#include <linux/console.h>
17#include <linux/earlysuspend.h>
18#include <linux/kbd_kern.h>
19#include <linux/module.h>
20#include <linux/vt_kern.h>
21#include <linux/wait.h>
22
23#define EARLY_SUSPEND_CONSOLE (MAX_NR_CONSOLES-1)
24
25static int orig_fgconsole;
26static void console_early_suspend(struct early_suspend *h)
27{
28 acquire_console_sem();
29 orig_fgconsole = fg_console;
30 if (vc_allocate(EARLY_SUSPEND_CONSOLE))
31 goto err;
32 if (set_console(EARLY_SUSPEND_CONSOLE))
33 goto err;
34 release_console_sem();
35
36 if (vt_waitactive(EARLY_SUSPEND_CONSOLE + 1))
37 pr_warning("console_early_suspend: Can't switch VCs.\n");
38 return;
39err:
40 pr_warning("console_early_suspend: Can't set console\n");
41 release_console_sem();
42}
43
44static void console_late_resume(struct early_suspend *h)
45{
46 int ret;
47 acquire_console_sem();
48 ret = set_console(orig_fgconsole);
49 release_console_sem();
50 if (ret) {
51 pr_warning("console_late_resume: Can't set console.\n");
52 return;
53 }
54
55 if (vt_waitactive(orig_fgconsole + 1))
56 pr_warning("console_late_resume: Can't switch VCs.\n");
57}
58
59static struct early_suspend console_early_suspend_desc = {
60 .level = EARLY_SUSPEND_LEVEL_STOP_DRAWING,
61 .suspend = console_early_suspend,
62 .resume = console_late_resume,
63};
64
65static int __init console_early_suspend_init(void)
66{
67 register_early_suspend(&console_early_suspend_desc);
68 return 0;
69}
70
71static void __exit console_early_suspend_exit(void)
72{
73 unregister_early_suspend(&console_early_suspend_desc);
74}
75
76module_init(console_early_suspend_init);
77module_exit(console_early_suspend_exit);
78
diff --git a/kernel/power/earlysuspend.c b/kernel/power/earlysuspend.c
new file mode 100644
index 00000000000..b15f02eba45
--- /dev/null
+++ b/kernel/power/earlysuspend.c
@@ -0,0 +1,187 @@
1/* kernel/power/earlysuspend.c
2 *
3 * Copyright (C) 2005-2008 Google, Inc.
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 */
15
16#include <linux/earlysuspend.h>
17#include <linux/module.h>
18#include <linux/mutex.h>
19#include <linux/rtc.h>
20#include <linux/syscalls.h> /* sys_sync */
21#include <linux/wakelock.h>
22#include <linux/workqueue.h>
23
24#include "power.h"
25
26enum {
27 DEBUG_USER_STATE = 1U << 0,
28 DEBUG_SUSPEND = 1U << 2,
29 DEBUG_VERBOSE = 1U << 3,
30};
31static int debug_mask = DEBUG_USER_STATE;
32module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
33
34static DEFINE_MUTEX(early_suspend_lock);
35static LIST_HEAD(early_suspend_handlers);
36static void early_suspend(struct work_struct *work);
37static void late_resume(struct work_struct *work);
38static DECLARE_WORK(early_suspend_work, early_suspend);
39static DECLARE_WORK(late_resume_work, late_resume);
40static DEFINE_SPINLOCK(state_lock);
41enum {
42 SUSPEND_REQUESTED = 0x1,
43 SUSPENDED = 0x2,
44 SUSPEND_REQUESTED_AND_SUSPENDED = SUSPEND_REQUESTED | SUSPENDED,
45};
46static int state;
47
48void register_early_suspend(struct early_suspend *handler)
49{
50 struct list_head *pos;
51
52 mutex_lock(&early_suspend_lock);
53 list_for_each(pos, &early_suspend_handlers) {
54 struct early_suspend *e;
55 e = list_entry(pos, struct early_suspend, link);
56 if (e->level > handler->level)
57 break;
58 }
59 list_add_tail(&handler->link, pos);
60 if ((state & SUSPENDED) && handler->suspend)
61 handler->suspend(handler);
62 mutex_unlock(&early_suspend_lock);
63}
64EXPORT_SYMBOL(register_early_suspend);
65
66void unregister_early_suspend(struct early_suspend *handler)
67{
68 mutex_lock(&early_suspend_lock);
69 list_del(&handler->link);
70 mutex_unlock(&early_suspend_lock);
71}
72EXPORT_SYMBOL(unregister_early_suspend);
73
74static void early_suspend(struct work_struct *work)
75{
76 struct early_suspend *pos;
77 unsigned long irqflags;
78 int abort = 0;
79
80 mutex_lock(&early_suspend_lock);
81 spin_lock_irqsave(&state_lock, irqflags);
82 if (state == SUSPEND_REQUESTED)
83 state |= SUSPENDED;
84 else
85 abort = 1;
86 spin_unlock_irqrestore(&state_lock, irqflags);
87
88 if (abort) {
89 if (debug_mask & DEBUG_SUSPEND)
90 pr_info("early_suspend: abort, state %d\n", state);
91 mutex_unlock(&early_suspend_lock);
92 goto abort;
93 }
94
95 if (debug_mask & DEBUG_SUSPEND)
96 pr_info("early_suspend: call handlers\n");
97 list_for_each_entry(pos, &early_suspend_handlers, link) {
98 if (pos->suspend != NULL) {
99 if (debug_mask & DEBUG_VERBOSE)
100 pr_info("early_suspend: calling %pf\n", pos->suspend);
101 pos->suspend(pos);
102 }
103 }
104 mutex_unlock(&early_suspend_lock);
105
106 if (debug_mask & DEBUG_SUSPEND)
107 pr_info("early_suspend: sync\n");
108
109 sys_sync();
110abort:
111 spin_lock_irqsave(&state_lock, irqflags);
112 if (state == SUSPEND_REQUESTED_AND_SUSPENDED)
113 wake_unlock(&main_wake_lock);
114 spin_unlock_irqrestore(&state_lock, irqflags);
115}
116
117static void late_resume(struct work_struct *work)
118{
119 struct early_suspend *pos;
120 unsigned long irqflags;
121 int abort = 0;
122
123 mutex_lock(&early_suspend_lock);
124 spin_lock_irqsave(&state_lock, irqflags);
125 if (state == SUSPENDED)
126 state &= ~SUSPENDED;
127 else
128 abort = 1;
129 spin_unlock_irqrestore(&state_lock, irqflags);
130
131 if (abort) {
132 if (debug_mask & DEBUG_SUSPEND)
133 pr_info("late_resume: abort, state %d\n", state);
134 goto abort;
135 }
136 if (debug_mask & DEBUG_SUSPEND)
137 pr_info("late_resume: call handlers\n");
138 list_for_each_entry_reverse(pos, &early_suspend_handlers, link) {
139 if (pos->resume != NULL) {
140 if (debug_mask & DEBUG_VERBOSE)
141 pr_info("late_resume: calling %pf\n", pos->resume);
142
143 pos->resume(pos);
144 }
145 }
146 if (debug_mask & DEBUG_SUSPEND)
147 pr_info("late_resume: done\n");
148abort:
149 mutex_unlock(&early_suspend_lock);
150}
151
152void request_suspend_state(suspend_state_t new_state)
153{
154 unsigned long irqflags;
155 int old_sleep;
156
157 spin_lock_irqsave(&state_lock, irqflags);
158 old_sleep = state & SUSPEND_REQUESTED;
159 if (debug_mask & DEBUG_USER_STATE) {
160 struct timespec ts;
161 struct rtc_time tm;
162 getnstimeofday(&ts);
163 rtc_time_to_tm(ts.tv_sec, &tm);
164 pr_info("request_suspend_state: %s (%d->%d) at %lld "
165 "(%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n",
166 new_state != PM_SUSPEND_ON ? "sleep" : "wakeup",
167 requested_suspend_state, new_state,
168 ktime_to_ns(ktime_get()),
169 tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
170 tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec);
171 }
172 if (!old_sleep && new_state != PM_SUSPEND_ON) {
173 state |= SUSPEND_REQUESTED;
174 queue_work(suspend_work_queue, &early_suspend_work);
175 } else if (old_sleep && new_state == PM_SUSPEND_ON) {
176 state &= ~SUSPEND_REQUESTED;
177 wake_lock(&main_wake_lock);
178 queue_work(suspend_work_queue, &late_resume_work);
179 }
180 requested_suspend_state = new_state;
181 spin_unlock_irqrestore(&state_lock, irqflags);
182}
183
184suspend_state_t get_suspend_state(void)
185{
186 return requested_suspend_state;
187}
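Drivers participate in early suspend by filling in a struct early_suspend and calling register_early_suspend(); the list is kept sorted by ->level, suspend handlers run in ascending level order and resume handlers in reverse, and a handler registered while the system is already suspended is invoked immediately. The sketch below is modeled on console_early_suspend_desc in consoleearlysuspend.c above; the level constant is defined in linux/earlysuspend.h (outside this diff) and the handler bodies are illustrative.

#include <linux/earlysuspend.h>

static void my_driver_early_suspend(struct early_suspend *h)
{
	/* e.g. stop scanning, gate clocks, blank the panel (illustrative) */
}

static void my_driver_late_resume(struct early_suspend *h)
{
	/* undo whatever the suspend handler did */
}

static struct early_suspend my_driver_early_suspend_desc = {
	.level   = EARLY_SUSPEND_LEVEL_STOP_DRAWING,
	.suspend = my_driver_early_suspend,
	.resume  = my_driver_late_resume,
};

/* register_early_suspend(&my_driver_early_suspend_desc) in probe/init,
 * unregister_early_suspend(&my_driver_early_suspend_desc) on removal. */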
diff --git a/kernel/power/fbearlysuspend.c b/kernel/power/fbearlysuspend.c
new file mode 100644
index 00000000000..15137650149
--- /dev/null
+++ b/kernel/power/fbearlysuspend.c
@@ -0,0 +1,153 @@
1/* kernel/power/fbearlysuspend.c
2 *
3 * Copyright (C) 2005-2008 Google, Inc.
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 */
15
16#include <linux/earlysuspend.h>
17#include <linux/module.h>
18#include <linux/wait.h>
19
20#include "power.h"
21
22static wait_queue_head_t fb_state_wq;
23static DEFINE_SPINLOCK(fb_state_lock);
24static enum {
25 FB_STATE_STOPPED_DRAWING,
26 FB_STATE_REQUEST_STOP_DRAWING,
27 FB_STATE_DRAWING_OK,
28} fb_state;
29
30/* tell userspace to stop drawing, wait for it to stop */
31static void stop_drawing_early_suspend(struct early_suspend *h)
32{
33 int ret;
34 unsigned long irq_flags;
35
36 spin_lock_irqsave(&fb_state_lock, irq_flags);
37 fb_state = FB_STATE_REQUEST_STOP_DRAWING;
38 spin_unlock_irqrestore(&fb_state_lock, irq_flags);
39
40 wake_up_all(&fb_state_wq);
41 ret = wait_event_timeout(fb_state_wq,
42 fb_state == FB_STATE_STOPPED_DRAWING,
43 HZ);
44 if (unlikely(fb_state != FB_STATE_STOPPED_DRAWING))
45 pr_warning("stop_drawing_early_suspend: timeout waiting for "
46 "userspace to stop drawing\n");
47}
48
49/* tell userspace to start drawing */
50static void start_drawing_late_resume(struct early_suspend *h)
51{
52 unsigned long irq_flags;
53
54 spin_lock_irqsave(&fb_state_lock, irq_flags);
55 fb_state = FB_STATE_DRAWING_OK;
56 spin_unlock_irqrestore(&fb_state_lock, irq_flags);
57 wake_up(&fb_state_wq);
58}
59
60static struct early_suspend stop_drawing_early_suspend_desc = {
61 .level = EARLY_SUSPEND_LEVEL_STOP_DRAWING,
62 .suspend = stop_drawing_early_suspend,
63 .resume = start_drawing_late_resume,
64};
65
66static ssize_t wait_for_fb_sleep_show(struct kobject *kobj,
67 struct kobj_attribute *attr, char *buf)
68{
69 char *s = buf;
70 int ret;
71
72 ret = wait_event_interruptible(fb_state_wq,
73 fb_state != FB_STATE_DRAWING_OK);
74 if (ret && fb_state == FB_STATE_DRAWING_OK)
75 return ret;
76 else
77 s += sprintf(buf, "sleeping");
78 return s - buf;
79}
80
81static ssize_t wait_for_fb_wake_show(struct kobject *kobj,
82 struct kobj_attribute *attr, char *buf)
83{
84 char *s = buf;
85 int ret;
86 unsigned long irq_flags;
87
88 spin_lock_irqsave(&fb_state_lock, irq_flags);
89 if (fb_state == FB_STATE_REQUEST_STOP_DRAWING) {
90 fb_state = FB_STATE_STOPPED_DRAWING;
91 wake_up(&fb_state_wq);
92 }
93 spin_unlock_irqrestore(&fb_state_lock, irq_flags);
94
95 ret = wait_event_interruptible(fb_state_wq,
96 fb_state == FB_STATE_DRAWING_OK);
97 if (ret && fb_state != FB_STATE_DRAWING_OK)
98 return ret;
99 else
100 s += sprintf(buf, "awake");
101
102 return s - buf;
103}
104
105#define power_ro_attr(_name) \
106static struct kobj_attribute _name##_attr = { \
107 .attr = { \
108 .name = __stringify(_name), \
109 .mode = 0444, \
110 }, \
111 .show = _name##_show, \
112 .store = NULL, \
113}
114
115power_ro_attr(wait_for_fb_sleep);
116power_ro_attr(wait_for_fb_wake);
117
118static struct attribute *g[] = {
119 &wait_for_fb_sleep_attr.attr,
120 &wait_for_fb_wake_attr.attr,
121 NULL,
122};
123
124static struct attribute_group attr_group = {
125 .attrs = g,
126};
127
128static int __init android_power_init(void)
129{
130 int ret;
131
132 init_waitqueue_head(&fb_state_wq);
133 fb_state = FB_STATE_DRAWING_OK;
134
135 ret = sysfs_create_group(power_kobj, &attr_group);
136 if (ret) {
137 pr_err("android_power_init: sysfs_create_group failed\n");
138 return ret;
139 }
140
141 register_early_suspend(&stop_drawing_early_suspend_desc);
142 return 0;
143}
144
145static void __exit android_power_exit(void)
146{
147 unregister_early_suspend(&stop_drawing_early_suspend_desc);
148 sysfs_remove_group(power_kobj, &attr_group);
149}
150
151module_init(android_power_init);
152module_exit(android_power_exit);
153
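fbearlysuspend.c exposes the drawing handshake to user space through two blocking sysfs files: a read of /sys/power/wait_for_fb_sleep returns "sleeping" once drawing must stop, and a read of /sys/power/wait_for_fb_wake acknowledges the stop (moving fb_state to FB_STATE_STOPPED_DRAWING) and returns "awake" when drawing may resume. A hedged user-space sketch of the loop a display service might run; the file paths come from the attributes registered above, everything else is illustrative.

/* Hypothetical user-space consumer of the wait_for_fb_* files. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int wait_on(const char *path, char *buf, size_t len)
{
	int fd = open(path, O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = read(fd, buf, len - 1);	/* blocks until the state changes */
	close(fd);
	if (n < 0)
		return -1;
	buf[n] = '\0';
	return 0;
}

int main(void)
{
	char buf[32];

	for (;;) {
		if (wait_on("/sys/power/wait_for_fb_sleep", buf, sizeof(buf)))
			return 1;
		/* stop drawing here */
		if (wait_on("/sys/power/wait_for_fb_wake", buf, sizeof(buf)))
			return 1;
		/* resume drawing here */
	}
}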
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 2981af4ce7c..ff29679510d 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -170,7 +170,11 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
170 const char *buf, size_t n) 170 const char *buf, size_t n)
171{ 171{
172#ifdef CONFIG_SUSPEND 172#ifdef CONFIG_SUSPEND
173#ifdef CONFIG_EARLYSUSPEND
174 suspend_state_t state = PM_SUSPEND_ON;
175#else
173 suspend_state_t state = PM_SUSPEND_STANDBY; 176 suspend_state_t state = PM_SUSPEND_STANDBY;
177#endif
174 const char * const *s; 178 const char * const *s;
175#endif 179#endif
176 char *p; 180 char *p;
@@ -192,8 +196,15 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
192 break; 196 break;
193 } 197 }
194 if (state < PM_SUSPEND_MAX && *s) 198 if (state < PM_SUSPEND_MAX && *s)
199#ifdef CONFIG_EARLYSUSPEND
200 if (state == PM_SUSPEND_ON || valid_state(state)) {
201 error = 0;
202 request_suspend_state(state);
203 }
204#else
195 error = enter_state(state); 205 error = enter_state(state);
196#endif 206#endif
207#endif
197 208
198 Exit: 209 Exit:
199 return error ? error : n; 210 return error ? error : n;
@@ -297,6 +308,11 @@ power_attr(pm_trace_dev_match);
297 308
298#endif /* CONFIG_PM_TRACE */ 309#endif /* CONFIG_PM_TRACE */
299 310
311#ifdef CONFIG_USER_WAKELOCK
312power_attr(wake_lock);
313power_attr(wake_unlock);
314#endif
315
300static struct attribute * g[] = { 316static struct attribute * g[] = {
301 &state_attr.attr, 317 &state_attr.attr,
302#ifdef CONFIG_PM_TRACE 318#ifdef CONFIG_PM_TRACE
@@ -309,6 +325,10 @@ static struct attribute * g[] = {
309#ifdef CONFIG_PM_DEBUG 325#ifdef CONFIG_PM_DEBUG
310 &pm_test_attr.attr, 326 &pm_test_attr.attr,
311#endif 327#endif
328#ifdef CONFIG_USER_WAKELOCK
329 &wake_lock_attr.attr,
330 &wake_unlock_attr.attr,
331#endif
312#endif 332#endif
313 NULL, 333 NULL,
314}; 334};
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 9a00a0a2628..b6b9006480f 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -245,3 +245,27 @@ static inline void suspend_thaw_processes(void)
245{ 245{
246} 246}
247#endif 247#endif
248
249#ifdef CONFIG_WAKELOCK
250/* kernel/power/wakelock.c */
251extern struct workqueue_struct *suspend_work_queue;
252extern struct wake_lock main_wake_lock;
253extern suspend_state_t requested_suspend_state;
254#endif
255
256#ifdef CONFIG_USER_WAKELOCK
257ssize_t wake_lock_show(struct kobject *kobj, struct kobj_attribute *attr,
258 char *buf);
259ssize_t wake_lock_store(struct kobject *kobj, struct kobj_attribute *attr,
260 const char *buf, size_t n);
261ssize_t wake_unlock_show(struct kobject *kobj, struct kobj_attribute *attr,
262 char *buf);
263ssize_t wake_unlock_store(struct kobject *kobj, struct kobj_attribute *attr,
264 const char *buf, size_t n);
265#endif
266
267#ifdef CONFIG_EARLYSUSPEND
268/* kernel/power/earlysuspend.c */
269void request_suspend_state(suspend_state_t state);
270suspend_state_t get_suspend_state(void);
271#endif
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 0cf3a27a6c9..31338cdeafc 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -16,6 +16,7 @@
16#include <linux/freezer.h> 16#include <linux/freezer.h>
17#include <linux/delay.h> 17#include <linux/delay.h>
18#include <linux/workqueue.h> 18#include <linux/workqueue.h>
19#include <linux/wakelock.h>
19 20
20/* 21/*
21 * Timeout for stopping processes 22 * Timeout for stopping processes
@@ -82,6 +83,10 @@ static int try_to_freeze_tasks(bool sig_only)
82 todo += wq_busy; 83 todo += wq_busy;
83 } 84 }
84 85
86 if (todo && has_wake_lock(WAKE_LOCK_SUSPEND)) {
87 wakeup = 1;
88 break;
89 }
85 if (!todo || time_after(jiffies, end_time)) 90 if (!todo || time_after(jiffies, end_time))
86 break; 91 break;
87 92
@@ -108,19 +113,25 @@ static int try_to_freeze_tasks(bool sig_only)
108 * and caller must call thaw_processes() if something fails), 113 * and caller must call thaw_processes() if something fails),
109 * but it cleans up leftover PF_FREEZE requests. 114 * but it cleans up leftover PF_FREEZE requests.
110 */ 115 */
111 printk("\n"); 116 if(wakeup) {
112 printk(KERN_ERR "Freezing of tasks %s after %d.%02d seconds " 117 printk("\n");
113 "(%d tasks refusing to freeze, wq_busy=%d):\n", 118 printk(KERN_ERR "Freezing of %s aborted\n",
114 wakeup ? "aborted" : "failed", 119 sig_only ? "user space " : "tasks ");
115 elapsed_csecs / 100, elapsed_csecs % 100, 120 }
116 todo - wq_busy, wq_busy); 121 else {
117 122 printk("\n");
123 printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds "
124 "(%d tasks refusing to freeze, wq_busy=%d):\n",
125 elapsed_csecs / 100, elapsed_csecs % 100,
126 todo - wq_busy, wq_busy);
127 }
118 thaw_workqueues(); 128 thaw_workqueues();
119 129
120 read_lock(&tasklist_lock); 130 read_lock(&tasklist_lock);
121 do_each_thread(g, p) { 131 do_each_thread(g, p) {
122 task_lock(p); 132 task_lock(p);
123 if (!wakeup && freezing(p) && !freezer_should_skip(p)) 133 if (freezing(p) && !freezer_should_skip(p) &&
134 elapsed_csecs > 100)
124 sched_show_task(p); 135 sched_show_task(p);
125 cancel_freezing(p); 136 cancel_freezing(p);
126 task_unlock(p); 137 task_unlock(p);
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index e40d20595b1..f5adb6e8d0f 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -29,6 +29,9 @@
29#include "power.h" 29#include "power.h"
30 30
31const char *const pm_states[PM_SUSPEND_MAX] = { 31const char *const pm_states[PM_SUSPEND_MAX] = {
32#ifdef CONFIG_EARLYSUSPEND
33 [PM_SUSPEND_ON] = "on",
34#endif
32 [PM_SUSPEND_STANDBY] = "standby", 35 [PM_SUSPEND_STANDBY] = "standby",
33 [PM_SUSPEND_MEM] = "mem", 36 [PM_SUSPEND_MEM] = "mem",
34}; 37};
diff --git a/kernel/power/suspend_time.c b/kernel/power/suspend_time.c
new file mode 100644
index 00000000000..d2a65da9f22
--- /dev/null
+++ b/kernel/power/suspend_time.c
@@ -0,0 +1,111 @@
1/*
2 * debugfs file to track time spent in suspend
3 *
4 * Copyright (c) 2011, Google, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 */
16
17#include <linux/debugfs.h>
18#include <linux/err.h>
19#include <linux/init.h>
20#include <linux/kernel.h>
21#include <linux/seq_file.h>
22#include <linux/syscore_ops.h>
23#include <linux/time.h>
24
25static struct timespec suspend_time_before;
26static unsigned int time_in_suspend_bins[32];
27
28#ifdef CONFIG_DEBUG_FS
29static int suspend_time_debug_show(struct seq_file *s, void *data)
30{
31 int bin;
32 seq_printf(s, "time (secs) count\n");
33 seq_printf(s, "------------------\n");
34 for (bin = 0; bin < 32; bin++) {
35 if (time_in_suspend_bins[bin] == 0)
36 continue;
37 seq_printf(s, "%4d - %4d %4u\n",
38 bin ? 1 << (bin - 1) : 0, 1 << bin,
39 time_in_suspend_bins[bin]);
40 }
41 return 0;
42}
43
44static int suspend_time_debug_open(struct inode *inode, struct file *file)
45{
46 return single_open(file, suspend_time_debug_show, NULL);
47}
48
49static const struct file_operations suspend_time_debug_fops = {
50 .open = suspend_time_debug_open,
51 .read = seq_read,
52 .llseek = seq_lseek,
53 .release = single_release,
54};
55
56static int __init suspend_time_debug_init(void)
57{
58 struct dentry *d;
59
60 d = debugfs_create_file("suspend_time", 0755, NULL, NULL,
61 &suspend_time_debug_fops);
62 if (!d) {
63 pr_err("Failed to create suspend_time debug file\n");
64 return -ENOMEM;
65 }
66
67 return 0;
68}
69
70late_initcall(suspend_time_debug_init);
71#endif
72
73static int suspend_time_syscore_suspend(void)
74{
75 read_persistent_clock(&suspend_time_before);
76
77 return 0;
78}
79
80static void suspend_time_syscore_resume(void)
81{
82 struct timespec after;
83
84 read_persistent_clock(&after);
85
86 after = timespec_sub(after, suspend_time_before);
87
88 time_in_suspend_bins[fls(after.tv_sec)]++;
89
90 pr_info("Suspended for %lu.%03lu seconds\n", after.tv_sec,
91 after.tv_nsec / NSEC_PER_MSEC);
92}
93
94static struct syscore_ops suspend_time_syscore_ops = {
95 .suspend = suspend_time_syscore_suspend,
96 .resume = suspend_time_syscore_resume,
97};
98
99static int suspend_time_syscore_init(void)
100{
101 register_syscore_ops(&suspend_time_syscore_ops);
102
103 return 0;
104}
105
106static void suspend_time_syscore_exit(void)
107{
108 unregister_syscore_ops(&suspend_time_syscore_ops);
109}
110module_init(suspend_time_syscore_init);
111module_exit(suspend_time_syscore_exit);
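suspend_time_syscore_resume() buckets each suspend interval with fls(after.tv_sec), so bin b counts suspends lasting roughly 2^(b-1) up to 2^b seconds and bin 0 holds sub-second suspends, which matches the ranges printed per row by the debugfs file. A small worked example of that mapping:

#include <linux/bitops.h>	/* fls() */

/* A 45-second suspend: fls(45) == 6 (highest set bit of 0b101101 is bit 5),
 * so time_in_suspend_bins[6] is incremented and the corresponding debugfs
 * row covers the range 32 - 64 seconds. */
static unsigned int suspend_time_bin_example(void)
{
	unsigned int bin = fls(45);	/* == 6 */

	/* printed range: (bin ? 1 << (bin - 1) : 0) .. (1 << bin)  ->  32 .. 64 */
	return bin;
}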
diff --git a/kernel/power/userwakelock.c b/kernel/power/userwakelock.c
new file mode 100644
index 00000000000..a28a8db4146
--- /dev/null
+++ b/kernel/power/userwakelock.c
@@ -0,0 +1,219 @@
1/* kernel/power/userwakelock.c
2 *
3 * Copyright (C) 2005-2008 Google, Inc.
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 */
15
16#include <linux/ctype.h>
17#include <linux/module.h>
18#include <linux/wakelock.h>
19#include <linux/slab.h>
20
21#include "power.h"
22
23enum {
24 DEBUG_FAILURE = BIT(0),
25 DEBUG_ERROR = BIT(1),
26 DEBUG_NEW = BIT(2),
27 DEBUG_ACCESS = BIT(3),
28 DEBUG_LOOKUP = BIT(4),
29};
30static int debug_mask = DEBUG_FAILURE;
31module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
32
33static DEFINE_MUTEX(tree_lock);
34
35struct user_wake_lock {
36 struct rb_node node;
37 struct wake_lock wake_lock;
38 char name[0];
39};
40struct rb_root user_wake_locks;
41
42static struct user_wake_lock *lookup_wake_lock_name(
43 const char *buf, int allocate, long *timeoutptr)
44{
45 struct rb_node **p = &user_wake_locks.rb_node;
46 struct rb_node *parent = NULL;
47 struct user_wake_lock *l;
48 int diff;
49 u64 timeout;
50 int name_len;
51 const char *arg;
52
53 /* Find length of lock name and start of optional timeout string */
54 arg = buf;
55 while (*arg && !isspace(*arg))
56 arg++;
57 name_len = arg - buf;
58 if (!name_len)
59 goto bad_arg;
60 while (isspace(*arg))
61 arg++;
62
63 /* Process timeout string */
64 if (timeoutptr && *arg) {
65 timeout = simple_strtoull(arg, (char **)&arg, 0);
66 while (isspace(*arg))
67 arg++;
68 if (*arg)
69 goto bad_arg;
70 /* convert timeout from nanoseconds to jiffies > 0 */
71 timeout += (NSEC_PER_SEC / HZ) - 1;
72 do_div(timeout, (NSEC_PER_SEC / HZ));
73 if (timeout <= 0)
74 timeout = 1;
75 *timeoutptr = timeout;
76 } else if (*arg)
77 goto bad_arg;
78 else if (timeoutptr)
79 *timeoutptr = 0;
80
81 /* Lookup wake lock in rbtree */
82 while (*p) {
83 parent = *p;
84 l = rb_entry(parent, struct user_wake_lock, node);
85 diff = strncmp(buf, l->name, name_len);
86 if (!diff && l->name[name_len])
87 diff = -1;
88 if (debug_mask & DEBUG_ERROR)
89 pr_info("lookup_wake_lock_name: compare %.*s %s %d\n",
90 name_len, buf, l->name, diff);
91
92 if (diff < 0)
93 p = &(*p)->rb_left;
94 else if (diff > 0)
95 p = &(*p)->rb_right;
96 else
97 return l;
98 }
99
100 /* Allocate and add new wakelock to rbtree */
101 if (!allocate) {
102 if (debug_mask & DEBUG_ERROR)
103 pr_info("lookup_wake_lock_name: %.*s not found\n",
104 name_len, buf);
105 return ERR_PTR(-EINVAL);
106 }
107 l = kzalloc(sizeof(*l) + name_len + 1, GFP_KERNEL);
108 if (l == NULL) {
109 if (debug_mask & DEBUG_FAILURE)
110 pr_err("lookup_wake_lock_name: failed to allocate "
111 "memory for %.*s\n", name_len, buf);
112 return ERR_PTR(-ENOMEM);
113 }
114 memcpy(l->name, buf, name_len);
115 if (debug_mask & DEBUG_NEW)
116 pr_info("lookup_wake_lock_name: new wake lock %s\n", l->name);
117 wake_lock_init(&l->wake_lock, WAKE_LOCK_SUSPEND, l->name);
118 rb_link_node(&l->node, parent, p);
119 rb_insert_color(&l->node, &user_wake_locks);
120 return l;
121
122bad_arg:
123 if (debug_mask & DEBUG_ERROR)
124 pr_info("lookup_wake_lock_name: wake lock, %.*s, bad arg, %s\n",
125 name_len, buf, arg);
126 return ERR_PTR(-EINVAL);
127}
128
129ssize_t wake_lock_show(
130 struct kobject *kobj, struct kobj_attribute *attr, char *buf)
131{
132 char *s = buf;
133 char *end = buf + PAGE_SIZE;
134 struct rb_node *n;
135 struct user_wake_lock *l;
136
137 mutex_lock(&tree_lock);
138
139 for (n = rb_first(&user_wake_locks); n != NULL; n = rb_next(n)) {
140 l = rb_entry(n, struct user_wake_lock, node);
141 if (wake_lock_active(&l->wake_lock))
142 s += scnprintf(s, end - s, "%s ", l->name);
143 }
144 s += scnprintf(s, end - s, "\n");
145
146 mutex_unlock(&tree_lock);
147 return (s - buf);
148}
149
150ssize_t wake_lock_store(
151 struct kobject *kobj, struct kobj_attribute *attr,
152 const char *buf, size_t n)
153{
154 long timeout;
155 struct user_wake_lock *l;
156
157 mutex_lock(&tree_lock);
158 l = lookup_wake_lock_name(buf, 1, &timeout);
159 if (IS_ERR(l)) {
160 n = PTR_ERR(l);
161 goto bad_name;
162 }
163
164 if (debug_mask & DEBUG_ACCESS)
165 pr_info("wake_lock_store: %s, timeout %ld\n", l->name, timeout);
166
167 if (timeout)
168 wake_lock_timeout(&l->wake_lock, timeout);
169 else
170 wake_lock(&l->wake_lock);
171bad_name:
172 mutex_unlock(&tree_lock);
173 return n;
174}
175
176
177ssize_t wake_unlock_show(
178 struct kobject *kobj, struct kobj_attribute *attr, char *buf)
179{
180 char *s = buf;
181 char *end = buf + PAGE_SIZE;
182 struct rb_node *n;
183 struct user_wake_lock *l;
184
185 mutex_lock(&tree_lock);
186
187 for (n = rb_first(&user_wake_locks); n != NULL; n = rb_next(n)) {
188 l = rb_entry(n, struct user_wake_lock, node);
189 if (!wake_lock_active(&l->wake_lock))
190 s += scnprintf(s, end - s, "%s ", l->name);
191 }
192 s += scnprintf(s, end - s, "\n");
193
194 mutex_unlock(&tree_lock);
195 return (s - buf);
196}
197
198ssize_t wake_unlock_store(
199 struct kobject *kobj, struct kobj_attribute *attr,
200 const char *buf, size_t n)
201{
202 struct user_wake_lock *l;
203
204 mutex_lock(&tree_lock);
205 l = lookup_wake_lock_name(buf, 0, NULL);
206 if (IS_ERR(l)) {
207 n = PTR_ERR(l);
208 goto not_found;
209 }
210
211 if (debug_mask & DEBUG_ACCESS)
212 pr_info("wake_unlock_store: %s\n", l->name);
213
214 wake_unlock(&l->wake_lock);
215not_found:
216 mutex_unlock(&tree_lock);
217 return n;
218}
219
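wake_lock_store() and wake_unlock_store() implement the /sys/power/wake_lock and /sys/power/wake_unlock attributes wired up in kernel/power/main.c earlier in this patch: writing "name" takes (and on first use creates) a suspend wake lock, writing "name <timeout>" takes it with a timeout given in nanoseconds (converted to jiffies above), and writing the name to wake_unlock releases it. A hedged user-space sketch; the lock name and timeout value are examples.

/* Hypothetical user-space client of the wake_lock/wake_unlock files. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int write_str(const char *path, const char *s)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, s, strlen(s));
	close(fd);
	return n < (ssize_t)strlen(s) ? -1 : 0;
}

int main(void)
{
	/* hold "mylock" for at most 5 seconds (timeout is in nanoseconds) */
	write_str("/sys/power/wake_lock", "mylock 5000000000");
	/* ... work that must finish before the device may suspend ... */
	write_str("/sys/power/wake_unlock", "mylock");
	return 0;
}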
diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c
new file mode 100644
index 00000000000..81e1b7c65ca
--- /dev/null
+++ b/kernel/power/wakelock.c
@@ -0,0 +1,634 @@
1/* kernel/power/wakelock.c
2 *
3 * Copyright (C) 2005-2008 Google, Inc.
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 */
15
16#include <linux/module.h>
17#include <linux/platform_device.h>
18#include <linux/rtc.h>
19#include <linux/suspend.h>
20#include <linux/syscalls.h> /* sys_sync */
21#include <linux/wakelock.h>
22#ifdef CONFIG_WAKELOCK_STAT
23#include <linux/proc_fs.h>
24#endif
25#include "power.h"
26
27enum {
28 DEBUG_EXIT_SUSPEND = 1U << 0,
29 DEBUG_WAKEUP = 1U << 1,
30 DEBUG_SUSPEND = 1U << 2,
31 DEBUG_EXPIRE = 1U << 3,
32 DEBUG_WAKE_LOCK = 1U << 4,
33};
34static int debug_mask = DEBUG_EXIT_SUSPEND | DEBUG_WAKEUP;
35module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
36
37#define WAKE_LOCK_TYPE_MASK (0x0f)
38#define WAKE_LOCK_INITIALIZED (1U << 8)
39#define WAKE_LOCK_ACTIVE (1U << 9)
40#define WAKE_LOCK_AUTO_EXPIRE (1U << 10)
41#define WAKE_LOCK_PREVENTING_SUSPEND (1U << 11)
42
43static DEFINE_SPINLOCK(list_lock);
44static LIST_HEAD(inactive_locks);
45static struct list_head active_wake_locks[WAKE_LOCK_TYPE_COUNT];
46static int current_event_num;
47struct workqueue_struct *suspend_work_queue;
48struct wake_lock main_wake_lock;
49suspend_state_t requested_suspend_state = PM_SUSPEND_MEM;
50static struct wake_lock unknown_wakeup;
51static struct wake_lock suspend_backoff_lock;
52
53#define SUSPEND_BACKOFF_THRESHOLD 10
54#define SUSPEND_BACKOFF_INTERVAL 10000
55
56static unsigned suspend_short_count;
57
58#ifdef CONFIG_WAKELOCK_STAT
59static struct wake_lock deleted_wake_locks;
60static ktime_t last_sleep_time_update;
61static int wait_for_wakeup;
62
63int get_expired_time(struct wake_lock *lock, ktime_t *expire_time)
64{
65 struct timespec ts;
66 struct timespec kt;
67 struct timespec tomono;
68 struct timespec delta;
69 struct timespec sleep;
70 long timeout;
71
72 if (!(lock->flags & WAKE_LOCK_AUTO_EXPIRE))
73 return 0;
74 get_xtime_and_monotonic_and_sleep_offset(&kt, &tomono, &sleep);
75 timeout = lock->expires - jiffies;
76 if (timeout > 0)
77 return 0;
78 jiffies_to_timespec(-timeout, &delta);
79 set_normalized_timespec(&ts, kt.tv_sec + tomono.tv_sec - delta.tv_sec,
80 kt.tv_nsec + tomono.tv_nsec - delta.tv_nsec);
81 *expire_time = timespec_to_ktime(ts);
82 return 1;
83}
84
85
86static int print_lock_stat(struct seq_file *m, struct wake_lock *lock)
87{
88 int lock_count = lock->stat.count;
89 int expire_count = lock->stat.expire_count;
90 ktime_t active_time = ktime_set(0, 0);
91 ktime_t total_time = lock->stat.total_time;
92 ktime_t max_time = lock->stat.max_time;
93
94 ktime_t prevent_suspend_time = lock->stat.prevent_suspend_time;
95 if (lock->flags & WAKE_LOCK_ACTIVE) {
96 ktime_t now, add_time;
97 int expired = get_expired_time(lock, &now);
98 if (!expired)
99 now = ktime_get();
100 add_time = ktime_sub(now, lock->stat.last_time);
101 lock_count++;
102 if (!expired)
103 active_time = add_time;
104 else
105 expire_count++;
106 total_time = ktime_add(total_time, add_time);
107 if (lock->flags & WAKE_LOCK_PREVENTING_SUSPEND)
108 prevent_suspend_time = ktime_add(prevent_suspend_time,
109 ktime_sub(now, last_sleep_time_update));
110 if (add_time.tv64 > max_time.tv64)
111 max_time = add_time;
112 }
113
114 return seq_printf(m,
115 "\"%s\"\t%d\t%d\t%d\t%lld\t%lld\t%lld\t%lld\t%lld\n",
116 lock->name, lock_count, expire_count,
117 lock->stat.wakeup_count, ktime_to_ns(active_time),
118 ktime_to_ns(total_time),
119 ktime_to_ns(prevent_suspend_time), ktime_to_ns(max_time),
120 ktime_to_ns(lock->stat.last_time));
121}
122
123static int wakelock_stats_show(struct seq_file *m, void *unused)
124{
125 unsigned long irqflags;
126 struct wake_lock *lock;
127 int ret;
128 int type;
129
130 spin_lock_irqsave(&list_lock, irqflags);
131
132 ret = seq_puts(m, "name\tcount\texpire_count\twake_count\tactive_since"
133 "\ttotal_time\tsleep_time\tmax_time\tlast_change\n");
134 list_for_each_entry(lock, &inactive_locks, link)
135 ret = print_lock_stat(m, lock);
136 for (type = 0; type < WAKE_LOCK_TYPE_COUNT; type++) {
137 list_for_each_entry(lock, &active_wake_locks[type], link)
138 ret = print_lock_stat(m, lock);
139 }
140 spin_unlock_irqrestore(&list_lock, irqflags);
141 return 0;
142}
143
144static void wake_unlock_stat_locked(struct wake_lock *lock, int expired)
145{
146 ktime_t duration;
147 ktime_t now;
148 if (!(lock->flags & WAKE_LOCK_ACTIVE))
149 return;
150 if (get_expired_time(lock, &now))
151 expired = 1;
152 else
153 now = ktime_get();
154 lock->stat.count++;
155 if (expired)
156 lock->stat.expire_count++;
157 duration = ktime_sub(now, lock->stat.last_time);
158 lock->stat.total_time = ktime_add(lock->stat.total_time, duration);
159 if (ktime_to_ns(duration) > ktime_to_ns(lock->stat.max_time))
160 lock->stat.max_time = duration;
161 lock->stat.last_time = ktime_get();
162 if (lock->flags & WAKE_LOCK_PREVENTING_SUSPEND) {
163 duration = ktime_sub(now, last_sleep_time_update);
164 lock->stat.prevent_suspend_time = ktime_add(
165 lock->stat.prevent_suspend_time, duration);
166 lock->flags &= ~WAKE_LOCK_PREVENTING_SUSPEND;
167 }
168}
169
170static void update_sleep_wait_stats_locked(int done)
171{
172 struct wake_lock *lock;
173 ktime_t now, etime, elapsed, add;
174 int expired;
175
176 now = ktime_get();
177 elapsed = ktime_sub(now, last_sleep_time_update);
178 list_for_each_entry(lock, &active_wake_locks[WAKE_LOCK_SUSPEND], link) {
179 expired = get_expired_time(lock, &etime);
180 if (lock->flags & WAKE_LOCK_PREVENTING_SUSPEND) {
181 if (expired)
182 add = ktime_sub(etime, last_sleep_time_update);
183 else
184 add = elapsed;
185 lock->stat.prevent_suspend_time = ktime_add(
186 lock->stat.prevent_suspend_time, add);
187 }
188 if (done || expired)
189 lock->flags &= ~WAKE_LOCK_PREVENTING_SUSPEND;
190 else
191 lock->flags |= WAKE_LOCK_PREVENTING_SUSPEND;
192 }
193 last_sleep_time_update = now;
194}
195#endif
196
197
198static void expire_wake_lock(struct wake_lock *lock)
199{
200#ifdef CONFIG_WAKELOCK_STAT
201 wake_unlock_stat_locked(lock, 1);
202#endif
203 lock->flags &= ~(WAKE_LOCK_ACTIVE | WAKE_LOCK_AUTO_EXPIRE);
204 list_del(&lock->link);
205 list_add(&lock->link, &inactive_locks);
206 if (debug_mask & (DEBUG_WAKE_LOCK | DEBUG_EXPIRE))
207 pr_info("expired wake lock %s\n", lock->name);
208}
209
210/* Caller must acquire the list_lock spinlock */
211static void print_active_locks(int type)
212{
213 struct wake_lock *lock;
214 bool print_expired = true;
215
216 BUG_ON(type >= WAKE_LOCK_TYPE_COUNT);
217 list_for_each_entry(lock, &active_wake_locks[type], link) {
218 if (lock->flags & WAKE_LOCK_AUTO_EXPIRE) {
219 long timeout = lock->expires - jiffies;
220 if (timeout > 0)
221 pr_info("active wake lock %s, time left %ld\n",
222 lock->name, timeout);
223 else if (print_expired)
224 pr_info("wake lock %s, expired\n", lock->name);
225 } else {
226 pr_info("active wake lock %s\n", lock->name);
227 if (!(debug_mask & DEBUG_EXPIRE))
228 print_expired = false;
229 }
230 }
231}
232
233static long has_wake_lock_locked(int type)
234{
235 struct wake_lock *lock, *n;
236 long max_timeout = 0;
237
238 BUG_ON(type >= WAKE_LOCK_TYPE_COUNT);
239 list_for_each_entry_safe(lock, n, &active_wake_locks[type], link) {
240 if (lock->flags & WAKE_LOCK_AUTO_EXPIRE) {
241 long timeout = lock->expires - jiffies;
242 if (timeout <= 0)
243 expire_wake_lock(lock);
244 else if (timeout > max_timeout)
245 max_timeout = timeout;
246 } else
247 return -1;
248 }
249 return max_timeout;
250}
251
252long has_wake_lock(int type)
253{
254 long ret;
255 unsigned long irqflags;
256 spin_lock_irqsave(&list_lock, irqflags);
257 ret = has_wake_lock_locked(type);
258 if (ret && (debug_mask & DEBUG_WAKEUP) && type == WAKE_LOCK_SUSPEND)
259 print_active_locks(type);
260 spin_unlock_irqrestore(&list_lock, irqflags);
261 return ret;
262}
263
264static void suspend_backoff(void)
265{
266 pr_info("suspend: too many immediate wakeups, back off\n");
267 wake_lock_timeout(&suspend_backoff_lock,
268 msecs_to_jiffies(SUSPEND_BACKOFF_INTERVAL));
269}
270
271static void suspend(struct work_struct *work)
272{
273 int ret;
274 int entry_event_num;
275 struct timespec ts_entry, ts_exit;
276
277 if (has_wake_lock(WAKE_LOCK_SUSPEND)) {
278 if (debug_mask & DEBUG_SUSPEND)
279 pr_info("suspend: abort suspend\n");
280 return;
281 }
282
283 entry_event_num = current_event_num;
284 sys_sync();
285 if (debug_mask & DEBUG_SUSPEND)
286 pr_info("suspend: enter suspend\n");
287 getnstimeofday(&ts_entry);
288 ret = pm_suspend(requested_suspend_state);
289 getnstimeofday(&ts_exit);
290
291 if (debug_mask & DEBUG_EXIT_SUSPEND) {
292 struct rtc_time tm;
293 rtc_time_to_tm(ts_exit.tv_sec, &tm);
294 pr_info("suspend: exit suspend, ret = %d "
295 "(%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n", ret,
296 tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
297 tm.tm_hour, tm.tm_min, tm.tm_sec, ts_exit.tv_nsec);
298 }
299
300 if (ts_exit.tv_sec - ts_entry.tv_sec <= 1) {
301 ++suspend_short_count;
302
303 if (suspend_short_count == SUSPEND_BACKOFF_THRESHOLD) {
304 suspend_backoff();
305 suspend_short_count = 0;
306 }
307 } else {
308 suspend_short_count = 0;
309 }
310
311 if (current_event_num == entry_event_num) {
312 if (debug_mask & DEBUG_SUSPEND)
313 pr_info("suspend: pm_suspend returned with no event\n");
314 wake_lock_timeout(&unknown_wakeup, HZ / 2);
315 }
316}
317static DECLARE_WORK(suspend_work, suspend);
318
319static void expire_wake_locks(unsigned long data)
320{
321 long has_lock;
322 unsigned long irqflags;
323 if (debug_mask & DEBUG_EXPIRE)
324 pr_info("expire_wake_locks: start\n");
325 spin_lock_irqsave(&list_lock, irqflags);
326 if (debug_mask & DEBUG_SUSPEND)
327 print_active_locks(WAKE_LOCK_SUSPEND);
328 has_lock = has_wake_lock_locked(WAKE_LOCK_SUSPEND);
329 if (debug_mask & DEBUG_EXPIRE)
330 pr_info("expire_wake_locks: done, has_lock %ld\n", has_lock);
331 if (has_lock == 0)
332 queue_work(suspend_work_queue, &suspend_work);
333 spin_unlock_irqrestore(&list_lock, irqflags);
334}
335static DEFINE_TIMER(expire_timer, expire_wake_locks, 0, 0);
336
337static int power_suspend_late(struct device *dev)
338{
339 int ret = has_wake_lock(WAKE_LOCK_SUSPEND) ? -EAGAIN : 0;
340#ifdef CONFIG_WAKELOCK_STAT
341 wait_for_wakeup = !ret;
342#endif
343 if (debug_mask & DEBUG_SUSPEND)
344 pr_info("power_suspend_late return %d\n", ret);
345 return ret;
346}
347
348static struct dev_pm_ops power_driver_pm_ops = {
349 .suspend_noirq = power_suspend_late,
350};
351
352static struct platform_driver power_driver = {
353 .driver.name = "power",
354 .driver.pm = &power_driver_pm_ops,
355};
356static struct platform_device power_device = {
357 .name = "power",
358};
359
360void wake_lock_init(struct wake_lock *lock, int type, const char *name)
361{
362 unsigned long irqflags = 0;
363
364 if (name)
365 lock->name = name;
366 BUG_ON(!lock->name);
367
368 if (debug_mask & DEBUG_WAKE_LOCK)
369 pr_info("wake_lock_init name=%s\n", lock->name);
370#ifdef CONFIG_WAKELOCK_STAT
371 lock->stat.count = 0;
372 lock->stat.expire_count = 0;
373 lock->stat.wakeup_count = 0;
374 lock->stat.total_time = ktime_set(0, 0);
375 lock->stat.prevent_suspend_time = ktime_set(0, 0);
376 lock->stat.max_time = ktime_set(0, 0);
377 lock->stat.last_time = ktime_set(0, 0);
378#endif
379 lock->flags = (type & WAKE_LOCK_TYPE_MASK) | WAKE_LOCK_INITIALIZED;
380
381 INIT_LIST_HEAD(&lock->link);
382 spin_lock_irqsave(&list_lock, irqflags);
383 list_add(&lock->link, &inactive_locks);
384 spin_unlock_irqrestore(&list_lock, irqflags);
385}
386EXPORT_SYMBOL(wake_lock_init);
387
388void wake_lock_destroy(struct wake_lock *lock)
389{
390 unsigned long irqflags;
391 if (debug_mask & DEBUG_WAKE_LOCK)
392 pr_info("wake_lock_destroy name=%s\n", lock->name);
393 spin_lock_irqsave(&list_lock, irqflags);
394 lock->flags &= ~WAKE_LOCK_INITIALIZED;
395#ifdef CONFIG_WAKELOCK_STAT
396 if (lock->stat.count) {
397 deleted_wake_locks.stat.count += lock->stat.count;
398 deleted_wake_locks.stat.expire_count += lock->stat.expire_count;
399 deleted_wake_locks.stat.total_time =
400 ktime_add(deleted_wake_locks.stat.total_time,
401 lock->stat.total_time);
402 deleted_wake_locks.stat.prevent_suspend_time =
403 ktime_add(deleted_wake_locks.stat.prevent_suspend_time,
404 lock->stat.prevent_suspend_time);
405 deleted_wake_locks.stat.max_time =
406 ktime_add(deleted_wake_locks.stat.max_time,
407 lock->stat.max_time);
408 }
409#endif
410 list_del(&lock->link);
411 spin_unlock_irqrestore(&list_lock, irqflags);
412}
413EXPORT_SYMBOL(wake_lock_destroy);
414
415static void wake_lock_internal(
416 struct wake_lock *lock, long timeout, int has_timeout)
417{
418 int type;
419 unsigned long irqflags;
420 long expire_in;
421
422 spin_lock_irqsave(&list_lock, irqflags);
423 type = lock->flags & WAKE_LOCK_TYPE_MASK;
424 BUG_ON(type >= WAKE_LOCK_TYPE_COUNT);
425 BUG_ON(!(lock->flags & WAKE_LOCK_INITIALIZED));
426#ifdef CONFIG_WAKELOCK_STAT
427 if (type == WAKE_LOCK_SUSPEND && wait_for_wakeup) {
428 if (debug_mask & DEBUG_WAKEUP)
429 pr_info("wakeup wake lock: %s\n", lock->name);
430 wait_for_wakeup = 0;
431 lock->stat.wakeup_count++;
432 }
433 if ((lock->flags & WAKE_LOCK_AUTO_EXPIRE) &&
434 (long)(lock->expires - jiffies) <= 0) {
435 wake_unlock_stat_locked(lock, 0);
436 lock->stat.last_time = ktime_get();
437 }
438#endif
439 if (!(lock->flags & WAKE_LOCK_ACTIVE)) {
440 lock->flags |= WAKE_LOCK_ACTIVE;
441#ifdef CONFIG_WAKELOCK_STAT
442 lock->stat.last_time = ktime_get();
443#endif
444 }
445 list_del(&lock->link);
446 if (has_timeout) {
447 if (debug_mask & DEBUG_WAKE_LOCK)
448 pr_info("wake_lock: %s, type %d, timeout %ld.%03lu\n",
449 lock->name, type, timeout / HZ,
450 (timeout % HZ) * MSEC_PER_SEC / HZ);
451 lock->expires = jiffies + timeout;
452 lock->flags |= WAKE_LOCK_AUTO_EXPIRE;
453 list_add_tail(&lock->link, &active_wake_locks[type]);
454 } else {
455 if (debug_mask & DEBUG_WAKE_LOCK)
456 pr_info("wake_lock: %s, type %d\n", lock->name, type);
457 lock->expires = LONG_MAX;
458 lock->flags &= ~WAKE_LOCK_AUTO_EXPIRE;
459 list_add(&lock->link, &active_wake_locks[type]);
460 }
461 if (type == WAKE_LOCK_SUSPEND) {
462 current_event_num++;
463#ifdef CONFIG_WAKELOCK_STAT
464 if (lock == &main_wake_lock)
465 update_sleep_wait_stats_locked(1);
466 else if (!wake_lock_active(&main_wake_lock))
467 update_sleep_wait_stats_locked(0);
468#endif
469 if (has_timeout)
470 expire_in = has_wake_lock_locked(type);
471 else
472 expire_in = -1;
473 if (expire_in > 0) {
474 if (debug_mask & DEBUG_EXPIRE)
475 pr_info("wake_lock: %s, start expire timer, "
476 "%ld\n", lock->name, expire_in);
477 mod_timer(&expire_timer, jiffies + expire_in);
478 } else {
479 if (del_timer(&expire_timer))
480 if (debug_mask & DEBUG_EXPIRE)
481 pr_info("wake_lock: %s, stop expire timer\n",
482 lock->name);
483 if (expire_in == 0)
484 queue_work(suspend_work_queue, &suspend_work);
485 }
486 }
487 spin_unlock_irqrestore(&list_lock, irqflags);
488}
489
490void wake_lock(struct wake_lock *lock)
491{
492 wake_lock_internal(lock, 0, 0);
493}
494EXPORT_SYMBOL(wake_lock);
495
496void wake_lock_timeout(struct wake_lock *lock, long timeout)
497{
498 wake_lock_internal(lock, timeout, 1);
499}
500EXPORT_SYMBOL(wake_lock_timeout);
501
502void wake_unlock(struct wake_lock *lock)
503{
504 int type;
505 unsigned long irqflags;
506 spin_lock_irqsave(&list_lock, irqflags);
507 type = lock->flags & WAKE_LOCK_TYPE_MASK;
508#ifdef CONFIG_WAKELOCK_STAT
509 wake_unlock_stat_locked(lock, 0);
510#endif
511 if (debug_mask & DEBUG_WAKE_LOCK)
512 pr_info("wake_unlock: %s\n", lock->name);
513 lock->flags &= ~(WAKE_LOCK_ACTIVE | WAKE_LOCK_AUTO_EXPIRE);
514 list_del(&lock->link);
515 list_add(&lock->link, &inactive_locks);
516 if (type == WAKE_LOCK_SUSPEND) {
517 long has_lock = has_wake_lock_locked(type);
518 if (has_lock > 0) {
519 if (debug_mask & DEBUG_EXPIRE)
520 pr_info("wake_unlock: %s, start expire timer, "
521 "%ld\n", lock->name, has_lock);
522 mod_timer(&expire_timer, jiffies + has_lock);
523 } else {
524 if (del_timer(&expire_timer))
525 if (debug_mask & DEBUG_EXPIRE)
526 pr_info("wake_unlock: %s, stop expire "
527 "timer\n", lock->name);
528 if (has_lock == 0)
529 queue_work(suspend_work_queue, &suspend_work);
530 }
531 if (lock == &main_wake_lock) {
532 if (debug_mask & DEBUG_SUSPEND)
533 print_active_locks(WAKE_LOCK_SUSPEND);
534#ifdef CONFIG_WAKELOCK_STAT
535 update_sleep_wait_stats_locked(0);
536#endif
537 }
538 }
539 spin_unlock_irqrestore(&list_lock, irqflags);
540}
541EXPORT_SYMBOL(wake_unlock);
542
543int wake_lock_active(struct wake_lock *lock)
544{
545 return !!(lock->flags & WAKE_LOCK_ACTIVE);
546}
547EXPORT_SYMBOL(wake_lock_active);
548
549static int wakelock_stats_open(struct inode *inode, struct file *file)
550{
551 return single_open(file, wakelock_stats_show, NULL);
552}
553
554static const struct file_operations wakelock_stats_fops = {
555 .owner = THIS_MODULE,
556 .open = wakelock_stats_open,
557 .read = seq_read,
558 .llseek = seq_lseek,
559 .release = single_release,
560};
561
562static int __init wakelocks_init(void)
563{
564 int ret;
565 int i;
566
567 for (i = 0; i < ARRAY_SIZE(active_wake_locks); i++)
568 INIT_LIST_HEAD(&active_wake_locks[i]);
569
570#ifdef CONFIG_WAKELOCK_STAT
571 wake_lock_init(&deleted_wake_locks, WAKE_LOCK_SUSPEND,
572 "deleted_wake_locks");
573#endif
574 wake_lock_init(&main_wake_lock, WAKE_LOCK_SUSPEND, "main");
575 wake_lock(&main_wake_lock);
576 wake_lock_init(&unknown_wakeup, WAKE_LOCK_SUSPEND, "unknown_wakeups");
577 wake_lock_init(&suspend_backoff_lock, WAKE_LOCK_SUSPEND,
578 "suspend_backoff");
579
580 ret = platform_device_register(&power_device);
581 if (ret) {
582 pr_err("wakelocks_init: platform_device_register failed\n");
583 goto err_platform_device_register;
584 }
585 ret = platform_driver_register(&power_driver);
586 if (ret) {
587 pr_err("wakelocks_init: platform_driver_register failed\n");
588 goto err_platform_driver_register;
589 }
590
591 suspend_work_queue = create_singlethread_workqueue("suspend");
592 if (suspend_work_queue == NULL) {
593 ret = -ENOMEM;
594 goto err_suspend_work_queue;
595 }
596
597#ifdef CONFIG_WAKELOCK_STAT
598 proc_create("wakelocks", S_IRUGO, NULL, &wakelock_stats_fops);
599#endif
600
601 return 0;
602
603err_suspend_work_queue:
604 platform_driver_unregister(&power_driver);
605err_platform_driver_register:
606 platform_device_unregister(&power_device);
607err_platform_device_register:
608 wake_lock_destroy(&suspend_backoff_lock);
609 wake_lock_destroy(&unknown_wakeup);
610 wake_lock_destroy(&main_wake_lock);
611#ifdef CONFIG_WAKELOCK_STAT
612 wake_lock_destroy(&deleted_wake_locks);
613#endif
614 return ret;
615}
616
617static void __exit wakelocks_exit(void)
618{
619#ifdef CONFIG_WAKELOCK_STAT
620 remove_proc_entry("wakelocks", NULL);
621#endif
622 destroy_workqueue(suspend_work_queue);
623 platform_driver_unregister(&power_driver);
624 platform_device_unregister(&power_device);
625 wake_lock_destroy(&suspend_backoff_lock);
626 wake_lock_destroy(&unknown_wakeup);
627 wake_lock_destroy(&main_wake_lock);
628#ifdef CONFIG_WAKELOCK_STAT
629 wake_lock_destroy(&deleted_wake_locks);
630#endif
631}
632
633core_initcall(wakelocks_init);
634module_exit(wakelocks_exit);
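For reference, a driver-side sketch of the in-kernel API this file defines (wake_lock_init(), wake_lock_timeout(), wake_lock_destroy()): while a WAKE_LOCK_SUSPEND lock is held, has_wake_lock() stays non-zero and the suspend work above aborts. The device name and interrupt handler below are illustrative and not part of this patch; the ISR would be registered with request_irq() elsewhere.

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/wakelock.h>

static struct wake_lock example_wake_lock;

static irqreturn_t example_isr(int irq, void *dev_id)
{
	/* keep the system out of suspend for up to two seconds while
	 * the wakeup event is handled; timeout is in jiffies */
	wake_lock_timeout(&example_wake_lock, 2 * HZ);
	return IRQ_HANDLED;
}

static int __init example_init(void)
{
	wake_lock_init(&example_wake_lock, WAKE_LOCK_SUSPEND, "example_irq");
	return 0;
}

static void __exit example_exit(void)
{
	wake_lock_destroy(&example_wake_lock);
}

module_init(example_init);
module_exit(example_exit);
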
diff --git a/kernel/printk.c b/kernel/printk.c
index 6edc4e89529..a1d702c1313 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -53,6 +53,10 @@ void asmlinkage __attribute__((weak)) early_printk(const char *fmt, ...)
53 53
54#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT) 54#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
55 55
56#ifdef CONFIG_DEBUG_LL
57extern void printascii(char *);
58#endif
59
56/* printk's without a loglevel use this.. */ 60/* printk's without a loglevel use this.. */
57#define DEFAULT_MESSAGE_LOGLEVEL CONFIG_DEFAULT_MESSAGE_LOGLEVEL 61#define DEFAULT_MESSAGE_LOGLEVEL CONFIG_DEFAULT_MESSAGE_LOGLEVEL
58 62
@@ -290,6 +294,53 @@ static inline void boot_delay_msec(void)
290} 294}
291#endif 295#endif
292 296
297/*
298 * Return the number of unread characters in the log buffer.
299 */
300static int log_buf_get_len(void)
301{
302 return logged_chars;
303}
304
305/*
306 * Clears the ring-buffer
307 */
308void log_buf_clear(void)
309{
310 logged_chars = 0;
311}
312
313/*
314 * Copy a range of characters from the log buffer.
315 */
316int log_buf_copy(char *dest, int idx, int len)
317{
318 int ret, max;
319 bool took_lock = false;
320
321 if (!oops_in_progress) {
322 spin_lock_irq(&logbuf_lock);
323 took_lock = true;
324 }
325
326 max = log_buf_get_len();
327 if (idx < 0 || idx >= max) {
328 ret = -1;
329 } else {
330 if (len > max - idx)
331 len = max - idx;
332 ret = len;
333 idx += (log_end - max);
334 while (len-- > 0)
335 dest[len] = LOG_BUF(idx + len);
336 }
337
338 if (took_lock)
339 spin_unlock_irq(&logbuf_lock);
340
341 return ret;
342}
343
293#ifdef CONFIG_SECURITY_DMESG_RESTRICT 344#ifdef CONFIG_SECURITY_DMESG_RESTRICT
294int dmesg_restrict = 1; 345int dmesg_restrict = 1;
295#else 346#else
@@ -885,6 +936,10 @@ asmlinkage int vprintk(const char *fmt, va_list args)
885 printed_len += vscnprintf(printk_buf + printed_len, 936 printed_len += vscnprintf(printk_buf + printed_len,
886 sizeof(printk_buf) - printed_len, fmt, args); 937 sizeof(printk_buf) - printed_len, fmt, args);
887 938
939#ifdef CONFIG_DEBUG_LL
940 printascii(printk_buf);
941#endif
942
888 p = printk_buf; 943 p = printk_buf;
889 944
890 /* Read log level and handle special printk prefix */ 945 /* Read log level and handle special printk prefix */
@@ -1159,7 +1214,6 @@ static int __cpuinit console_cpu_notify(struct notifier_block *self,
1159 switch (action) { 1214 switch (action) {
1160 case CPU_ONLINE: 1215 case CPU_ONLINE:
1161 case CPU_DEAD: 1216 case CPU_DEAD:
1162 case CPU_DYING:
1163 case CPU_DOWN_FAILED: 1217 case CPU_DOWN_FAILED:
1164 case CPU_UP_CANCELED: 1218 case CPU_UP_CANCELED:
1165 console_lock(); 1219 console_lock();
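The log_buf_copy()/log_buf_clear() helpers added above are not consumed anywhere in this diff; they are intended for out-of-tree debug code such as Android's RAM console. A hedged sketch of such a consumer, with a made-up function name and an arbitrary chunk size, might look like this:

extern int log_buf_copy(char *dest, int idx, int len);

/* Drain the unread part of the printk ring buffer in small chunks,
 * handing each chunk to some persistent store (not shown). */
static void example_drain_log(void)
{
	static char chunk[256];
	int idx = 0, n;

	while ((n = log_buf_copy(chunk, idx, sizeof(chunk))) > 0) {
		/* write 'n' bytes at 'chunk' to the store here */
		idx += n;
	}
}
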
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index ab449117aaf..255e1662acd 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -890,7 +890,7 @@ void __rt_mutex_init(struct rt_mutex *lock, const char *name)
890{ 890{
891 lock->owner = NULL; 891 lock->owner = NULL;
892 raw_spin_lock_init(&lock->wait_lock); 892 raw_spin_lock_init(&lock->wait_lock);
893 plist_head_init_raw(&lock->wait_list, &lock->wait_lock); 893 plist_head_init(&lock->wait_list);
894 894
895 debug_rt_mutex_init(lock, name); 895 debug_rt_mutex_init(lock, name);
896} 896}
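The rtmutex hunk tracks the removal of plist_head_init_raw(): a priority-list head no longer records the lock that protects it, so initialization takes only the head and callers rely on whatever lock already serializes the list. A minimal sketch of the new calling convention, with invented names:

#include <linux/plist.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);
static struct plist_head example_waiters;

static void example_setup(void)
{
	/* previously: plist_head_init_raw(&example_waiters, &example_lock) */
	plist_head_init(&example_waiters);
}

static void example_queue_waiter(struct plist_node *node, int prio)
{
	plist_node_init(node, prio);
	raw_spin_lock(&example_lock);
	plist_add(node, &example_waiters);
	raw_spin_unlock(&example_lock);
}
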
diff --git a/kernel/sched.c b/kernel/sched.c
index cd2b7cb638f..89a9c34c64f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -71,6 +71,7 @@
71#include <linux/ctype.h> 71#include <linux/ctype.h>
72#include <linux/ftrace.h> 72#include <linux/ftrace.h>
73#include <linux/slab.h> 73#include <linux/slab.h>
74#include <linux/cpuacct.h>
74 75
75#include <asm/tlb.h> 76#include <asm/tlb.h>
76#include <asm/irq_regs.h> 77#include <asm/irq_regs.h>
@@ -7950,7 +7951,7 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
7950#ifdef CONFIG_SMP 7951#ifdef CONFIG_SMP
7951 rt_rq->rt_nr_migratory = 0; 7952 rt_rq->rt_nr_migratory = 0;
7952 rt_rq->overloaded = 0; 7953 rt_rq->overloaded = 0;
7953 plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock); 7954 plist_head_init(&rt_rq->pushable_tasks);
7954#endif 7955#endif
7955 7956
7956 rt_rq->rt_time = 0; 7957 rt_rq->rt_time = 0;
@@ -8155,7 +8156,7 @@ void __init sched_init(void)
8155#endif 8156#endif
8156 8157
8157#ifdef CONFIG_RT_MUTEXES 8158#ifdef CONFIG_RT_MUTEXES
8158 plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock); 8159 plist_head_init(&init_task.pi_waiters);
8159#endif 8160#endif
8160 8161
8161 /* 8162 /*
@@ -8206,13 +8207,24 @@ static inline int preempt_count_equals(int preempt_offset)
8206 return (nested == preempt_offset); 8207 return (nested == preempt_offset);
8207} 8208}
8208 8209
8210static int __might_sleep_init_called;
8211int __init __might_sleep_init(void)
8212{
8213 __might_sleep_init_called = 1;
8214 return 0;
8215}
8216early_initcall(__might_sleep_init);
8217
8209void __might_sleep(const char *file, int line, int preempt_offset) 8218void __might_sleep(const char *file, int line, int preempt_offset)
8210{ 8219{
8211#ifdef in_atomic 8220#ifdef in_atomic
8212 static unsigned long prev_jiffy; /* ratelimiting */ 8221 static unsigned long prev_jiffy; /* ratelimiting */
8213 8222
8214 if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) || 8223 if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
8215 system_state != SYSTEM_RUNNING || oops_in_progress) 8224 oops_in_progress)
8225 return;
8226 if (system_state != SYSTEM_RUNNING &&
8227 (!__might_sleep_init_called || system_state != SYSTEM_BOOTING))
8216 return; 8228 return;
8217 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 8229 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8218 return; 8230 return;
@@ -8963,6 +8975,20 @@ cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
8963} 8975}
8964 8976
8965static int 8977static int
8978cpu_cgroup_allow_attach(struct cgroup *cgrp, struct task_struct *tsk)
8979{
8980 const struct cred *cred = current_cred(), *tcred;
8981
8982 tcred = __task_cred(tsk);
8983
8984 if ((current != tsk) && !capable(CAP_SYS_NICE) &&
8985 cred->euid != tcred->uid && cred->euid != tcred->suid)
8986 return -EACCES;
8987
8988 return 0;
8989}
8990
8991static int
8966cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk) 8992cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
8967{ 8993{
8968#ifdef CONFIG_RT_GROUP_SCHED 8994#ifdef CONFIG_RT_GROUP_SCHED
@@ -9067,6 +9093,7 @@ struct cgroup_subsys cpu_cgroup_subsys = {
9067 .name = "cpu", 9093 .name = "cpu",
9068 .create = cpu_cgroup_create, 9094 .create = cpu_cgroup_create,
9069 .destroy = cpu_cgroup_destroy, 9095 .destroy = cpu_cgroup_destroy,
9096 .allow_attach = cpu_cgroup_allow_attach,
9070 .can_attach_task = cpu_cgroup_can_attach_task, 9097 .can_attach_task = cpu_cgroup_can_attach_task,
9071 .attach_task = cpu_cgroup_attach_task, 9098 .attach_task = cpu_cgroup_attach_task,
9072 .exit = cpu_cgroup_exit, 9099 .exit = cpu_cgroup_exit,
@@ -9093,8 +9120,30 @@ struct cpuacct {
9093 u64 __percpu *cpuusage; 9120 u64 __percpu *cpuusage;
9094 struct percpu_counter cpustat[CPUACCT_STAT_NSTATS]; 9121 struct percpu_counter cpustat[CPUACCT_STAT_NSTATS];
9095 struct cpuacct *parent; 9122 struct cpuacct *parent;
9123 struct cpuacct_charge_calls *cpufreq_fn;
9124 void *cpuacct_data;
9096}; 9125};
9097 9126
9127static struct cpuacct *cpuacct_root;
9128
9129/* Default calls for cpufreq accounting */
9130static struct cpuacct_charge_calls *cpuacct_cpufreq;
9131int cpuacct_register_cpufreq(struct cpuacct_charge_calls *fn)
9132{
9133 cpuacct_cpufreq = fn;
9134
9135 /*
9136 * Root node is created before platform can register callbacks,
9137	 * initialize here.
9138 */
9139 if (cpuacct_root && fn) {
9140 cpuacct_root->cpufreq_fn = fn;
9141 if (fn->init)
9142 fn->init(&cpuacct_root->cpuacct_data);
9143 }
9144 return 0;
9145}
9146
9098struct cgroup_subsys cpuacct_subsys; 9147struct cgroup_subsys cpuacct_subsys;
9099 9148
9100/* return cpu accounting group corresponding to this container */ 9149/* return cpu accounting group corresponding to this container */
@@ -9129,8 +9178,16 @@ static struct cgroup_subsys_state *cpuacct_create(
9129 if (percpu_counter_init(&ca->cpustat[i], 0)) 9178 if (percpu_counter_init(&ca->cpustat[i], 0))
9130 goto out_free_counters; 9179 goto out_free_counters;
9131 9180
9181 ca->cpufreq_fn = cpuacct_cpufreq;
9182
9183	/* If available, have platform code initialize cpu frequency table */
9184 if (ca->cpufreq_fn && ca->cpufreq_fn->init)
9185 ca->cpufreq_fn->init(&ca->cpuacct_data);
9186
9132 if (cgrp->parent) 9187 if (cgrp->parent)
9133 ca->parent = cgroup_ca(cgrp->parent); 9188 ca->parent = cgroup_ca(cgrp->parent);
9189 else
9190 cpuacct_root = ca;
9134 9191
9135 return &ca->css; 9192 return &ca->css;
9136 9193
@@ -9258,6 +9315,32 @@ static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
9258 return 0; 9315 return 0;
9259} 9316}
9260 9317
9318static int cpuacct_cpufreq_show(struct cgroup *cgrp, struct cftype *cft,
9319 struct cgroup_map_cb *cb)
9320{
9321 struct cpuacct *ca = cgroup_ca(cgrp);
9322 if (ca->cpufreq_fn && ca->cpufreq_fn->cpufreq_show)
9323 ca->cpufreq_fn->cpufreq_show(ca->cpuacct_data, cb);
9324
9325 return 0;
9326}
9327
9328/* return total cpu power usage (milliWatt second) of a group */
9329static u64 cpuacct_powerusage_read(struct cgroup *cgrp, struct cftype *cft)
9330{
9331 int i;
9332 struct cpuacct *ca = cgroup_ca(cgrp);
9333 u64 totalpower = 0;
9334
9335 if (ca->cpufreq_fn && ca->cpufreq_fn->power_usage)
9336 for_each_present_cpu(i) {
9337 totalpower += ca->cpufreq_fn->power_usage(
9338 ca->cpuacct_data);
9339 }
9340
9341 return totalpower;
9342}
9343
9261static struct cftype files[] = { 9344static struct cftype files[] = {
9262 { 9345 {
9263 .name = "usage", 9346 .name = "usage",
@@ -9272,6 +9355,14 @@ static struct cftype files[] = {
9272 .name = "stat", 9355 .name = "stat",
9273 .read_map = cpuacct_stats_show, 9356 .read_map = cpuacct_stats_show,
9274 }, 9357 },
9358 {
9359 .name = "cpufreq",
9360 .read_map = cpuacct_cpufreq_show,
9361 },
9362 {
9363 .name = "power",
9364 .read_u64 = cpuacct_powerusage_read
9365 },
9275}; 9366};
9276 9367
9277static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp) 9368static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
@@ -9301,6 +9392,10 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
9301 for (; ca; ca = ca->parent) { 9392 for (; ca; ca = ca->parent) {
9302 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); 9393 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
9303 *cpuusage += cputime; 9394 *cpuusage += cputime;
9395
9396 /* Call back into platform code to account for CPU speeds */
9397 if (ca->cpufreq_fn && ca->cpufreq_fn->charge)
9398 ca->cpufreq_fn->charge(ca->cpuacct_data, cputime, cpu);
9304 } 9399 }
9305 9400
9306 rcu_read_unlock(); 9401 rcu_read_unlock();
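The cpuacct changes above add a registration hook, cpuacct_register_cpufreq(), through which platform code supplies per-frequency charge/show/power callbacks. The layout of struct cpuacct_charge_calls is not part of this hunk; the prototypes below are inferred from the call sites (fn->init, fn->charge, fn->power_usage) and should be treated as assumptions, as should every example_* name.

#include <linux/cpuacct.h>
#include <linux/init.h>
#include <linux/slab.h>

static void example_acct_init(void **ca_data)
{
	/* per-cgroup accounting state; a single counter for brevity */
	*ca_data = kzalloc(sizeof(u64), GFP_KERNEL);
}

static void example_acct_charge(void *ca_data, u64 cputime, unsigned int cpu)
{
	/* a real implementation would bucket by the current frequency */
	if (ca_data)
		*(u64 *)ca_data += cputime;
}

static u64 example_acct_power(void *ca_data)
{
	/* placeholder: derive milliWatt-seconds from the per-frequency table */
	return ca_data ? *(u64 *)ca_data : 0;
}

static struct cpuacct_charge_calls example_acct_calls = {
	.init		= example_acct_init,
	.charge		= example_acct_charge,
	.power_usage	= example_acct_power,
};

static int __init example_acct_setup(void)
{
	return cpuacct_register_cpufreq(&example_acct_calls);
}
early_initcall(example_acct_setup);
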
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index aaaa7e749ad..5b6afb27e8b 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -96,6 +96,7 @@ extern char core_pattern[];
96extern unsigned int core_pipe_limit; 96extern unsigned int core_pipe_limit;
97extern int pid_max; 97extern int pid_max;
98extern int min_free_kbytes; 98extern int min_free_kbytes;
99extern int min_free_order_shift;
99extern int pid_max_min, pid_max_max; 100extern int pid_max_min, pid_max_max;
100extern int sysctl_drop_caches; 101extern int sysctl_drop_caches;
101extern int percpu_pagelist_fraction; 102extern int percpu_pagelist_fraction;
@@ -1189,6 +1190,13 @@ static struct ctl_table vm_table[] = {
1189 .extra1 = &zero, 1190 .extra1 = &zero,
1190 }, 1191 },
1191 { 1192 {
1193 .procname = "min_free_order_shift",
1194 .data = &min_free_order_shift,
1195 .maxlen = sizeof(min_free_order_shift),
1196 .mode = 0644,
1197 .proc_handler = &proc_dointvec
1198 },
1199 {
1192 .procname = "percpu_pagelist_fraction", 1200 .procname = "percpu_pagelist_fraction",
1193 .data = &percpu_pagelist_fraction, 1201 .data = &percpu_pagelist_fraction,
1194 .maxlen = sizeof(percpu_pagelist_fraction), 1202 .maxlen = sizeof(percpu_pagelist_fraction),
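The new vm_table entry exposes min_free_order_shift (defined in mm/page_alloc.c; only the extern is visible here) as /proc/sys/vm/min_free_order_shift. In this tree it controls how quickly the page allocator's free-page watermark is scaled down for higher-order allocations. A trivial user-space sketch for reading the knob, assuming nothing beyond the procfs path the entry creates:

#include <stdio.h>

int main(void)
{
	int shift;
	FILE *f = fopen("/proc/sys/vm/min_free_order_shift", "r");

	if (!f || fscanf(f, "%d", &shift) != 1)
		return 1;
	printf("vm.min_free_order_shift = %d\n", shift);
	fclose(f);
	return 0;
}
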
diff --git a/kernel/time/Makefile b/kernel/time/Makefile
index e2fd74b8e8c..cae2ad7491b 100644
--- a/kernel/time/Makefile
+++ b/kernel/time/Makefile
@@ -1,5 +1,5 @@
1obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o timecompare.o 1obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o timecompare.o
2obj-y += timeconv.o posix-clock.o alarmtimer.o 2obj-y += timeconv.o posix-clock.o #alarmtimer.o
3 3
4obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD) += clockevents.o 4obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD) += clockevents.o
5obj-$(CONFIG_GENERIC_CLOCKEVENTS) += tick-common.o 5obj-$(CONFIG_GENERIC_CLOCKEVENTS) += tick-common.o
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index c3cbd8c34b4..5928f9559da 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1053,7 +1053,7 @@ void get_monotonic_boottime(struct timespec *ts)
1053 } while (read_seqretry(&xtime_lock, seq)); 1053 } while (read_seqretry(&xtime_lock, seq));
1054 1054
1055 set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec + sleep.tv_sec, 1055 set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec + sleep.tv_sec,
1056 ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec + nsecs); 1056 (s64)ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec + nsecs);
1057} 1057}
1058EXPORT_SYMBOL_GPL(get_monotonic_boottime); 1058EXPORT_SYMBOL_GPL(get_monotonic_boottime);
1059 1059
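The timekeeping hunk is a small but real fix: tv_nsec is a long, so on a 32-bit kernel the four nanosecond terms (each up to roughly 10^9) can exceed LONG_MAX before set_normalized_timespec() has a chance to carry the excess into seconds; casting the first operand to s64 promotes the whole sum to 64-bit arithmetic. A stand-alone user-space illustration of the overflow (not kernel code, values chosen only to exceed 2^31):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint32_t ns = 900000000, tomono = 900000000,
		 sleep = 900000000, extra = 900000000;

	/* what a 32-bit signed long ends up holding: read back as
	 * signed, the 3.6e9 ns total goes negative */
	int32_t sum32 = (int32_t)(ns + tomono + sleep + extra);
	/* what the (s64) promotion yields: the true total, ready to be
	 * normalized into whole seconds plus a sane remainder */
	int64_t sum64 = (int64_t)ns + tomono + sleep + extra;

	printf("32-bit sum: %" PRId32 " ns\n", sum32);
	printf("64-bit sum: %" PRId64 " ns\n", sum64);
	return 0;
}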