path: root/kernel
author    Colin Cross  2013-03-13 19:16:02 -0500
committer Colin Cross  2013-03-13 19:16:02 -0500
commit    0b203ab4aacdb6e6dfb8c277aa290f0a02428e6f (patch)
tree      9bab760a750d9cf4504d92603ee7fc82beeb2e38 /kernel
parent    a276def548828763cf5ac228adf8c1ca73f8d4d3 (diff)
parent    e28c3f2b514b5581e15614f7cf976131092cf4b6 (diff)
Merge tag 'v3.0.68' into android-3.0
This is the 3.0.68 stable release

Conflicts:
	kernel/cgroup.c

Change-Id: I067982d25e18e3a12de93a5eb6429b8829d7ca11
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup.c            |  8
-rw-r--r--  kernel/cpuset.c            | 12
-rw-r--r--  kernel/hrtimer.c           | 36
-rw-r--r--  kernel/irq/spurious.c      |  7
-rw-r--r--  kernel/posix-cpu-timers.c  | 23
-rw-r--r--  kernel/posix-timers.c      |  7
-rw-r--r--  kernel/printk.c            | 13
-rw-r--r--  kernel/ptrace.c            | 63
-rw-r--r--  kernel/resource.c          | 50
-rw-r--r--  kernel/sched.c             |  3
-rw-r--r--  kernel/signal.c            | 17
-rw-r--r--  kernel/smp.c               | 13
-rw-r--r--  kernel/sysctl_binary.c     |  3
-rw-r--r--  kernel/timeconst.pl        |  6
-rw-r--r--  kernel/trace/ftrace.c      | 48
-rw-r--r--  kernel/trace/ring_buffer.c |  2
16 files changed, 230 insertions(+), 81 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 3948f0af58f..460aa1b0f50 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -366,10 +366,18 @@ static void free_css_set_work(struct work_struct *work)
 		struct cgroup *cgrp = link->cgrp;
 		list_del(&link->cg_link_list);
 		list_del(&link->cgrp_link_list);
+		/*
+		 * We may not be holding cgroup_mutex, and if cgrp->count is
+		 * dropped to 0 the cgroup can be destroyed at any time, hence
+		 * rcu_read_lock is used to keep it alive.
+		 */
+		rcu_read_lock();
 		if (atomic_dec_and_test(&cgrp->count)) {
 			check_for_release(cgrp);
 			cgroup_wakeup_rmdir_waiter(cgrp);
 		}
+		rcu_read_unlock();
+
 		kfree(link);
 	}
 	write_unlock(&css_set_lock);
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 6cbe0330249..ea76c9c5d42 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2499,8 +2499,16 @@ void cpuset_print_task_mems_allowed(struct task_struct *tsk)
 
 	dentry = task_cs(tsk)->css.cgroup->dentry;
 	spin_lock(&cpuset_buffer_lock);
-	snprintf(cpuset_name, CPUSET_NAME_LEN,
-		 dentry ? (const char *)dentry->d_name.name : "/");
+
+	if (!dentry) {
+		strcpy(cpuset_name, "/");
+	} else {
+		spin_lock(&dentry->d_lock);
+		strlcpy(cpuset_name, (const char *)dentry->d_name.name,
+			CPUSET_NAME_LEN);
+		spin_unlock(&dentry->d_lock);
+	}
+
 	nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN,
 			   tsk->mems_allowed);
 	printk(KERN_INFO "%s cpuset=%s mems_allowed=%s\n",
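
The kernel/cpuset.c hunk above stops using the cgroup dentry name as the snprintf() format string and instead takes d_lock and does a bounded strlcpy(), so the name is treated as data and cannot be read while a rename is rewriting it. A minimal userspace sketch of the copy-as-data half of that pattern (buffer size and name are illustrative, not the kernel's):

#include <stdio.h>

#define NAME_LEN 64

int main(void)
{
	/* hypothetical cgroup name; a '%' in it would be parsed as a
	 * conversion specifier if the name were used as the format */
	const char *name = "grp-%s-100";
	char buf[NAME_LEN];

	/* old pattern: snprintf(buf, sizeof(buf), name);  -- unsafe */
	snprintf(buf, sizeof(buf), "%s", name);	/* safe: name is an argument */
	printf("%s\n", buf);
	return 0;
}
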
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 957869fd596..e079c3e42fa 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -640,21 +640,9 @@ static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
  * and expiry check is done in the hrtimer_interrupt or in the softirq.
  */
 static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
-					    struct hrtimer_clock_base *base,
-					    int wakeup)
+					    struct hrtimer_clock_base *base)
 {
-	if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
-		if (wakeup) {
-			raw_spin_unlock(&base->cpu_base->lock);
-			raise_softirq_irqoff(HRTIMER_SOFTIRQ);
-			raw_spin_lock(&base->cpu_base->lock);
-		} else
-			__raise_softirq_irqoff(HRTIMER_SOFTIRQ);
-
-		return 1;
-	}
-
-	return 0;
+	return base->cpu_base->hres_active && hrtimer_reprogram(timer, base);
 }
 
 static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
@@ -735,8 +723,7 @@ static inline int hrtimer_switch_to_hres(void) { return 0; }
 static inline void
 hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
 static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
-					    struct hrtimer_clock_base *base,
-					    int wakeup)
+					    struct hrtimer_clock_base *base)
 {
 	return 0;
 }
@@ -995,8 +982,21 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
 	 *
 	 * XXX send_remote_softirq() ?
 	 */
-	if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases))
-		hrtimer_enqueue_reprogram(timer, new_base, wakeup);
+	if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)
+		&& hrtimer_enqueue_reprogram(timer, new_base)) {
+		if (wakeup) {
+			/*
+			 * We need to drop cpu_base->lock to avoid a
+			 * lock ordering issue vs. rq->lock.
+			 */
+			raw_spin_unlock(&new_base->cpu_base->lock);
+			raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+			local_irq_restore(flags);
+			return ret;
+		} else {
+			__raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+		}
+	}
 
 	unlock_hrtimer_base(timer, &flags);
 
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index dc813a948be..63633a320fb 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -80,13 +80,11 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
 
 	/*
 	 * All handlers must agree on IRQF_SHARED, so we test just the
-	 * first. Check for action->next as well.
+	 * first.
 	 */
 	action = desc->action;
 	if (!action || !(action->flags & IRQF_SHARED) ||
-	    (action->flags & __IRQF_TIMER) ||
-	    (action->handler(irq, action->dev_id) == IRQ_HANDLED) ||
-	    !action->next)
+	    (action->flags & __IRQF_TIMER))
 		goto out;
 
 	/* Already running on another processor */
@@ -104,6 +102,7 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
 	do {
 		if (handle_irq_event(desc) == IRQ_HANDLED)
 			ret = IRQ_HANDLED;
+		/* Make sure that there is still a valid action */
 		action = desc->action;
 	} while ((desc->istate & IRQS_PENDING) && action);
 	desc->istate &= ~IRQS_POLL_INPROGRESS;
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 640ded8f5c4..93d5e4a31fb 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -1450,8 +1450,10 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
 	while (!signal_pending(current)) {
 		if (timer.it.cpu.expires.sched == 0) {
 			/*
-			 * Our timer fired and was reset.
+			 * Our timer fired and was reset, below
+			 * deletion can not fail.
 			 */
+			posix_cpu_timer_del(&timer);
 			spin_unlock_irq(&timer.it_lock);
 			return 0;
 		}
@@ -1469,9 +1471,26 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
 	 * We were interrupted by a signal.
 	 */
 	sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
-	posix_cpu_timer_set(&timer, 0, &zero_it, it);
+	error = posix_cpu_timer_set(&timer, 0, &zero_it, it);
+	if (!error) {
+		/*
+		 * Timer is now unarmed, deletion can not fail.
+		 */
+		posix_cpu_timer_del(&timer);
+	}
 	spin_unlock_irq(&timer.it_lock);
 
+	while (error == TIMER_RETRY) {
+		/*
+		 * We need to handle case when timer was or is in the
+		 * middle of firing. In other cases we already freed
+		 * resources.
+		 */
+		spin_lock_irq(&timer.it_lock);
+		error = posix_cpu_timer_del(&timer);
+		spin_unlock_irq(&timer.it_lock);
+	}
+
 	if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
 		/*
 		 * It actually did fire already.
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 4556182527f..d2da8ad45b3 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -639,6 +639,13 @@ static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags)
 {
 	struct k_itimer *timr;
 
+	/*
+	 * timer_t could be any type >= int and we want to make sure any
+	 * @timer_id outside positive int range fails lookup.
+	 */
+	if ((unsigned long long)timer_id > INT_MAX)
+		return NULL;
+
 	rcu_read_lock();
 	timr = idr_find(&posix_timers_id, (int)timer_id);
 	if (timr) {
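
The __lock_timer() change above rejects any timer_id outside positive int range before the idr lookup. On ABIs where timer_t is wider than int, the bare (int) cast could otherwise make two distinct ids alias the same slot. A standalone sketch of that truncation hazard (the typedef and helper are made up for illustration):

#include <limits.h>
#include <stdio.h>

typedef long long ktimer_id;	/* stand-in for a 64-bit timer_t */

static int lookup_slot(ktimer_id id)
{
	/* the added range check: fail the lookup up front */
	if ((unsigned long long)id > INT_MAX)
		return -1;
	return (int)id;		/* truncation is now known to be lossless */
}

int main(void)
{
	printf("%d\n", lookup_slot(5));			/* 5 */
	printf("%d\n", lookup_slot(0x100000005LL));	/* -1; truncates to 5 without the check */
	return 0;
}
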
diff --git a/kernel/printk.c b/kernel/printk.c
index 24146142bc0..a1d702c1313 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -684,8 +684,19 @@ static void call_console_drivers(unsigned start, unsigned end)
 	start_print = start;
 	while (cur_index != end) {
 		if (msg_level < 0 && ((end - cur_index) > 2)) {
+			/*
+			 * prepare buf_prefix, as a contiguous array,
+			 * to be processed by log_prefix function
+			 */
+			char buf_prefix[SYSLOG_PRI_MAX_LENGTH+1];
+			unsigned i;
+			for (i = 0; i < ((end - cur_index)) && (i < SYSLOG_PRI_MAX_LENGTH); i++) {
+				buf_prefix[i] = LOG_BUF(cur_index + i);
+			}
+			buf_prefix[i] = '\0'; /* force '\0' as last string character */
+
 			/* strip log prefix */
-			cur_index += log_prefix(&LOG_BUF(cur_index), &msg_level, NULL);
+			cur_index += log_prefix((const char *)&buf_prefix, &msg_level, NULL);
 			start_print = cur_index;
 		}
 		while (cur_index != end) {
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 2df115790cd..40581ee5680 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -38,6 +38,36 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
 	child->parent = new_parent;
 }
 
+/* Ensure that nothing can wake it up, even SIGKILL */
+static bool ptrace_freeze_traced(struct task_struct *task)
+{
+	bool ret = false;
+
+	spin_lock_irq(&task->sighand->siglock);
+	if (task_is_traced(task) && !__fatal_signal_pending(task)) {
+		task->state = __TASK_TRACED;
+		ret = true;
+	}
+	spin_unlock_irq(&task->sighand->siglock);
+
+	return ret;
+}
+
+static void ptrace_unfreeze_traced(struct task_struct *task)
+{
+	if (task->state != __TASK_TRACED)
+		return;
+
+	WARN_ON(!task->ptrace || task->parent != current);
+
+	spin_lock_irq(&task->sighand->siglock);
+	if (__fatal_signal_pending(task))
+		wake_up_state(task, __TASK_TRACED);
+	else
+		task->state = TASK_TRACED;
+	spin_unlock_irq(&task->sighand->siglock);
+}
+
 /**
  * __ptrace_unlink - unlink ptracee and restore its execution state
  * @child: ptracee to be unlinked
@@ -92,7 +122,7 @@ void __ptrace_unlink(struct task_struct *child)
 	 * TASK_KILLABLE sleeps.
 	 */
 	if (child->group_stop & GROUP_STOP_PENDING || task_is_traced(child))
-		signal_wake_up(child, task_is_traced(child));
+		ptrace_signal_wake_up(child, true);
 
 	spin_unlock(&child->sighand->siglock);
 }
@@ -112,23 +142,29 @@ int ptrace_check_attach(struct task_struct *child, int kill)
 	 * be changed by us so it's not changing right after this.
 	 */
 	read_lock(&tasklist_lock);
-	if ((child->ptrace & PT_PTRACED) && child->parent == current) {
+	if (child->ptrace && child->parent == current) {
+		WARN_ON(child->state == __TASK_TRACED);
 		/*
 		 * child->sighand can't be NULL, release_task()
 		 * does ptrace_unlink() before __exit_signal().
 		 */
-		spin_lock_irq(&child->sighand->siglock);
-		WARN_ON_ONCE(task_is_stopped(child));
-		if (task_is_traced(child) || kill)
+		if (kill || ptrace_freeze_traced(child))
 			ret = 0;
-		spin_unlock_irq(&child->sighand->siglock);
 	}
 	read_unlock(&tasklist_lock);
 
-	if (!ret && !kill)
-		ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;
+	if (!ret && !kill) {
+		if (!wait_task_inactive(child, __TASK_TRACED)) {
+			/*
+			 * This can only happen if may_ptrace_stop() fails and
+			 * ptrace_stop() changes ->state back to TASK_RUNNING,
+			 * so we should not worry about leaking __TASK_TRACED.
+			 */
+			WARN_ON(child->state == __TASK_TRACED);
+			ret = -ESRCH;
+		}
+	}
 
-	/* All systems go.. */
 	return ret;
 }
 
@@ -245,7 +281,7 @@ static int ptrace_attach(struct task_struct *task)
 	 */
 	if (task_is_stopped(task)) {
 		task->group_stop |= GROUP_STOP_PENDING | GROUP_STOP_TRAPPING;
-		signal_wake_up(task, 1);
+		signal_wake_up_state(task, __TASK_STOPPED);
 		wait_trap = true;
 	}
 
@@ -777,6 +813,8 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
 		goto out_put_task_struct;
 
 	ret = arch_ptrace(child, request, addr, data);
+	if (ret || request != PTRACE_DETACH)
+		ptrace_unfreeze_traced(child);
 
  out_put_task_struct:
 	put_task_struct(child);
@@ -915,8 +953,11 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
 	}
 
 	ret = ptrace_check_attach(child, request == PTRACE_KILL);
-	if (!ret)
+	if (!ret) {
 		ret = compat_arch_ptrace(child, request, addr, data);
+		if (ret || request != PTRACE_DETACH)
+			ptrace_unfreeze_traced(child);
+	}
 
  out_put_task_struct:
 	put_task_struct(child);
diff --git a/kernel/resource.c b/kernel/resource.c
index b29b83d042f..d005cd30e5e 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -736,6 +736,7 @@ static void __init __reserve_region_with_split(struct resource *root,
 	struct resource *parent = root;
 	struct resource *conflict;
 	struct resource *res = kzalloc(sizeof(*res), GFP_ATOMIC);
+	struct resource *next_res = NULL;
 
 	if (!res)
 		return;
@@ -745,21 +746,46 @@ static void __init __reserve_region_with_split(struct resource *root,
 	res->end = end;
 	res->flags = IORESOURCE_BUSY;
 
-	conflict = __request_resource(parent, res);
-	if (!conflict)
-		return;
+	while (1) {
 
-	/* failed, split and try again */
-	kfree(res);
+		conflict = __request_resource(parent, res);
+		if (!conflict) {
+			if (!next_res)
+				break;
+			res = next_res;
+			next_res = NULL;
+			continue;
+		}
 
-	/* conflict covered whole area */
-	if (conflict->start <= start && conflict->end >= end)
-		return;
+		/* conflict covered whole area */
+		if (conflict->start <= res->start &&
+				conflict->end >= res->end) {
+			kfree(res);
+			WARN_ON(next_res);
+			break;
+		}
+
+		/* failed, split and try again */
+		if (conflict->start > res->start) {
+			end = res->end;
+			res->end = conflict->start - 1;
+			if (conflict->end < end) {
+				next_res = kzalloc(sizeof(*next_res),
+						GFP_ATOMIC);
+				if (!next_res) {
+					kfree(res);
+					break;
+				}
+				next_res->name = name;
+				next_res->start = conflict->end + 1;
+				next_res->end = end;
+				next_res->flags = IORESOURCE_BUSY;
+			}
+		} else {
+			res->start = conflict->end + 1;
+		}
+	}
 
-	if (conflict->start > start)
-		__reserve_region_with_split(root, start, conflict->start-1, name);
-	if (conflict->end < end)
-		__reserve_region_with_split(root, conflict->end+1, end, name);
 }
 
 void __init reserve_region_with_split(struct resource *root,
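
The kernel/resource.c rewrite above turns the recursive split-and-retry into a loop that carries at most one deferred right-hand fragment in next_res, which bounds stack usage no matter how many conflicts are hit. A userspace sketch of just the splitting arithmetic (struct and function names are invented for illustration):

#include <stdio.h>

struct range { long start, end; };

/* Split request req around an overlapping conflict; returns how many
 * fragments (0, 1 or 2) were written to out[]. */
static int split_around(struct range req, struct range conflict, struct range out[2])
{
	int n = 0;

	if (conflict.start <= req.start && conflict.end >= req.end)
		return 0;			/* conflict covers the whole request */
	if (conflict.start > req.start) {
		out[n].start = req.start;	/* left fragment survives */
		out[n].end = conflict.start - 1;
		n++;
	}
	if (conflict.end < req.end) {
		out[n].start = conflict.end + 1; /* right fragment survives */
		out[n].end = req.end;
		n++;
	}
	return n;
}

int main(void)
{
	struct range out[2];
	int n = split_around((struct range){0, 100}, (struct range){40, 60}, out);

	for (int i = 0; i < n; i++)
		printf("[%ld, %ld]\n", out[i].start, out[i].end);
	return 0;
}
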
diff --git a/kernel/sched.c b/kernel/sched.c
index e788b663b79..89a9c34c64f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2779,7 +2779,8 @@ out:
  */
 int wake_up_process(struct task_struct *p)
 {
-	return try_to_wake_up(p, TASK_ALL, 0);
+	WARN_ON(task_is_stopped_or_traced(p));
+	return try_to_wake_up(p, TASK_NORMAL, 0);
 }
 EXPORT_SYMBOL(wake_up_process);
 
diff --git a/kernel/signal.c b/kernel/signal.c
index 43fee1cf50d..51f2e694ec6 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -631,23 +631,17 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
  * No need to set need_resched since signal event passing
  * goes through ->blocked
  */
-void signal_wake_up(struct task_struct *t, int resume)
+void signal_wake_up_state(struct task_struct *t, unsigned int state)
 {
-	unsigned int mask;
-
 	set_tsk_thread_flag(t, TIF_SIGPENDING);
-
 	/*
-	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
+	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
 	 * case. We don't check t->state here because there is a race with it
 	 * executing another processor and just now entering stopped state.
 	 * By using wake_up_state, we ensure the process will wake up and
 	 * handle its death signal.
 	 */
-	mask = TASK_INTERRUPTIBLE;
-	if (resume)
-		mask |= TASK_WAKEKILL;
-	if (!wake_up_state(t, mask))
+	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
 		kick_process(t);
 }
 
@@ -1675,6 +1669,10 @@ static inline int may_ptrace_stop(void)
 	 * If SIGKILL was already sent before the caller unlocked
 	 * ->siglock we must see ->core_state != NULL. Otherwise it
 	 * is safe to enter schedule().
+	 *
+	 * This is almost outdated, a task with the pending SIGKILL can't
+	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
+	 * after SIGKILL was already dequeued.
 	 */
 	if (unlikely(current->mm->core_state) &&
 	    unlikely(current->mm == current->parent->mm))
@@ -1806,6 +1804,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
 	if (gstop_done)
 		do_notify_parent_cldstop(current, false, why);
 
+	/* tasklist protects us from ptrace_freeze_traced() */
 	__set_current_state(TASK_RUNNING);
 	if (clear_code)
 		current->exit_code = 0;
diff --git a/kernel/smp.c b/kernel/smp.c
index fb67dfa8394..38d9e033bbc 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -31,6 +31,7 @@ struct call_function_data {
 	struct call_single_data	csd;
 	atomic_t		refs;
 	cpumask_var_t		cpumask;
+	cpumask_var_t		cpumask_ipi;
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
@@ -54,6 +55,9 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
 				cpu_to_node(cpu)))
 			return notifier_from_errno(-ENOMEM);
+		if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
+				cpu_to_node(cpu)))
+			return notifier_from_errno(-ENOMEM);
 		break;
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -63,6 +67,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
 		free_cpumask_var(cfd->cpumask);
+		free_cpumask_var(cfd->cpumask_ipi);
 		break;
 #endif
 	};
@@ -524,6 +529,12 @@ void smp_call_function_many(const struct cpumask *mask,
 		return;
 	}
 
+	/*
+	 * After we put an entry into the list, data->cpumask
+	 * may be cleared again when another CPU sends another IPI for
+	 * a SMP function call, so data->cpumask will be zero.
+	 */
+	cpumask_copy(data->cpumask_ipi, data->cpumask);
 	raw_spin_lock_irqsave(&call_function.lock, flags);
 	/*
 	 * Place entry at the _HEAD_ of the list, so that any cpu still
@@ -547,7 +558,7 @@ void smp_call_function_many(const struct cpumask *mask,
 	smp_mb();
 
 	/* Send a message to all CPUs in the map */
-	arch_send_call_function_ipi_mask(data->cpumask);
+	arch_send_call_function_ipi_mask(data->cpumask_ipi);
 
 	/* Optionally wait for the CPUs to complete */
 	if (wait)
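
The smp.c change above copies data->cpumask into data->cpumask_ipi before the entry is published, because (as the added comment notes) other CPUs can clear bits in the shared mask once they start processing, so the IPIs must be sent from a private snapshot. A loose single-threaded analogy using plain integers rather than the kernel cpumask API:

#include <stdio.h>

int main(void)
{
	unsigned long shared_mask = 0xB;	/* CPUs 0, 1 and 3 still pending */
	unsigned long ipi_mask = shared_mask;	/* private snapshot taken first */

	/* ...entry published; other CPUs may clear bits from here on... */
	shared_mask &= ~0x1UL;			/* e.g. CPU 0 already finished */

	/* decide whom to signal from the snapshot, not the live mask */
	for (int cpu = 0; cpu < 4; cpu++)
		if (ipi_mask & (1UL << cpu))
			printf("IPI -> cpu%d\n", cpu);
	return 0;
}
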
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index e055e8b533c..17c20c7563a 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -1194,9 +1194,10 @@ static ssize_t bin_dn_node_address(struct file *file,
 
 	/* Convert the decnet address to binary */
 	result = -EIO;
-	nodep = strchr(buf, '.') + 1;
+	nodep = strchr(buf, '.');
 	if (!nodep)
 		goto out;
+	++nodep;
 
 	area = simple_strtoul(buf, NULL, 10);
 	node = simple_strtoul(nodep, NULL, 10);
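
The sysctl_binary.c fix above matters because strchr() returns NULL when no '.' is present; adding 1 before the NULL test means the test can never trigger and nodep ends up pointing at address 1. A small userspace sketch of the corrected order (the helper is illustrative, not the kernel function):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_dn_address(const char *buf, unsigned long *area, unsigned long *node)
{
	const char *nodep = strchr(buf, '.');	/* may be NULL */

	if (!nodep)
		return -1;			/* no '.': reject before advancing */
	++nodep;				/* only now step past the dot */

	*area = strtoul(buf, NULL, 10);
	*node = strtoul(nodep, NULL, 10);
	return 0;
}

int main(void)
{
	unsigned long area, node;

	if (parse_dn_address("1.42", &area, &node) == 0)
		printf("area=%lu node=%lu\n", area, node);
	return 0;
}
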
diff --git a/kernel/timeconst.pl b/kernel/timeconst.pl
index eb51d76e058..3f42652a6a3 100644
--- a/kernel/timeconst.pl
+++ b/kernel/timeconst.pl
@@ -369,10 +369,8 @@ if ($hz eq '--can') {
 		die "Usage: $0 HZ\n";
 	}
 
-	@val = @{$canned_values{$hz}};
-	if (!defined(@val)) {
-		@val = compute_values($hz);
-	}
+	$cv = $canned_values{$hz};
+	@val = defined($cv) ? @$cv : compute_values($hz);
 	output($hz, @val);
 }
 exit 0;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index f88ea18d2d9..86fd4170d24 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3432,35 +3432,49 @@ static void ftrace_init_module(struct module *mod,
 	ftrace_process_locs(mod, start, end);
 }
 
-static int ftrace_module_notify(struct notifier_block *self,
+static int ftrace_module_notify_enter(struct notifier_block *self,
 				unsigned long val, void *data)
 {
 	struct module *mod = data;
 
-	switch (val) {
-	case MODULE_STATE_COMING:
+	if (val == MODULE_STATE_COMING)
 		ftrace_init_module(mod, mod->ftrace_callsites,
 				   mod->ftrace_callsites +
 				   mod->num_ftrace_callsites);
-		break;
-	case MODULE_STATE_GOING:
+	return 0;
+}
+
+static int ftrace_module_notify_exit(struct notifier_block *self,
+				unsigned long val, void *data)
+{
+	struct module *mod = data;
+
+	if (val == MODULE_STATE_GOING)
 		ftrace_release_mod(mod);
-		break;
-	}
 
 	return 0;
 }
 #else
-static int ftrace_module_notify(struct notifier_block *self,
+static int ftrace_module_notify_enter(struct notifier_block *self,
 				unsigned long val, void *data)
+{
+	return 0;
+}
+static int ftrace_module_notify_exit(struct notifier_block *self,
+				unsigned long val, void *data)
 {
 	return 0;
 }
 #endif /* CONFIG_MODULES */
 
-struct notifier_block ftrace_module_nb = {
-	.notifier_call = ftrace_module_notify,
-	.priority = 0,
+struct notifier_block ftrace_module_enter_nb = {
+	.notifier_call = ftrace_module_notify_enter,
+	.priority = INT_MAX,	/* Run before anything that can use kprobes */
+};
+
+struct notifier_block ftrace_module_exit_nb = {
+	.notifier_call = ftrace_module_notify_exit,
+	.priority = INT_MIN,	/* Run after anything that can remove kprobes */
 };
 
 extern unsigned long __start_mcount_loc[];
@@ -3494,9 +3508,13 @@ void __init ftrace_init(void)
 					 __start_mcount_loc,
 					 __stop_mcount_loc);
 
-	ret = register_module_notifier(&ftrace_module_nb);
+	ret = register_module_notifier(&ftrace_module_enter_nb);
+	if (ret)
+		pr_warning("Failed to register trace ftrace module enter notifier\n");
+
+	ret = register_module_notifier(&ftrace_module_exit_nb);
 	if (ret)
-		pr_warning("Failed to register trace ftrace module notifier\n");
+		pr_warning("Failed to register trace ftrace module exit notifier\n");
 
 	set_ftrace_early_filters();
 
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index b0c7aa40794..20dff64b521 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2926,6 +2926,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	 * Splice the empty reader page into the list around the head.
 	 */
 	reader = rb_set_head_page(cpu_buffer);
+	if (!reader)
+		goto out;
 	cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
 	cpu_buffer->reader_page->list.prev = reader->list.prev;
 