diff options
author | Alex Shi | 2017-05-25 23:03:27 -0500 |
---|---|---|
committer | Alex Shi | 2017-05-25 23:03:27 -0500 |
commit | be8cec38166830bd968b13389b8b22da6913f439 (patch) | |
tree | 5e68b20a71b42001ef9fbb9dcec4100089097dbe /kernel | |
parent | c8603c0171b20e28b3d22a3af0b6a04ccafb2111 (diff) | |
parent | b409ba3b053501181d47a35769fe61823da012e9 (diff) | |
download | kernel-omap-be8cec38166830bd968b13389b8b22da6913f439.tar.gz kernel-omap-be8cec38166830bd968b13389b8b22da6913f439.tar.xz kernel-omap-be8cec38166830bd968b13389b8b22da6913f439.zip |
Merge tag 'v4.4.70' into linux-linaro-lsk-v4.4
This is the 4.4.70 stable release
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/fork.c | 8 | ||||
-rw-r--r-- | kernel/irq/chip.c | 2 | ||||
-rw-r--r-- | kernel/kprobes.c | 2 | ||||
-rw-r--r-- | kernel/pid_namespace.c | 2 | ||||
-rw-r--r-- | kernel/sched/fair.c | 29 | ||||
-rw-r--r-- | kernel/sched/sched.h | 2 | ||||
-rw-r--r-- | kernel/trace/trace_kprobe.c | 5 |
7 files changed, 39 insertions, 11 deletions
diff --git a/kernel/fork.c b/kernel/fork.c index 278a2ddad351..0ee630f3ad4b 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -1590,11 +1590,13 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1590 | */ | 1590 | */ |
1591 | recalc_sigpending(); | 1591 | recalc_sigpending(); |
1592 | if (signal_pending(current)) { | 1592 | if (signal_pending(current)) { |
1593 | spin_unlock(&current->sighand->siglock); | ||
1594 | write_unlock_irq(&tasklist_lock); | ||
1595 | retval = -ERESTARTNOINTR; | 1593 | retval = -ERESTARTNOINTR; |
1596 | goto bad_fork_cancel_cgroup; | 1594 | goto bad_fork_cancel_cgroup; |
1597 | } | 1595 | } |
1596 | if (unlikely(!(ns_of_pid(pid)->nr_hashed & PIDNS_HASH_ADDING))) { | ||
1597 | retval = -ENOMEM; | ||
1598 | goto bad_fork_cancel_cgroup; | ||
1599 | } | ||
1598 | 1600 | ||
1599 | if (likely(p->pid)) { | 1601 | if (likely(p->pid)) { |
1600 | ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace); | 1602 | ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace); |
@@ -1645,6 +1647,8 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1645 | return p; | 1647 | return p; |
1646 | 1648 | ||
1647 | bad_fork_cancel_cgroup: | 1649 | bad_fork_cancel_cgroup: |
1650 | spin_unlock(&current->sighand->siglock); | ||
1651 | write_unlock_irq(&tasklist_lock); | ||
1648 | cgroup_cancel_fork(p, cgrp_ss_priv); | 1652 | cgroup_cancel_fork(p, cgrp_ss_priv); |
1649 | bad_fork_free_pid: | 1653 | bad_fork_free_pid: |
1650 | threadgroup_change_end(current); | 1654 | threadgroup_change_end(current); |
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 15206453b12a..e4453d9f788c 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c | |||
@@ -810,8 +810,8 @@ irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle, | |||
810 | if (!desc) | 810 | if (!desc) |
811 | return; | 811 | return; |
812 | 812 | ||
813 | __irq_do_set_handler(desc, handle, 1, NULL); | ||
814 | desc->irq_common_data.handler_data = data; | 813 | desc->irq_common_data.handler_data = data; |
814 | __irq_do_set_handler(desc, handle, 1, NULL); | ||
815 | 815 | ||
816 | irq_put_desc_busunlock(desc, flags); | 816 | irq_put_desc_busunlock(desc, flags); |
817 | } | 817 | } |
diff --git a/kernel/kprobes.c b/kernel/kprobes.c index d10ab6b9b5e0..695763516908 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c | |||
@@ -563,7 +563,7 @@ static void kprobe_optimizer(struct work_struct *work) | |||
563 | } | 563 | } |
564 | 564 | ||
565 | /* Wait for completing optimization and unoptimization */ | 565 | /* Wait for completing optimization and unoptimization */ |
566 | static void wait_for_kprobe_optimizer(void) | 566 | void wait_for_kprobe_optimizer(void) |
567 | { | 567 | { |
568 | mutex_lock(&kprobe_mutex); | 568 | mutex_lock(&kprobe_mutex); |
569 | 569 | ||
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c index a65ba137fd15..567ecc826bc8 100644 --- a/kernel/pid_namespace.c +++ b/kernel/pid_namespace.c | |||
@@ -255,7 +255,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns) | |||
255 | * if reparented. | 255 | * if reparented. |
256 | */ | 256 | */ |
257 | for (;;) { | 257 | for (;;) { |
258 | set_current_state(TASK_UNINTERRUPTIBLE); | 258 | set_current_state(TASK_INTERRUPTIBLE); |
259 | if (pid_ns->nr_hashed == init_pids) | 259 | if (pid_ns->nr_hashed == init_pids) |
260 | break; | 260 | break; |
261 | schedule(); | 261 | schedule(); |
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 8f258f437ac2..812069b66f47 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
@@ -3918,6 +3918,26 @@ static void check_enqueue_throttle(struct cfs_rq *cfs_rq) | |||
3918 | if (!cfs_bandwidth_used()) | 3918 | if (!cfs_bandwidth_used()) |
3919 | return; | 3919 | return; |
3920 | 3920 | ||
3921 | /* Synchronize hierarchical throttle counter: */ | ||
3922 | if (unlikely(!cfs_rq->throttle_uptodate)) { | ||
3923 | struct rq *rq = rq_of(cfs_rq); | ||
3924 | struct cfs_rq *pcfs_rq; | ||
3925 | struct task_group *tg; | ||
3926 | |||
3927 | cfs_rq->throttle_uptodate = 1; | ||
3928 | |||
3929 | /* Get closest up-to-date node, because leaves go first: */ | ||
3930 | for (tg = cfs_rq->tg->parent; tg; tg = tg->parent) { | ||
3931 | pcfs_rq = tg->cfs_rq[cpu_of(rq)]; | ||
3932 | if (pcfs_rq->throttle_uptodate) | ||
3933 | break; | ||
3934 | } | ||
3935 | if (tg) { | ||
3936 | cfs_rq->throttle_count = pcfs_rq->throttle_count; | ||
3937 | cfs_rq->throttled_clock_task = rq_clock_task(rq); | ||
3938 | } | ||
3939 | } | ||
3940 | |||
3921 | /* an active group must be handled by the update_curr()->put() path */ | 3941 | /* an active group must be handled by the update_curr()->put() path */ |
3922 | if (!cfs_rq->runtime_enabled || cfs_rq->curr) | 3942 | if (!cfs_rq->runtime_enabled || cfs_rq->curr) |
3923 | return; | 3943 | return; |
@@ -4233,15 +4253,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) | |||
4233 | 4253 | ||
4234 | /* Don't dequeue parent if it has other entities besides us */ | 4254 | /* Don't dequeue parent if it has other entities besides us */ |
4235 | if (cfs_rq->load.weight) { | 4255 | if (cfs_rq->load.weight) { |
4256 | /* Avoid re-evaluating load for this entity: */ | ||
4257 | se = parent_entity(se); | ||
4236 | /* | 4258 | /* |
4237 | * Bias pick_next to pick a task from this cfs_rq, as | 4259 | * Bias pick_next to pick a task from this cfs_rq, as |
4238 | * p is sleeping when it is within its sched_slice. | 4260 | * p is sleeping when it is within its sched_slice. |
4239 | */ | 4261 | */ |
4240 | if (task_sleep && parent_entity(se)) | 4262 | if (task_sleep && se && !throttled_hierarchy(cfs_rq)) |
4241 | set_next_buddy(parent_entity(se)); | 4263 | set_next_buddy(se); |
4242 | |||
4243 | /* avoid re-evaluating load for this entity */ | ||
4244 | se = parent_entity(se); | ||
4245 | break; | 4264 | break; |
4246 | } | 4265 | } |
4247 | flags |= DEQUEUE_SLEEP; | 4266 | flags |= DEQUEUE_SLEEP; |
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 0517abd7dd73..4e5db65d1aab 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h | |||
@@ -417,7 +417,7 @@ struct cfs_rq { | |||
417 | 417 | ||
418 | u64 throttled_clock, throttled_clock_task; | 418 | u64 throttled_clock, throttled_clock_task; |
419 | u64 throttled_clock_task_time; | 419 | u64 throttled_clock_task_time; |
420 | int throttled, throttle_count; | 420 | int throttled, throttle_count, throttle_uptodate; |
421 | struct list_head throttled_list; | 421 | struct list_head throttled_list; |
422 | #endif /* CONFIG_CFS_BANDWIDTH */ | 422 | #endif /* CONFIG_CFS_BANDWIDTH */ |
423 | #endif /* CONFIG_FAIR_GROUP_SCHED */ | 423 | #endif /* CONFIG_FAIR_GROUP_SCHED */ |
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index c9956440d0e6..12ea4ea619ee 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
@@ -1471,6 +1471,11 @@ static __init int kprobe_trace_self_tests_init(void) | |||
1471 | 1471 | ||
1472 | end: | 1472 | end: |
1473 | release_all_trace_kprobes(); | 1473 | release_all_trace_kprobes(); |
1474 | /* | ||
1475 | * Wait for the optimizer work to finish. Otherwise it might fiddle | ||
1476 | * with probes in already freed __init text. | ||
1477 | */ | ||
1478 | wait_for_kprobe_optimizer(); | ||
1474 | if (warn) | 1479 | if (warn) |
1475 | pr_cont("NG: Some tests are failed. Please check them.\n"); | 1480 | pr_cont("NG: Some tests are failed. Please check them.\n"); |
1476 | else | 1481 | else |