author     LCPD Auto Merger    2017-08-12 17:08:59 -0500
committer  LCPD Auto Merger    2017-08-12 17:08:59 -0500
commit  5a1f5679a626cb6a8c6e165c461797026f1f1f72 (patch)
tree    b99af226cd42ae5c529d4de03787679c3bac9c28 /kernel
parent  26bc2bc5280e63d54c3b69057a7db5698e8de0c3 (diff)
parent  78f902ff441b17f8a3c51e0211fa69c052e9cbbe (diff)
Merge branch 'ti-linux-4.4.y' of git.ti.com:ti-linux-kernel/ti-linux-kernel into ti-lsk-linux-4.4.y
TI-Feature: ti_linux_base_lsk
TI-Tree: git@git.ti.com:ti-linux-kernel/ti-linux-kernel.git
TI-Branch: ti-linux-4.4.y

* 'ti-linux-4.4.y' of git.ti.com:ti-linux-kernel/ti-linux-kernel: (59 commits)
  Linux 4.4.81
  workqueue: implicit ordered attribute should be overridable
  net: account for current skb length when deciding about UFO
  ipv4: Should use consistent conditional judgement for ip fragment in __ip_append_data and ip_finish_output
  mm: don't dereference struct page fields of invalid pages
  signal: protect SIGNAL_UNKILLABLE from unintentional clearing.
  lib/Kconfig.debug: fix frv build failure
  mm, slab: make sure that KMALLOC_MAX_SIZE will fit into MAX_ORDER
  ARM: 8632/1: ftrace: fix syscall name matching
  virtio_blk: fix panic in initialization error path
  drm/virtio: fix framebuffer sparse warning
  scsi: qla2xxx: Get mutex lock before checking optrom_state
  phy state machine: failsafe leave invalid RUNNING state
  x86/boot: Add missing declaration of string functions
  tg3: Fix race condition in tg3_get_stats64().
  net: phy: dp83867: fix irq generation
  sh_eth: R8A7740 supports packet shecksumming
  wext: handle NULL extra data in iwe_stream_add_point better
  sparc64: Measure receiver forward progress to avoid send mondo timeout
  xen-netback: correctly schedule rate-limited queues
  ...

Signed-off-by: LCPD Auto Merger <lcpd_integration@list.ti.com>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/signal.c     4
-rw-r--r--  kernel/workqueue.c  23
2 files changed, 21 insertions(+), 6 deletions(-)
diff --git a/kernel/signal.c b/kernel/signal.c
index b92a047ddc82..5d50ea899b6d 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -346,7 +346,7 @@ static bool task_participate_group_stop(struct task_struct *task)
 	 * fresh group stop. Read comment in do_signal_stop() for details.
 	 */
 	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
-		sig->flags = SIGNAL_STOP_STOPPED;
+		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
 		return true;
 	}
 	return false;
@@ -845,7 +845,7 @@ static bool prepare_signal(int sig, struct task_struct *p, bool force)
 			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
 			 * notify its parent. See get_signal_to_deliver().
 			 */
-			signal->flags = why | SIGNAL_STOP_CONTINUED;
+			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
 			signal->group_stop_count = 0;
 			signal->group_exit_code = 0;
 		}
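
For context, the signal_set_stop_flags() helper used in both hunks above is introduced elsewhere in this series ("signal: protect SIGNAL_UNKILLABLE from unintentional clearing.") and is not part of the kernel/signal.c diff. A rough sketch of that helper, paraphrased from the upstream change and assuming the SIGNAL_STOP_MASK definition it adds alongside the flag definitions:

/* Sketch only, not part of this diff.  The helper replaces just the
 * stop-related flag bits instead of overwriting ->flags wholesale, so a
 * persistent bit such as SIGNAL_UNKILLABLE survives a group stop/continue. */
static inline void signal_set_stop_flags(struct signal_struct *sig,
					 unsigned int flags)
{
	WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP));
	sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
}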
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 2c2f971f3e75..23231237f2e2 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3647,8 +3647,12 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
 		return -EINVAL;
 
 	/* creating multiple pwqs breaks ordering guarantee */
-	if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)))
-		return -EINVAL;
+	if (!list_empty(&wq->pwqs)) {
+		if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
+			return -EINVAL;
+
+		wq->flags &= ~__WQ_ORDERED;
+	}
 
 	ctx = apply_wqattrs_prepare(wq, attrs);
 
@@ -3834,6 +3838,16 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 	struct workqueue_struct *wq;
 	struct pool_workqueue *pwq;
 
+	/*
+	 * Unbound && max_active == 1 used to imply ordered, which is no
+	 * longer the case on NUMA machines due to per-node pools. While
+	 * alloc_ordered_workqueue() is the right way to create an ordered
+	 * workqueue, keep the previous behavior to avoid subtle breakages
+	 * on NUMA.
+	 */
+	if ((flags & WQ_UNBOUND) && max_active == 1)
+		flags |= __WQ_ORDERED;
+
 	/* see the comment above the definition of WQ_POWER_EFFICIENT */
 	if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
 		flags |= WQ_UNBOUND;
@@ -4022,13 +4036,14 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
 	struct pool_workqueue *pwq;
 
 	/* disallow meddling with max_active for ordered workqueues */
-	if (WARN_ON(wq->flags & __WQ_ORDERED))
+	if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
 		return;
 
 	max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
 
 	mutex_lock(&wq->mutex);
 
+	wq->flags &= ~__WQ_ORDERED;
 	wq->saved_max_active = max_active;
 
 	for_each_pwq(pwq, wq)
@@ -5154,7 +5169,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq)
 	 * attributes breaks ordering guarantee. Disallow exposing ordered
 	 * workqueues.
 	 */
-	if (WARN_ON(wq->flags & __WQ_ORDERED))
+	if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
 		return -EINVAL;
 
 	wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
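
The workqueue hunks above distinguish explicitly ordered workqueues, created via alloc_ordered_workqueue() and still rejected by the WARN_ON()s, from ones that are only implicitly ordered because they are unbound with max_active == 1, whose __WQ_ORDERED may now be cleared. A sketch of the companion include/linux/workqueue.h change from "workqueue: implicit ordered attribute should be overridable", which is outside the kernel/ diffstat shown here; the flag bit position is illustrative only:

/* Sketch of the companion header change, not part of this diff. */
enum {
	__WQ_ORDERED_EXPLICIT	= 1 << 19, /* internal: alloc_ordered_workqueue() -- bit value illustrative */
};

/* alloc_ordered_workqueue() tags its workqueue as explicitly ordered, so
 * attribute and max_active changes keep being refused for it, while a plain
 * WQ_UNBOUND + max_active == 1 workqueue only carries the implicit
 * __WQ_ORDERED that the hunks above are now allowed to drop. */
#define alloc_ordered_workqueue(fmt, flags, args...)			\
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED |		\
			__WQ_ORDERED_EXPLICIT | (flags), 1, ##args)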