diff options
author | Buddy Liong | 2018-03-23 17:03:38 -0500 |
---|---|---|
committer | Buddy Liong | 2018-03-23 17:04:02 -0500 |
commit | 03cce97d4f914d4b8bc071df6eedc4a0f510ffab (patch) | |
tree | e7f0c6d8aae5b4b81501c1c66e84b548bc38036f | |
parent | b43fa3b965b034a7e390989a6294b597e188fb62 (diff) | |
parent | eb496200efeb6c580f386523062b0ccc80325315 (diff) | |
download | kernel-omap-03cce97d4f914d4b8bc071df6eedc4a0f510ffab.tar.gz kernel-omap-03cce97d4f914d4b8bc071df6eedc4a0f510ffab.tar.xz kernel-omap-03cce97d4f914d4b8bc071df6eedc4a0f510ffab.zip |
Merge branch 'p-ti-android-linux-4.4.y' of git://git.omapzoom.org/kernel/omap into 6AO.1.06AO.1.0
* 'p-ti-android-linux-4.4.y' of git://git.omapzoom.org/kernel/omap: (752 commits)
ARM: dts: dra76-evm: Add wilink8 wlan support
ARM: dts: dra7-evm: move MMC4 description to common file
usb: otg: Fix crash on shutdown
ARM: DTS: DRA76-EVM: Set powerhold property for tps65917
ti_config_fragments: android_omap: enable MCAN
Linux 4.4.117
media: r820t: fix r820t_write_reg for KASAN
ARM: dts: s5pv210: add interrupt-parent for ohci
ARM: pxa/tosa-bt: add MODULE_LICENSE tag
vfs: don't do RCU lookup of empty pathnames
x86: fix build warnign with 32-bit PAE
dm: correctly handle chained bios in dec_pending()
mvpp2: fix multicast address filter
ALSA: seq: Fix racy pool initializations
ALSA: usb-audio: add implicit fb quirk for Behringer UFX1204
ALSA: hda/realtek: PCI quirk for Fujitsu U7x7
ALSA: usb-audio: Fix UAC2 get_ctl request with a RANGE attribute
ALSA: hda - Fix headset mic detection problem for two Dell machines
Btrfs: fix unexpected -EEXIST when creating new inode
Btrfs: fix crash due to not cleaning up tree log block's dirty bits
...
Signed-off-by: Buddy Liong <a0270631@ti.com>
658 files changed, 10248 insertions, 3272 deletions
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu index b683e8ee69ec..ea6a043f5beb 100644 --- a/Documentation/ABI/testing/sysfs-devices-system-cpu +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu | |||
@@ -271,3 +271,19 @@ Description: Parameters for the CPU cache attributes | |||
271 | - WriteBack: data is written only to the cache line and | 271 | - WriteBack: data is written only to the cache line and |
272 | the modified cache line is written to main | 272 | the modified cache line is written to main |
273 | memory only when it is replaced | 273 | memory only when it is replaced |
274 | |||
275 | What: /sys/devices/system/cpu/vulnerabilities | ||
276 | /sys/devices/system/cpu/vulnerabilities/meltdown | ||
277 | /sys/devices/system/cpu/vulnerabilities/spectre_v1 | ||
278 | /sys/devices/system/cpu/vulnerabilities/spectre_v2 | ||
279 | Date: January 2018 | ||
280 | Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org> | ||
281 | Description: Information about CPU vulnerabilities | ||
282 | |||
283 | The files are named after the code names of CPU | ||
284 | vulnerabilities. The output of those files reflects the | ||
285 | state of the CPUs in the system. Possible output values: | ||
286 | |||
287 | "Not affected" CPU is not affected by the vulnerability | ||
288 | "Vulnerable" CPU is affected and no mitigation in effect | ||
289 | "Mitigation: $M" CPU is affected and mitigation $M is in effect | ||
diff --git a/Documentation/devicetree/bindings/dma/snps-dma.txt b/Documentation/devicetree/bindings/dma/snps-dma.txt index c261598164a7..17d43ca27f41 100644 --- a/Documentation/devicetree/bindings/dma/snps-dma.txt +++ b/Documentation/devicetree/bindings/dma/snps-dma.txt | |||
@@ -58,6 +58,6 @@ Example: | |||
58 | interrupts = <0 35 0x4>; | 58 | interrupts = <0 35 0x4>; |
59 | status = "disabled"; | 59 | status = "disabled"; |
60 | dmas = <&dmahost 12 0 1>, | 60 | dmas = <&dmahost 12 0 1>, |
61 | <&dmahost 13 0 1 0>; | 61 | <&dmahost 13 1 0>; |
62 | dma-names = "rx", "rx"; | 62 | dma-names = "rx", "rx"; |
63 | }; | 63 | }; |
diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt index 6c0108eb0137..2139ea253142 100644 --- a/Documentation/filesystems/ext4.txt +++ b/Documentation/filesystems/ext4.txt | |||
@@ -233,7 +233,7 @@ data_err=ignore(*) Just print an error message if an error occurs | |||
233 | data_err=abort Abort the journal if an error occurs in a file | 233 | data_err=abort Abort the journal if an error occurs in a file |
234 | data buffer in ordered mode. | 234 | data buffer in ordered mode. |
235 | 235 | ||
236 | grpid Give objects the same group ID as their creator. | 236 | grpid New objects have the group ID of their parent. |
237 | bsdgroups | 237 | bsdgroups |
238 | 238 | ||
239 | nogrpid (*) New objects have the group ID of their creator. | 239 | nogrpid (*) New objects have the group ID of their creator. |
diff --git a/Documentation/kdump/gdbmacros.txt b/Documentation/kdump/gdbmacros.txt index 9b9b454b048a..35f6a982a0d5 100644 --- a/Documentation/kdump/gdbmacros.txt +++ b/Documentation/kdump/gdbmacros.txt | |||
@@ -15,15 +15,16 @@ | |||
15 | 15 | ||
16 | define bttnobp | 16 | define bttnobp |
17 | set $tasks_off=((size_t)&((struct task_struct *)0)->tasks) | 17 | set $tasks_off=((size_t)&((struct task_struct *)0)->tasks) |
18 | set $pid_off=((size_t)&((struct task_struct *)0)->pids[1].pid_list.next) | 18 | set $pid_off=((size_t)&((struct task_struct *)0)->thread_group.next) |
19 | set $init_t=&init_task | 19 | set $init_t=&init_task |
20 | set $next_t=(((char *)($init_t->tasks).next) - $tasks_off) | 20 | set $next_t=(((char *)($init_t->tasks).next) - $tasks_off) |
21 | set var $stacksize = sizeof(union thread_union) | ||
21 | while ($next_t != $init_t) | 22 | while ($next_t != $init_t) |
22 | set $next_t=(struct task_struct *)$next_t | 23 | set $next_t=(struct task_struct *)$next_t |
23 | printf "\npid %d; comm %s:\n", $next_t.pid, $next_t.comm | 24 | printf "\npid %d; comm %s:\n", $next_t.pid, $next_t.comm |
24 | printf "===================\n" | 25 | printf "===================\n" |
25 | set var $stackp = $next_t.thread.esp | 26 | set var $stackp = $next_t.thread.sp |
26 | set var $stack_top = ($stackp & ~4095) + 4096 | 27 | set var $stack_top = ($stackp & ~($stacksize - 1)) + $stacksize |
27 | 28 | ||
28 | while ($stackp < $stack_top) | 29 | while ($stackp < $stack_top) |
29 | if (*($stackp) > _stext && *($stackp) < _sinittext) | 30 | if (*($stackp) > _stext && *($stackp) < _sinittext) |
@@ -31,13 +32,13 @@ define bttnobp | |||
31 | end | 32 | end |
32 | set $stackp += 4 | 33 | set $stackp += 4 |
33 | end | 34 | end |
34 | set $next_th=(((char *)$next_t->pids[1].pid_list.next) - $pid_off) | 35 | set $next_th=(((char *)$next_t->thread_group.next) - $pid_off) |
35 | while ($next_th != $next_t) | 36 | while ($next_th != $next_t) |
36 | set $next_th=(struct task_struct *)$next_th | 37 | set $next_th=(struct task_struct *)$next_th |
37 | printf "\npid %d; comm %s:\n", $next_t.pid, $next_t.comm | 38 | printf "\npid %d; comm %s:\n", $next_t.pid, $next_t.comm |
38 | printf "===================\n" | 39 | printf "===================\n" |
39 | set var $stackp = $next_t.thread.esp | 40 | set var $stackp = $next_t.thread.sp |
40 | set var $stack_top = ($stackp & ~4095) + 4096 | 41 | set var $stack_top = ($stackp & ~($stacksize - 1)) + stacksize |
41 | 42 | ||
42 | while ($stackp < $stack_top) | 43 | while ($stackp < $stack_top) |
43 | if (*($stackp) > _stext && *($stackp) < _sinittext) | 44 | if (*($stackp) > _stext && *($stackp) < _sinittext) |
@@ -45,7 +46,7 @@ define bttnobp | |||
45 | end | 46 | end |
46 | set $stackp += 4 | 47 | set $stackp += 4 |
47 | end | 48 | end |
48 | set $next_th=(((char *)$next_th->pids[1].pid_list.next) - $pid_off) | 49 | set $next_th=(((char *)$next_th->thread_group.next) - $pid_off) |
49 | end | 50 | end |
50 | set $next_t=(char *)($next_t->tasks.next) - $tasks_off | 51 | set $next_t=(char *)($next_t->tasks.next) - $tasks_off |
51 | end | 52 | end |
@@ -54,42 +55,44 @@ document bttnobp | |||
54 | dump all thread stack traces on a kernel compiled with !CONFIG_FRAME_POINTER | 55 | dump all thread stack traces on a kernel compiled with !CONFIG_FRAME_POINTER |
55 | end | 56 | end |
56 | 57 | ||
58 | define btthreadstack | ||
59 | set var $pid_task = $arg0 | ||
60 | |||
61 | printf "\npid %d; comm %s:\n", $pid_task.pid, $pid_task.comm | ||
62 | printf "task struct: " | ||
63 | print $pid_task | ||
64 | printf "===================\n" | ||
65 | set var $stackp = $pid_task.thread.sp | ||
66 | set var $stacksize = sizeof(union thread_union) | ||
67 | set var $stack_top = ($stackp & ~($stacksize - 1)) + $stacksize | ||
68 | set var $stack_bot = ($stackp & ~($stacksize - 1)) | ||
69 | |||
70 | set $stackp = *((unsigned long *) $stackp) | ||
71 | while (($stackp < $stack_top) && ($stackp > $stack_bot)) | ||
72 | set var $addr = *(((unsigned long *) $stackp) + 1) | ||
73 | info symbol $addr | ||
74 | set $stackp = *((unsigned long *) $stackp) | ||
75 | end | ||
76 | end | ||
77 | document btthreadstack | ||
78 | dump a thread stack using the given task structure pointer | ||
79 | end | ||
80 | |||
81 | |||
57 | define btt | 82 | define btt |
58 | set $tasks_off=((size_t)&((struct task_struct *)0)->tasks) | 83 | set $tasks_off=((size_t)&((struct task_struct *)0)->tasks) |
59 | set $pid_off=((size_t)&((struct task_struct *)0)->pids[1].pid_list.next) | 84 | set $pid_off=((size_t)&((struct task_struct *)0)->thread_group.next) |
60 | set $init_t=&init_task | 85 | set $init_t=&init_task |
61 | set $next_t=(((char *)($init_t->tasks).next) - $tasks_off) | 86 | set $next_t=(((char *)($init_t->tasks).next) - $tasks_off) |
62 | while ($next_t != $init_t) | 87 | while ($next_t != $init_t) |
63 | set $next_t=(struct task_struct *)$next_t | 88 | set $next_t=(struct task_struct *)$next_t |
64 | printf "\npid %d; comm %s:\n", $next_t.pid, $next_t.comm | 89 | btthreadstack $next_t |
65 | printf "===================\n" | ||
66 | set var $stackp = $next_t.thread.esp | ||
67 | set var $stack_top = ($stackp & ~4095) + 4096 | ||
68 | set var $stack_bot = ($stackp & ~4095) | ||
69 | |||
70 | set $stackp = *($stackp) | ||
71 | while (($stackp < $stack_top) && ($stackp > $stack_bot)) | ||
72 | set var $addr = *($stackp + 4) | ||
73 | info symbol $addr | ||
74 | set $stackp = *($stackp) | ||
75 | end | ||
76 | 90 | ||
77 | set $next_th=(((char *)$next_t->pids[1].pid_list.next) - $pid_off) | 91 | set $next_th=(((char *)$next_t->thread_group.next) - $pid_off) |
78 | while ($next_th != $next_t) | 92 | while ($next_th != $next_t) |
79 | set $next_th=(struct task_struct *)$next_th | 93 | set $next_th=(struct task_struct *)$next_th |
80 | printf "\npid %d; comm %s:\n", $next_t.pid, $next_t.comm | 94 | btthreadstack $next_th |
81 | printf "===================\n" | 95 | set $next_th=(((char *)$next_th->thread_group.next) - $pid_off) |
82 | set var $stackp = $next_t.thread.esp | ||
83 | set var $stack_top = ($stackp & ~4095) + 4096 | ||
84 | set var $stack_bot = ($stackp & ~4095) | ||
85 | |||
86 | set $stackp = *($stackp) | ||
87 | while (($stackp < $stack_top) && ($stackp > $stack_bot)) | ||
88 | set var $addr = *($stackp + 4) | ||
89 | info symbol $addr | ||
90 | set $stackp = *($stackp) | ||
91 | end | ||
92 | set $next_th=(((char *)$next_th->pids[1].pid_list.next) - $pid_off) | ||
93 | end | 96 | end |
94 | set $next_t=(char *)($next_t->tasks.next) - $tasks_off | 97 | set $next_t=(char *)($next_t->tasks.next) - $tasks_off |
95 | end | 98 | end |
@@ -101,7 +104,7 @@ end | |||
101 | define btpid | 104 | define btpid |
102 | set var $pid = $arg0 | 105 | set var $pid = $arg0 |
103 | set $tasks_off=((size_t)&((struct task_struct *)0)->tasks) | 106 | set $tasks_off=((size_t)&((struct task_struct *)0)->tasks) |
104 | set $pid_off=((size_t)&((struct task_struct *)0)->pids[1].pid_list.next) | 107 | set $pid_off=((size_t)&((struct task_struct *)0)->thread_group.next) |
105 | set $init_t=&init_task | 108 | set $init_t=&init_task |
106 | set $next_t=(((char *)($init_t->tasks).next) - $tasks_off) | 109 | set $next_t=(((char *)($init_t->tasks).next) - $tasks_off) |
107 | set var $pid_task = 0 | 110 | set var $pid_task = 0 |
@@ -113,29 +116,18 @@ define btpid | |||
113 | set $pid_task = $next_t | 116 | set $pid_task = $next_t |
114 | end | 117 | end |
115 | 118 | ||
116 | set $next_th=(((char *)$next_t->pids[1].pid_list.next) - $pid_off) | 119 | set $next_th=(((char *)$next_t->thread_group.next) - $pid_off) |
117 | while ($next_th != $next_t) | 120 | while ($next_th != $next_t) |
118 | set $next_th=(struct task_struct *)$next_th | 121 | set $next_th=(struct task_struct *)$next_th |
119 | if ($next_th.pid == $pid) | 122 | if ($next_th.pid == $pid) |
120 | set $pid_task = $next_th | 123 | set $pid_task = $next_th |
121 | end | 124 | end |
122 | set $next_th=(((char *)$next_th->pids[1].pid_list.next) - $pid_off) | 125 | set $next_th=(((char *)$next_th->thread_group.next) - $pid_off) |
123 | end | 126 | end |
124 | set $next_t=(char *)($next_t->tasks.next) - $tasks_off | 127 | set $next_t=(char *)($next_t->tasks.next) - $tasks_off |
125 | end | 128 | end |
126 | 129 | ||
127 | printf "\npid %d; comm %s:\n", $pid_task.pid, $pid_task.comm | 130 | btthreadstack $pid_task |
128 | printf "===================\n" | ||
129 | set var $stackp = $pid_task.thread.esp | ||
130 | set var $stack_top = ($stackp & ~4095) + 4096 | ||
131 | set var $stack_bot = ($stackp & ~4095) | ||
132 | |||
133 | set $stackp = *($stackp) | ||
134 | while (($stackp < $stack_top) && ($stackp > $stack_bot)) | ||
135 | set var $addr = *($stackp + 4) | ||
136 | info symbol $addr | ||
137 | set $stackp = *($stackp) | ||
138 | end | ||
139 | end | 131 | end |
140 | document btpid | 132 | document btpid |
141 | backtrace of pid | 133 | backtrace of pid |
@@ -145,7 +137,7 @@ end | |||
145 | define trapinfo | 137 | define trapinfo |
146 | set var $pid = $arg0 | 138 | set var $pid = $arg0 |
147 | set $tasks_off=((size_t)&((struct task_struct *)0)->tasks) | 139 | set $tasks_off=((size_t)&((struct task_struct *)0)->tasks) |
148 | set $pid_off=((size_t)&((struct task_struct *)0)->pids[1].pid_list.next) | 140 | set $pid_off=((size_t)&((struct task_struct *)0)->thread_group.next) |
149 | set $init_t=&init_task | 141 | set $init_t=&init_task |
150 | set $next_t=(((char *)($init_t->tasks).next) - $tasks_off) | 142 | set $next_t=(((char *)($init_t->tasks).next) - $tasks_off) |
151 | set var $pid_task = 0 | 143 | set var $pid_task = 0 |
@@ -157,13 +149,13 @@ define trapinfo | |||
157 | set $pid_task = $next_t | 149 | set $pid_task = $next_t |
158 | end | 150 | end |
159 | 151 | ||
160 | set $next_th=(((char *)$next_t->pids[1].pid_list.next) - $pid_off) | 152 | set $next_th=(((char *)$next_t->thread_group.next) - $pid_off) |
161 | while ($next_th != $next_t) | 153 | while ($next_th != $next_t) |
162 | set $next_th=(struct task_struct *)$next_th | 154 | set $next_th=(struct task_struct *)$next_th |
163 | if ($next_th.pid == $pid) | 155 | if ($next_th.pid == $pid) |
164 | set $pid_task = $next_th | 156 | set $pid_task = $next_th |
165 | end | 157 | end |
166 | set $next_th=(((char *)$next_th->pids[1].pid_list.next) - $pid_off) | 158 | set $next_th=(((char *)$next_th->thread_group.next) - $pid_off) |
167 | end | 159 | end |
168 | set $next_t=(char *)($next_t->tasks.next) - $tasks_off | 160 | set $next_t=(char *)($next_t->tasks.next) - $tasks_off |
169 | end | 161 | end |
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 21321b9acfb0..9fee3a90deaa 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt | |||
@@ -2458,6 +2458,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
2458 | 2458 | ||
2459 | nohugeiomap [KNL,x86] Disable kernel huge I/O mappings. | 2459 | nohugeiomap [KNL,x86] Disable kernel huge I/O mappings. |
2460 | 2460 | ||
2461 | nospectre_v2 [X86] Disable all mitigations for the Spectre variant 2 | ||
2462 | (indirect branch prediction) vulnerability. System may | ||
2463 | allow data leaks with this option, which is equivalent | ||
2464 | to spectre_v2=off. | ||
2465 | |||
2461 | noxsave [BUGS=X86] Disables x86 extended register state save | 2466 | noxsave [BUGS=X86] Disables x86 extended register state save |
2462 | and restore using xsave. The kernel will fallback to | 2467 | and restore using xsave. The kernel will fallback to |
2463 | enabling legacy floating-point and sse state. | 2468 | enabling legacy floating-point and sse state. |
@@ -2525,6 +2530,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
2525 | 2530 | ||
2526 | nointroute [IA-64] | 2531 | nointroute [IA-64] |
2527 | 2532 | ||
2533 | noinvpcid [X86] Disable the INVPCID cpu feature. | ||
2534 | |||
2528 | nojitter [IA-64] Disables jitter checking for ITC timers. | 2535 | nojitter [IA-64] Disables jitter checking for ITC timers. |
2529 | 2536 | ||
2530 | no-kvmclock [X86,KVM] Disable paravirtualized KVM clock driver | 2537 | no-kvmclock [X86,KVM] Disable paravirtualized KVM clock driver |
@@ -2559,6 +2566,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
2559 | nopat [X86] Disable PAT (page attribute table extension of | 2566 | nopat [X86] Disable PAT (page attribute table extension of |
2560 | pagetables) support. | 2567 | pagetables) support. |
2561 | 2568 | ||
2569 | nopcid [X86-64] Disable the PCID cpu feature. | ||
2570 | |||
2562 | norandmaps Don't use address space randomization. Equivalent to | 2571 | norandmaps Don't use address space randomization. Equivalent to |
2563 | echo 0 > /proc/sys/kernel/randomize_va_space | 2572 | echo 0 > /proc/sys/kernel/randomize_va_space |
2564 | 2573 | ||
@@ -3056,6 +3065,21 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
3056 | pt. [PARIDE] | 3065 | pt. [PARIDE] |
3057 | See Documentation/blockdev/paride.txt. | 3066 | See Documentation/blockdev/paride.txt. |
3058 | 3067 | ||
3068 | pti= [X86_64] Control Page Table Isolation of user and | ||
3069 | kernel address spaces. Disabling this feature | ||
3070 | removes hardening, but improves performance of | ||
3071 | system calls and interrupts. | ||
3072 | |||
3073 | on - unconditionally enable | ||
3074 | off - unconditionally disable | ||
3075 | auto - kernel detects whether your CPU model is | ||
3076 | vulnerable to issues that PTI mitigates | ||
3077 | |||
3078 | Not specifying this option is equivalent to pti=auto. | ||
3079 | |||
3080 | nopti [X86_64] | ||
3081 | Equivalent to pti=off | ||
3082 | |||
3059 | pty.legacy_count= | 3083 | pty.legacy_count= |
3060 | [KNL] Number of legacy pty's. Overwrites compiled-in | 3084 | [KNL] Number of legacy pty's. Overwrites compiled-in |
3061 | default number. | 3085 | default number. |
@@ -3585,6 +3609,29 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
3585 | sonypi.*= [HW] Sony Programmable I/O Control Device driver | 3609 | sonypi.*= [HW] Sony Programmable I/O Control Device driver |
3586 | See Documentation/laptops/sonypi.txt | 3610 | See Documentation/laptops/sonypi.txt |
3587 | 3611 | ||
3612 | spectre_v2= [X86] Control mitigation of Spectre variant 2 | ||
3613 | (indirect branch speculation) vulnerability. | ||
3614 | |||
3615 | on - unconditionally enable | ||
3616 | off - unconditionally disable | ||
3617 | auto - kernel detects whether your CPU model is | ||
3618 | vulnerable | ||
3619 | |||
3620 | Selecting 'on' will, and 'auto' may, choose a | ||
3621 | mitigation method at run time according to the | ||
3622 | CPU, the available microcode, the setting of the | ||
3623 | CONFIG_RETPOLINE configuration option, and the | ||
3624 | compiler with which the kernel was built. | ||
3625 | |||
3626 | Specific mitigations can also be selected manually: | ||
3627 | |||
3628 | retpoline - replace indirect branches | ||
3629 | retpoline,generic - google's original retpoline | ||
3630 | retpoline,amd - AMD-specific minimal thunk | ||
3631 | |||
3632 | Not specifying this option is equivalent to | ||
3633 | spectre_v2=auto. | ||
3634 | |||
3588 | spia_io_base= [HW,MTD] | 3635 | spia_io_base= [HW,MTD] |
3589 | spia_fio_base= | 3636 | spia_fio_base= |
3590 | spia_pedr= | 3637 | spia_pedr= |
diff --git a/Documentation/x86/pti.txt b/Documentation/x86/pti.txt new file mode 100644 index 000000000000..5cd58439ad2d --- /dev/null +++ b/Documentation/x86/pti.txt | |||
@@ -0,0 +1,186 @@ | |||
1 | Overview | ||
2 | ======== | ||
3 | |||
4 | Page Table Isolation (pti, previously known as KAISER[1]) is a | ||
5 | countermeasure against attacks on the shared user/kernel address | ||
6 | space such as the "Meltdown" approach[2]. | ||
7 | |||
8 | To mitigate this class of attacks, we create an independent set of | ||
9 | page tables for use only when running userspace applications. When | ||
10 | the kernel is entered via syscalls, interrupts or exceptions, the | ||
11 | page tables are switched to the full "kernel" copy. When the system | ||
12 | switches back to user mode, the user copy is used again. | ||
13 | |||
14 | The userspace page tables contain only a minimal amount of kernel | ||
15 | data: only what is needed to enter/exit the kernel such as the | ||
16 | entry/exit functions themselves and the interrupt descriptor table | ||
17 | (IDT). There are a few strictly unnecessary things that get mapped | ||
18 | such as the first C function when entering an interrupt (see | ||
19 | comments in pti.c). | ||
20 | |||
21 | This approach helps to ensure that side-channel attacks leveraging | ||
22 | the paging structures do not function when PTI is enabled. It can be | ||
23 | enabled by setting CONFIG_PAGE_TABLE_ISOLATION=y at compile time. | ||
24 | Once enabled at compile-time, it can be disabled at boot with the | ||
25 | 'nopti' or 'pti=' kernel parameters (see kernel-parameters.txt). | ||
26 | |||
27 | Page Table Management | ||
28 | ===================== | ||
29 | |||
30 | When PTI is enabled, the kernel manages two sets of page tables. | ||
31 | The first set is very similar to the single set which is present in | ||
32 | kernels without PTI. This includes a complete mapping of userspace | ||
33 | that the kernel can use for things like copy_to_user(). | ||
34 | |||
35 | Although _complete_, the user portion of the kernel page tables is | ||
36 | crippled by setting the NX bit in the top level. This ensures | ||
37 | that any missed kernel->user CR3 switch will immediately crash | ||
38 | userspace upon executing its first instruction. | ||
39 | |||
40 | The userspace page tables map only the kernel data needed to enter | ||
41 | and exit the kernel. This data is entirely contained in the 'struct | ||
42 | cpu_entry_area' structure which is placed in the fixmap which gives | ||
43 | each CPU's copy of the area a compile-time-fixed virtual address. | ||
44 | |||
45 | For new userspace mappings, the kernel makes the entries in its | ||
46 | page tables like normal. The only difference is when the kernel | ||
47 | makes entries in the top (PGD) level. In addition to setting the | ||
48 | entry in the main kernel PGD, a copy of the entry is made in the | ||
49 | userspace page tables' PGD. | ||
50 | |||
51 | This sharing at the PGD level also inherently shares all the lower | ||
52 | layers of the page tables. This leaves a single, shared set of | ||
53 | userspace page tables to manage. One PTE to lock, one set of | ||
54 | accessed bits, dirty bits, etc... | ||
55 | |||
56 | Overhead | ||
57 | ======== | ||
58 | |||
59 | Protection against side-channel attacks is important. But, | ||
60 | this protection comes at a cost: | ||
61 | |||
62 | 1. Increased Memory Use | ||
63 | a. Each process now needs an order-1 PGD instead of order-0. | ||
64 | (Consumes an additional 4k per process). | ||
65 | b. The 'cpu_entry_area' structure must be 2MB in size and 2MB | ||
66 | aligned so that it can be mapped by setting a single PMD | ||
67 | entry. This consumes nearly 2MB of RAM once the kernel | ||
68 | is decompressed, but no space in the kernel image itself. | ||
69 | |||
70 | 2. Runtime Cost | ||
71 | a. CR3 manipulation to switch between the page table copies | ||
72 | must be done at interrupt, syscall, and exception entry | ||
73 | and exit (it can be skipped when the kernel is interrupted, | ||
74 | though.) Moves to CR3 are on the order of a hundred | ||
75 | cycles, and are required at every entry and exit. | ||
76 | b. A "trampoline" must be used for SYSCALL entry. This | ||
77 | trampoline depends on a smaller set of resources than the | ||
78 | non-PTI SYSCALL entry code, so requires mapping fewer | ||
79 | things into the userspace page tables. The downside is | ||
80 | that stacks must be switched at entry time. | ||
81 | c. Global pages are disabled for all kernel structures not | ||
82 | mapped into both kernel and userspace page tables. This | ||
83 | feature of the MMU allows different processes to share TLB | ||
84 | entries mapping the kernel. Losing the feature means more | ||
85 | TLB misses after a context switch. The actual loss of | ||
86 | performance is very small, however, never exceeding 1%. | ||
87 | d. Process Context IDentifiers (PCID) is a CPU feature that | ||
88 | allows us to skip flushing the entire TLB when switching page | ||
89 | tables by setting a special bit in CR3 when the page tables | ||
90 | are changed. This makes switching the page tables (at context | ||
91 | switch, or kernel entry/exit) cheaper. But, on systems with | ||
92 | PCID support, the context switch code must flush both the user | ||
93 | and kernel entries out of the TLB. The user PCID TLB flush is | ||
94 | deferred until the exit to userspace, minimizing the cost. | ||
95 | See intel.com/sdm for the gory PCID/INVPCID details. | ||
96 | e. The userspace page tables must be populated for each new | ||
97 | process. Even without PTI, the shared kernel mappings | ||
98 | are created by copying top-level (PGD) entries into each | ||
99 | new process. But, with PTI, there are now *two* kernel | ||
100 | mappings: one in the kernel page tables that maps everything | ||
101 | and one for the entry/exit structures. At fork(), we need to | ||
102 | copy both. | ||
103 | f. In addition to the fork()-time copying, there must also | ||
104 | be an update to the userspace PGD any time a set_pgd() is done | ||
105 | on a PGD used to map userspace. This ensures that the kernel | ||
106 | and userspace copies always map the same userspace | ||
107 | memory. | ||
108 | g. On systems without PCID support, each CR3 write flushes | ||
109 | the entire TLB. That means that each syscall, interrupt | ||
110 | or exception flushes the TLB. | ||
111 | h. INVPCID is a TLB-flushing instruction which allows flushing | ||
112 | of TLB entries for non-current PCIDs. Some systems support | ||
113 | PCIDs, but do not support INVPCID. On these systems, addresses | ||
114 | can only be flushed from the TLB for the current PCID. When | ||
115 | flushing a kernel address, we need to flush all PCIDs, so a | ||
116 | single kernel address flush will require a TLB-flushing CR3 | ||
117 | write upon the next use of every PCID. | ||
118 | |||
119 | Possible Future Work | ||
120 | ==================== | ||
121 | 1. We can be more careful about not actually writing to CR3 | ||
122 | unless its value is actually changed. | ||
123 | 2. Allow PTI to be enabled/disabled at runtime in addition to the | ||
124 | boot-time switching. | ||
125 | |||
126 | Testing | ||
127 | ======== | ||
128 | |||
129 | To test stability of PTI, the following test procedure is recommended, | ||
130 | ideally doing all of these in parallel: | ||
131 | |||
132 | 1. Set CONFIG_DEBUG_ENTRY=y | ||
133 | 2. Run several copies of all of the tools/testing/selftests/x86/ tests | ||
134 | (excluding MPX and protection_keys) in a loop on multiple CPUs for | ||
135 | several minutes. These tests frequently uncover corner cases in the | ||
136 | kernel entry code. In general, old kernels might cause these tests | ||
137 | themselves to crash, but they should never crash the kernel. | ||
138 | 3. Run the 'perf' tool in a mode (top or record) that generates many | ||
139 | frequent performance monitoring non-maskable interrupts (see "NMI" | ||
140 | in /proc/interrupts). This exercises the NMI entry/exit code which | ||
141 | is known to trigger bugs in code paths that did not expect to be | ||
142 | interrupted, including nested NMIs. Using "-c" boosts the rate of | ||
143 | NMIs, and using two -c with separate counters encourages nested NMIs | ||
144 | and less deterministic behavior. | ||
145 | |||
146 | while true; do perf record -c 10000 -e instructions,cycles -a sleep 10; done | ||
147 | |||
148 | 4. Launch a KVM virtual machine. | ||
149 | 5. Run 32-bit binaries on systems supporting the SYSCALL instruction. | ||
150 | This has been a lightly-tested code path and needs extra scrutiny. | ||
151 | |||
152 | Debugging | ||
153 | ========= | ||
154 | |||
155 | Bugs in PTI cause a few different signatures of crashes | ||
156 | that are worth noting here. | ||
157 | |||
158 | * Failures of the selftests/x86 code. Usually a bug in one of the | ||
159 | more obscure corners of entry_64.S | ||
160 | * Crashes in early boot, especially around CPU bringup. Bugs | ||
161 | in the trampoline code or mappings cause these. | ||
162 | * Crashes at the first interrupt. Caused by bugs in entry_64.S, | ||
163 | like screwing up a page table switch. Also caused by | ||
164 | incorrectly mapping the IRQ handler entry code. | ||
165 | * Crashes at the first NMI. The NMI code is separate from main | ||
166 | interrupt handlers and can have bugs that do not affect | ||
167 | normal interrupts. Also caused by incorrectly mapping NMI | ||
168 | code. NMIs that interrupt the entry code must be very | ||
169 | careful and can be the cause of crashes that show up when | ||
170 | running perf. | ||
171 | * Kernel crashes at the first exit to userspace. entry_64.S | ||
172 | bugs, or failing to map some of the exit code. | ||
173 | * Crashes at first interrupt that interrupts userspace. The paths | ||
174 | in entry_64.S that return to userspace are sometimes separate | ||
175 | from the ones that return to the kernel. | ||
176 | * Double faults: overflowing the kernel stack because of page | ||
177 | faults upon page faults. Caused by touching non-pti-mapped | ||
178 | data in the entry code, or forgetting to switch to kernel | ||
179 | CR3 before calling into C functions which are not pti-mapped. | ||
180 | * Userspace segfaults early in boot, sometimes manifesting | ||
181 | as mount(8) failing to mount the rootfs. These have | ||
182 | tended to be TLB invalidation issues. Usually invalidating | ||
183 | the wrong PCID, or otherwise missing an invalidation. | ||
184 | |||
185 | 1. https://gruss.cc/files/kaiser.pdf | ||
186 | 2. https://meltdownattack.com/meltdown.pdf | ||
@@ -1,6 +1,6 @@ | |||
1 | VERSION = 4 | 1 | VERSION = 4 |
2 | PATCHLEVEL = 4 | 2 | PATCHLEVEL = 4 |
3 | SUBLEVEL = 107 | 3 | SUBLEVEL = 117 |
4 | EXTRAVERSION = | 4 | EXTRAVERSION = |
5 | NAME = Blurry Fish Butt | 5 | NAME = Blurry Fish Butt |
6 | 6 | ||
@@ -782,6 +782,9 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign) | |||
782 | # disable invalid "can't wrap" optimizations for signed / pointers | 782 | # disable invalid "can't wrap" optimizations for signed / pointers |
783 | KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow) | 783 | KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow) |
784 | 784 | ||
785 | # Make sure -fstack-check isn't enabled (like gentoo apparently did) | ||
786 | KBUILD_CFLAGS += $(call cc-option,-fno-stack-check,) | ||
787 | |||
785 | # conserve stack if available | 788 | # conserve stack if available |
786 | KBUILD_CFLAGS += $(call cc-option,-fconserve-stack) | 789 | KBUILD_CFLAGS += $(call cc-option,-fconserve-stack) |
787 | 790 | ||
diff --git a/arch/alpha/include/asm/mmu_context.h b/arch/alpha/include/asm/mmu_context.h index 4c51c05333c6..4cafffa80e2c 100644 --- a/arch/alpha/include/asm/mmu_context.h +++ b/arch/alpha/include/asm/mmu_context.h | |||
@@ -7,6 +7,7 @@ | |||
7 | * Copyright (C) 1996, Linus Torvalds | 7 | * Copyright (C) 1996, Linus Torvalds |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/sched.h> | ||
10 | #include <asm/machvec.h> | 11 | #include <asm/machvec.h> |
11 | #include <asm/compiler.h> | 12 | #include <asm/compiler.h> |
12 | #include <asm-generic/mm_hooks.h> | 13 | #include <asm-generic/mm_hooks.h> |
diff --git a/arch/alpha/kernel/pci_impl.h b/arch/alpha/kernel/pci_impl.h index 2b0ac429f5eb..412bb3c24f36 100644 --- a/arch/alpha/kernel/pci_impl.h +++ b/arch/alpha/kernel/pci_impl.h | |||
@@ -143,7 +143,8 @@ struct pci_iommu_arena | |||
143 | }; | 143 | }; |
144 | 144 | ||
145 | #if defined(CONFIG_ALPHA_SRM) && \ | 145 | #if defined(CONFIG_ALPHA_SRM) && \ |
146 | (defined(CONFIG_ALPHA_CIA) || defined(CONFIG_ALPHA_LCA)) | 146 | (defined(CONFIG_ALPHA_CIA) || defined(CONFIG_ALPHA_LCA) || \ |
147 | defined(CONFIG_ALPHA_AVANTI)) | ||
147 | # define NEED_SRM_SAVE_RESTORE | 148 | # define NEED_SRM_SAVE_RESTORE |
148 | #else | 149 | #else |
149 | # undef NEED_SRM_SAVE_RESTORE | 150 | # undef NEED_SRM_SAVE_RESTORE |
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c index 84d13263ce46..8095fb2c5c94 100644 --- a/arch/alpha/kernel/process.c +++ b/arch/alpha/kernel/process.c | |||
@@ -273,12 +273,13 @@ copy_thread(unsigned long clone_flags, unsigned long usp, | |||
273 | application calling fork. */ | 273 | application calling fork. */ |
274 | if (clone_flags & CLONE_SETTLS) | 274 | if (clone_flags & CLONE_SETTLS) |
275 | childti->pcb.unique = regs->r20; | 275 | childti->pcb.unique = regs->r20; |
276 | else | ||
277 | regs->r20 = 0; /* OSF/1 has some strange fork() semantics. */ | ||
276 | childti->pcb.usp = usp ?: rdusp(); | 278 | childti->pcb.usp = usp ?: rdusp(); |
277 | *childregs = *regs; | 279 | *childregs = *regs; |
278 | childregs->r0 = 0; | 280 | childregs->r0 = 0; |
279 | childregs->r19 = 0; | 281 | childregs->r19 = 0; |
280 | childregs->r20 = 1; /* OSF/1 has some strange fork() semantics. */ | 282 | childregs->r20 = 1; /* OSF/1 has some strange fork() semantics. */ |
281 | regs->r20 = 0; | ||
282 | stack = ((struct switch_stack *) regs) - 1; | 283 | stack = ((struct switch_stack *) regs) - 1; |
283 | *childstack = *stack; | 284 | *childstack = *stack; |
284 | childstack->r26 = (unsigned long) ret_from_fork; | 285 | childstack->r26 = (unsigned long) ret_from_fork; |
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h index d4d8df706efa..57387b567f34 100644 --- a/arch/arc/include/asm/uaccess.h +++ b/arch/arc/include/asm/uaccess.h | |||
@@ -673,6 +673,7 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count) | |||
673 | return 0; | 673 | return 0; |
674 | 674 | ||
675 | __asm__ __volatile__( | 675 | __asm__ __volatile__( |
676 | " mov lp_count, %5 \n" | ||
676 | " lp 3f \n" | 677 | " lp 3f \n" |
677 | "1: ldb.ab %3, [%2, 1] \n" | 678 | "1: ldb.ab %3, [%2, 1] \n" |
678 | " breq.d %3, 0, 3f \n" | 679 | " breq.d %3, 0, 3f \n" |
@@ -689,8 +690,8 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count) | |||
689 | " .word 1b, 4b \n" | 690 | " .word 1b, 4b \n" |
690 | " .previous \n" | 691 | " .previous \n" |
691 | : "+r"(res), "+r"(dst), "+r"(src), "=r"(val) | 692 | : "+r"(res), "+r"(dst), "+r"(src), "=r"(val) |
692 | : "g"(-EFAULT), "l"(count) | 693 | : "g"(-EFAULT), "r"(count) |
693 | : "memory"); | 694 | : "lp_count", "lp_start", "lp_end", "memory"); |
694 | 695 | ||
695 | return res; | 696 | return res; |
696 | } | 697 | } |
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile index 155255ed8e2a..84aec2d729cf 100644 --- a/arch/arm/boot/dts/Makefile +++ b/arch/arm/boot/dts/Makefile | |||
@@ -528,7 +528,8 @@ dtb-$(CONFIG_SOC_DRA7XX) += \ | |||
528 | dra7-evm-late-attach.dtb \ | 528 | dra7-evm-late-attach.dtb \ |
529 | dra72-evm-late-attach.dtb \ | 529 | dra72-evm-late-attach.dtb \ |
530 | dra71-evm-late-attach.dtb \ | 530 | dra71-evm-late-attach.dtb \ |
531 | dra76-evm-late-attach.dtb | 531 | dra76-evm-late-attach.dtb \ |
532 | dra76-evm-tfp410.dtb | ||
532 | dtb-$(CONFIG_ARCH_ORION5X) += \ | 533 | dtb-$(CONFIG_ARCH_ORION5X) += \ |
533 | orion5x-lacie-d2-network.dtb \ | 534 | orion5x-lacie-d2-network.dtb \ |
534 | orion5x-lacie-ethernet-disk-mini-v2.dtb \ | 535 | orion5x-lacie-ethernet-disk-mini-v2.dtb \ |
diff --git a/arch/arm/boot/dts/dra7-evm-common.dtsi b/arch/arm/boot/dts/dra7-evm-common.dtsi index 1f9008d8e14e..c1bee515079f 100644 --- a/arch/arm/boot/dts/dra7-evm-common.dtsi +++ b/arch/arm/boot/dts/dra7-evm-common.dtsi | |||
@@ -292,3 +292,19 @@ | |||
292 | &pcie1_rc { | 292 | &pcie1_rc { |
293 | status = "okay"; | 293 | status = "okay"; |
294 | }; | 294 | }; |
295 | |||
296 | &mmc4 { | ||
297 | bus-width = <4>; | ||
298 | cap-power-off-card; | ||
299 | keep-power-in-suspend; | ||
300 | ti,non-removable; | ||
301 | |||
302 | #address-cells = <1>; | ||
303 | #size-cells = <0>; | ||
304 | wlcore: wlcore@2 { | ||
305 | compatible = "ti,wl1835"; | ||
306 | reg = <2>; | ||
307 | interrupt-parent = <&gpio5>; | ||
308 | interrupts = <7 IRQ_TYPE_EDGE_RISING>; | ||
309 | }; | ||
310 | }; | ||
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts index 3f27909b7b46..25070a073a94 100644 --- a/arch/arm/boot/dts/dra7-evm.dts +++ b/arch/arm/boot/dts/dra7-evm.dts | |||
@@ -907,10 +907,6 @@ i2c_p3_exp: &i2c2 { | |||
907 | &mmc4 { | 907 | &mmc4 { |
908 | status = "okay"; | 908 | status = "okay"; |
909 | vmmc-supply = <&vmmcwl_fixed>; | 909 | vmmc-supply = <&vmmcwl_fixed>; |
910 | bus-width = <4>; | ||
911 | cap-power-off-card; | ||
912 | keep-power-in-suspend; | ||
913 | ti,non-removable; | ||
914 | 910 | ||
915 | pinctrl-names = "default-rev11", "default", "hs-rev11", "hs", "sdr12-rev11", "sdr12", "sdr25-rev11", "sdr25"; | 911 | pinctrl-names = "default-rev11", "default", "hs-rev11", "hs", "sdr12-rev11", "sdr12", "sdr25-rev11", "sdr25"; |
916 | pinctrl-0 = <&mmc4_pins_default &mmc4_iodelay_ds_rev11_conf>; | 912 | pinctrl-0 = <&mmc4_pins_default &mmc4_iodelay_ds_rev11_conf>; |
@@ -921,15 +917,6 @@ i2c_p3_exp: &i2c2 { | |||
921 | pinctrl-5 = <&mmc4_pins_sdr12 &mmc4_iodelay_sdr12_hs_sdr25_rev20_conf>; | 917 | pinctrl-5 = <&mmc4_pins_sdr12 &mmc4_iodelay_sdr12_hs_sdr25_rev20_conf>; |
922 | pinctrl-6 = <&mmc4_pins_sdr25 &mmc4_iodelay_sdr12_hs_sdr25_rev11_conf>; | 918 | pinctrl-6 = <&mmc4_pins_sdr25 &mmc4_iodelay_sdr12_hs_sdr25_rev11_conf>; |
923 | pinctrl-7 = <&mmc4_pins_sdr25 &mmc4_iodelay_sdr12_hs_sdr25_rev20_conf>; | 919 | pinctrl-7 = <&mmc4_pins_sdr25 &mmc4_iodelay_sdr12_hs_sdr25_rev20_conf>; |
924 | |||
925 | #address-cells = <1>; | ||
926 | #size-cells = <0>; | ||
927 | wlcore: wlcore@2 { | ||
928 | compatible = "ti,wl1835"; | ||
929 | reg = <2>; | ||
930 | interrupt-parent = <&gpio5>; | ||
931 | interrupts = <7 IRQ_TYPE_LEVEL_HIGH>; | ||
932 | }; | ||
933 | }; | 920 | }; |
934 | 921 | ||
935 | &oppdm_mpu { | 922 | &oppdm_mpu { |
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index 4fa2eedc71f4..9f49f96e6f93 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi | |||
@@ -308,6 +308,7 @@ | |||
308 | device_type = "pci"; | 308 | device_type = "pci"; |
309 | ranges = <0x81000000 0 0 0x03000 0 0x00010000 | 309 | ranges = <0x81000000 0 0 0x03000 0 0x00010000 |
310 | 0x82000000 0 0x20013000 0x13000 0 0xffed000>; | 310 | 0x82000000 0 0x20013000 0x13000 0 0xffed000>; |
311 | bus-range = <0x00 0xff>; | ||
311 | #interrupt-cells = <1>; | 312 | #interrupt-cells = <1>; |
312 | num-lanes = <1>; | 313 | num-lanes = <1>; |
313 | linux,pci-domain = <0>; | 314 | linux,pci-domain = <0>; |
@@ -360,6 +361,7 @@ | |||
360 | device_type = "pci"; | 361 | device_type = "pci"; |
361 | ranges = <0x81000000 0 0 0x03000 0 0x00010000 | 362 | ranges = <0x81000000 0 0 0x03000 0 0x00010000 |
362 | 0x82000000 0 0x30013000 0x13000 0 0xffed000>; | 363 | 0x82000000 0 0x30013000 0x13000 0 0xffed000>; |
364 | bus-range = <0x00 0xff>; | ||
363 | #interrupt-cells = <1>; | 365 | #interrupt-cells = <1>; |
364 | num-lanes = <1>; | 366 | num-lanes = <1>; |
365 | linux,pci-domain = <1>; | 367 | linux,pci-domain = <1>; |
@@ -2386,4 +2388,24 @@ | |||
2386 | polling-delay = <500>; /* milliseconds */ | 2388 | polling-delay = <500>; /* milliseconds */ |
2387 | }; | 2389 | }; |
2388 | 2390 | ||
2391 | &cpu_crit { | ||
2392 | temperature = <120000>; /* milli Celsius */ | ||
2393 | }; | ||
2394 | |||
2395 | &core_crit { | ||
2396 | temperature = <120000>; /* milli Celsius */ | ||
2397 | }; | ||
2398 | |||
2399 | &gpu_crit { | ||
2400 | temperature = <120000>; /* milli Celsius */ | ||
2401 | }; | ||
2402 | |||
2403 | &dspeve_crit { | ||
2404 | temperature = <120000>; /* milli Celsius */ | ||
2405 | }; | ||
2406 | |||
2407 | &iva_crit { | ||
2408 | temperature = <120000>; /* milli Celsius */ | ||
2409 | }; | ||
2410 | |||
2389 | /include/ "dra7xx-clocks.dtsi" | 2411 | /include/ "dra7xx-clocks.dtsi" |
diff --git a/arch/arm/boot/dts/dra76-evm-tfp410.dts b/arch/arm/boot/dts/dra76-evm-tfp410.dts new file mode 100644 index 000000000000..77390c0277ce --- /dev/null +++ b/arch/arm/boot/dts/dra76-evm-tfp410.dts | |||
@@ -0,0 +1,105 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/ | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | ||
8 | #include <dra76-evm.dts> | ||
9 | |||
10 | / { | ||
11 | tfp410: encoder@0 { | ||
12 | compatible = "ti,tfp410"; | ||
13 | |||
14 | ports { | ||
15 | #address-cells = <1>; | ||
16 | #size-cells = <0>; | ||
17 | |||
18 | port@0 { | ||
19 | reg = <0>; | ||
20 | tfp410_in: endpoint@0 { | ||
21 | remote-endpoint = <&dpi_out>; | ||
22 | }; | ||
23 | }; | ||
24 | |||
25 | port@1 { | ||
26 | reg = <1>; | ||
27 | |||
28 | tfp410_out: endpoint@0 { | ||
29 | remote-endpoint = <&dvi_connector_in>; | ||
30 | }; | ||
31 | }; | ||
32 | }; | ||
33 | }; | ||
34 | |||
35 | dvi0: connector@0 { | ||
36 | compatible = "dvi-connector"; | ||
37 | label = "dvi"; | ||
38 | |||
39 | digital; | ||
40 | |||
41 | ddc-i2c-bus = <&i2c3>; | ||
42 | |||
43 | hpd-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>; /* wakeup2/sys_nirq2/gpio1_2 HPD */ | ||
44 | |||
45 | port { | ||
46 | dvi_connector_in: endpoint { | ||
47 | remote-endpoint = <&tfp410_out>; | ||
48 | }; | ||
49 | }; | ||
50 | }; | ||
51 | }; | ||
52 | |||
53 | &dss { | ||
54 | status = "ok"; | ||
55 | ports { | ||
56 | #address-cells = <1>; | ||
57 | #size-cells = <0>; | ||
58 | status = "ok"; | ||
59 | |||
60 | port { | ||
61 | reg = <0>; | ||
62 | |||
63 | dpi_out: endpoint { | ||
64 | remote-endpoint = <&tfp410_in>; | ||
65 | data-lines = <24>; | ||
66 | }; | ||
67 | }; | ||
68 | }; | ||
69 | }; | ||
70 | |||
71 | &gpio3 { | ||
72 | p1 { | ||
73 | /* GPIO3_1 CON_LCD_PWR_DN */ | ||
74 | /* This affects the TFP410 and the USB */ | ||
75 | gpio-hog; | ||
76 | gpios = <1 GPIO_ACTIVE_HIGH>; | ||
77 | output-low; | ||
78 | line-name = "CON_LCD_PWR_DN"; | ||
79 | }; | ||
80 | }; | ||
81 | |||
82 | &i2c3 { | ||
83 | clock-frequency = <100000>; | ||
84 | |||
85 | pcf_tfp: pcf8757@20 { | ||
86 | compatible = "ti,pcf8575", "nxp,pcf8575"; | ||
87 | reg = <0x27>; | ||
88 | gpio-controller; | ||
89 | #gpio-cells = <2>; | ||
90 | |||
91 | p2 { | ||
92 | gpio-hog; | ||
93 | gpios = <2 GPIO_ACTIVE_HIGH>; | ||
94 | output-high; | ||
95 | line-name = "ct_hpd"; | ||
96 | }; | ||
97 | |||
98 | p3 { | ||
99 | gpio-hog; | ||
100 | gpios = <3 GPIO_ACTIVE_HIGH>; | ||
101 | output-high; | ||
102 | line-name = "ls_oe"; | ||
103 | }; | ||
104 | }; | ||
105 | }; | ||
diff --git a/arch/arm/boot/dts/dra76-evm.dts b/arch/arm/boot/dts/dra76-evm.dts index c852a76ec21d..9012e7677e52 100644 --- a/arch/arm/boot/dts/dra76-evm.dts +++ b/arch/arm/boot/dts/dra76-evm.dts | |||
@@ -119,6 +119,16 @@ | |||
119 | vin-supply = <&smps5_reg>; | 119 | vin-supply = <&smps5_reg>; |
120 | }; | 120 | }; |
121 | 121 | ||
122 | vmmcwl_fixed: fixedregulator-mmcwl { | ||
123 | compatible = "regulator-fixed"; | ||
124 | regulator-name = "vmmcwl_fixed"; | ||
125 | regulator-min-microvolt = <1800000>; | ||
126 | regulator-max-microvolt = <1800000>; | ||
127 | gpio = <&gpio5 8 0>; /* gpio5_8 */ | ||
128 | startup-delay-us = <70000>; | ||
129 | enable-active-high; | ||
130 | }; | ||
131 | |||
122 | vtt_fixed: fixedregulator-vtt { | 132 | vtt_fixed: fixedregulator-vtt { |
123 | compatible = "regulator-fixed"; | 133 | compatible = "regulator-fixed"; |
124 | regulator-name = "vtt_fixed"; | 134 | regulator-name = "vtt_fixed"; |
@@ -147,6 +157,7 @@ | |||
147 | compatible = "ti,tps65917"; | 157 | compatible = "ti,tps65917"; |
148 | reg = <0x58>; | 158 | reg = <0x58>; |
149 | ti,system-power-controller; | 159 | ti,system-power-controller; |
160 | ti,palmas-override-powerhold; | ||
150 | interrupt-controller; | 161 | interrupt-controller; |
151 | #interrupt-cells = <2>; | 162 | #interrupt-cells = <2>; |
152 | 163 | ||
@@ -427,6 +438,17 @@ | |||
427 | pinctrl-3 = <&mmc2_pins_hs200 &mmc2_iodelay_hs200_conf>; | 438 | pinctrl-3 = <&mmc2_pins_hs200 &mmc2_iodelay_hs200_conf>; |
428 | }; | 439 | }; |
429 | 440 | ||
441 | &mmc4 { | ||
442 | status = "okay"; | ||
443 | vmmc-supply = <&vmmcwl_fixed>; | ||
444 | |||
445 | pinctrl-names = "default", "hs", "sdr12", "sdr25"; | ||
446 | pinctrl-0 = <&mmc4_pins_hs &mmc4_iodelay_default_conf>; | ||
447 | pinctrl-1 = <&mmc4_pins_hs &mmc4_iodelay_manual1_conf>; | ||
448 | pinctrl-2 = <&mmc4_pins_hs &mmc4_iodelay_manual1_conf>; | ||
449 | pinctrl-3 = <&mmc4_pins_hs &mmc4_iodelay_manual1_conf>; | ||
450 | }; | ||
451 | |||
430 | &oppdm_mpu { | 452 | &oppdm_mpu { |
431 | vdd-supply = <&buck10_reg>; | 453 | vdd-supply = <&buck10_reg>; |
432 | }; | 454 | }; |
diff --git a/arch/arm/boot/dts/kirkwood-openblocks_a7.dts b/arch/arm/boot/dts/kirkwood-openblocks_a7.dts index d5e3bc518968..d57f48543f76 100644 --- a/arch/arm/boot/dts/kirkwood-openblocks_a7.dts +++ b/arch/arm/boot/dts/kirkwood-openblocks_a7.dts | |||
@@ -53,7 +53,8 @@ | |||
53 | }; | 53 | }; |
54 | 54 | ||
55 | pinctrl: pin-controller@10000 { | 55 | pinctrl: pin-controller@10000 { |
56 | pinctrl-0 = <&pmx_dip_switches &pmx_gpio_header>; | 56 | pinctrl-0 = <&pmx_dip_switches &pmx_gpio_header |
57 | &pmx_gpio_header_gpo>; | ||
57 | pinctrl-names = "default"; | 58 | pinctrl-names = "default"; |
58 | 59 | ||
59 | pmx_uart0: pmx-uart0 { | 60 | pmx_uart0: pmx-uart0 { |
@@ -85,11 +86,16 @@ | |||
85 | * ground. | 86 | * ground. |
86 | */ | 87 | */ |
87 | pmx_gpio_header: pmx-gpio-header { | 88 | pmx_gpio_header: pmx-gpio-header { |
88 | marvell,pins = "mpp17", "mpp7", "mpp29", "mpp28", | 89 | marvell,pins = "mpp17", "mpp29", "mpp28", |
89 | "mpp35", "mpp34", "mpp40"; | 90 | "mpp35", "mpp34", "mpp40"; |
90 | marvell,function = "gpio"; | 91 | marvell,function = "gpio"; |
91 | }; | 92 | }; |
92 | 93 | ||
94 | pmx_gpio_header_gpo: pxm-gpio-header-gpo { | ||
95 | marvell,pins = "mpp7"; | ||
96 | marvell,function = "gpo"; | ||
97 | }; | ||
98 | |||
93 | pmx_gpio_init: pmx-init { | 99 | pmx_gpio_init: pmx-init { |
94 | marvell,pins = "mpp38"; | 100 | marvell,pins = "mpp38"; |
95 | marvell,function = "gpio"; | 101 | marvell,function = "gpio"; |
diff --git a/arch/arm/boot/dts/s5pv210.dtsi b/arch/arm/boot/dts/s5pv210.dtsi index 8344a0ee2b86..b03fe747b98c 100644 --- a/arch/arm/boot/dts/s5pv210.dtsi +++ b/arch/arm/boot/dts/s5pv210.dtsi | |||
@@ -461,6 +461,7 @@ | |||
461 | compatible = "samsung,exynos4210-ohci"; | 461 | compatible = "samsung,exynos4210-ohci"; |
462 | reg = <0xec300000 0x100>; | 462 | reg = <0xec300000 0x100>; |
463 | interrupts = <23>; | 463 | interrupts = <23>; |
464 | interrupt-parent = <&vic1>; | ||
464 | clocks = <&clocks CLK_USB_HOST>; | 465 | clocks = <&clocks CLK_USB_HOST>; |
465 | clock-names = "usbhost"; | 466 | clock-names = "usbhost"; |
466 | #address-cells = <1>; | 467 | #address-cells = <1>; |
diff --git a/arch/arm/boot/dts/spear1310-evb.dts b/arch/arm/boot/dts/spear1310-evb.dts index e48857249ce7..3d83992efd90 100644 --- a/arch/arm/boot/dts/spear1310-evb.dts +++ b/arch/arm/boot/dts/spear1310-evb.dts | |||
@@ -349,7 +349,7 @@ | |||
349 | spi0: spi@e0100000 { | 349 | spi0: spi@e0100000 { |
350 | status = "okay"; | 350 | status = "okay"; |
351 | num-cs = <3>; | 351 | num-cs = <3>; |
352 | cs-gpios = <&gpio1 7 0>, <&spics 0>, <&spics 1>; | 352 | cs-gpios = <&gpio1 7 0>, <&spics 0 0>, <&spics 1 0>; |
353 | 353 | ||
354 | stmpe610@0 { | 354 | stmpe610@0 { |
355 | compatible = "st,stmpe610"; | 355 | compatible = "st,stmpe610"; |
diff --git a/arch/arm/boot/dts/spear1340.dtsi b/arch/arm/boot/dts/spear1340.dtsi index df2232d767ed..6361cbfcbe5e 100644 --- a/arch/arm/boot/dts/spear1340.dtsi +++ b/arch/arm/boot/dts/spear1340.dtsi | |||
@@ -141,8 +141,8 @@ | |||
141 | reg = <0xb4100000 0x1000>; | 141 | reg = <0xb4100000 0x1000>; |
142 | interrupts = <0 105 0x4>; | 142 | interrupts = <0 105 0x4>; |
143 | status = "disabled"; | 143 | status = "disabled"; |
144 | dmas = <&dwdma0 0x600 0 0 1>, /* 0xC << 11 */ | 144 | dmas = <&dwdma0 12 0 1>, |
145 | <&dwdma0 0x680 0 1 0>; /* 0xD << 7 */ | 145 | <&dwdma0 13 1 0>; |
146 | dma-names = "tx", "rx"; | 146 | dma-names = "tx", "rx"; |
147 | }; | 147 | }; |
148 | 148 | ||
diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi index 14594ce8c18a..8fd8a3328acb 100644 --- a/arch/arm/boot/dts/spear13xx.dtsi +++ b/arch/arm/boot/dts/spear13xx.dtsi | |||
@@ -100,7 +100,7 @@ | |||
100 | reg = <0xb2800000 0x1000>; | 100 | reg = <0xb2800000 0x1000>; |
101 | interrupts = <0 29 0x4>; | 101 | interrupts = <0 29 0x4>; |
102 | status = "disabled"; | 102 | status = "disabled"; |
103 | dmas = <&dwdma0 0 0 0 0>; | 103 | dmas = <&dwdma0 0 0 0>; |
104 | dma-names = "data"; | 104 | dma-names = "data"; |
105 | }; | 105 | }; |
106 | 106 | ||
@@ -288,8 +288,8 @@ | |||
288 | #size-cells = <0>; | 288 | #size-cells = <0>; |
289 | interrupts = <0 31 0x4>; | 289 | interrupts = <0 31 0x4>; |
290 | status = "disabled"; | 290 | status = "disabled"; |
291 | dmas = <&dwdma0 0x2000 0 0 0>, /* 0x4 << 11 */ | 291 | dmas = <&dwdma0 4 0 0>, |
292 | <&dwdma0 0x0280 0 0 0>; /* 0x5 << 7 */ | 292 | <&dwdma0 5 0 0>; |
293 | dma-names = "tx", "rx"; | 293 | dma-names = "tx", "rx"; |
294 | }; | 294 | }; |
295 | 295 | ||
diff --git a/arch/arm/boot/dts/spear600.dtsi b/arch/arm/boot/dts/spear600.dtsi index 9f60a7b6a42b..bd379034993c 100644 --- a/arch/arm/boot/dts/spear600.dtsi +++ b/arch/arm/boot/dts/spear600.dtsi | |||
@@ -194,6 +194,7 @@ | |||
194 | rtc@fc900000 { | 194 | rtc@fc900000 { |
195 | compatible = "st,spear600-rtc"; | 195 | compatible = "st,spear600-rtc"; |
196 | reg = <0xfc900000 0x1000>; | 196 | reg = <0xfc900000 0x1000>; |
197 | interrupt-parent = <&vic0>; | ||
197 | interrupts = <10>; | 198 | interrupts = <10>; |
198 | status = "disabled"; | 199 | status = "disabled"; |
199 | }; | 200 | }; |
diff --git a/arch/arm/boot/dts/stih407.dtsi b/arch/arm/boot/dts/stih407.dtsi index d60f0d8add26..e4b508ce38a2 100644 --- a/arch/arm/boot/dts/stih407.dtsi +++ b/arch/arm/boot/dts/stih407.dtsi | |||
@@ -8,6 +8,7 @@ | |||
8 | */ | 8 | */ |
9 | #include "stih407-clock.dtsi" | 9 | #include "stih407-clock.dtsi" |
10 | #include "stih407-family.dtsi" | 10 | #include "stih407-family.dtsi" |
11 | #include <dt-bindings/gpio/gpio.h> | ||
11 | / { | 12 | / { |
12 | soc { | 13 | soc { |
13 | sti-display-subsystem { | 14 | sti-display-subsystem { |
@@ -112,7 +113,7 @@ | |||
112 | <&clk_s_d2_quadfs 0>, | 113 | <&clk_s_d2_quadfs 0>, |
113 | <&clk_s_d2_quadfs 1>; | 114 | <&clk_s_d2_quadfs 1>; |
114 | 115 | ||
115 | hdmi,hpd-gpio = <&pio5 3>; | 116 | hdmi,hpd-gpio = <&pio5 3 GPIO_ACTIVE_LOW>; |
116 | reset-names = "hdmi"; | 117 | reset-names = "hdmi"; |
117 | resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>; | 118 | resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>; |
118 | ddc = <&hdmiddc>; | 119 | ddc = <&hdmiddc>; |
diff --git a/arch/arm/boot/dts/stih410.dtsi b/arch/arm/boot/dts/stih410.dtsi index 40318869c733..3c32fb8cdcac 100644 --- a/arch/arm/boot/dts/stih410.dtsi +++ b/arch/arm/boot/dts/stih410.dtsi | |||
@@ -9,6 +9,7 @@ | |||
9 | #include "stih410-clock.dtsi" | 9 | #include "stih410-clock.dtsi" |
10 | #include "stih407-family.dtsi" | 10 | #include "stih407-family.dtsi" |
11 | #include "stih410-pinctrl.dtsi" | 11 | #include "stih410-pinctrl.dtsi" |
12 | #include <dt-bindings/gpio/gpio.h> | ||
12 | / { | 13 | / { |
13 | aliases { | 14 | aliases { |
14 | bdisp0 = &bdisp0; | 15 | bdisp0 = &bdisp0; |
@@ -203,7 +204,7 @@ | |||
203 | <&clk_s_d2_quadfs 0>, | 204 | <&clk_s_d2_quadfs 0>, |
204 | <&clk_s_d2_quadfs 1>; | 205 | <&clk_s_d2_quadfs 1>; |
205 | 206 | ||
206 | hdmi,hpd-gpio = <&pio5 3>; | 207 | hdmi,hpd-gpio = <&pio5 3 GPIO_ACTIVE_LOW>; |
207 | reset-names = "hdmi"; | 208 | reset-names = "hdmi"; |
208 | resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>; | 209 | resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>; |
209 | ddc = <&hdmiddc>; | 210 | ddc = <&hdmiddc>; |
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h index 31d33ee41e28..48e6ee2d4f5c 100644 --- a/arch/arm/include/asm/kvm_arm.h +++ b/arch/arm/include/asm/kvm_arm.h | |||
@@ -162,13 +162,8 @@ | |||
162 | #define VTTBR_X (5 - KVM_T0SZ) | 162 | #define VTTBR_X (5 - KVM_T0SZ) |
163 | #endif | 163 | #endif |
164 | #define VTTBR_BADDR_MASK (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_X) | 164 | #define VTTBR_BADDR_MASK (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_X) |
165 | #define VTTBR_VMID_SHIFT (48LLU) | 165 | #define VTTBR_VMID_SHIFT _AC(48, ULL) |
166 | #define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT) | 166 | #define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT) |
167 | |||
168 | /* Hyp Syndrome Register (HSR) bits */ | ||
169 | #define HSR_EC_SHIFT (26) | ||
170 | #define VTTBR_VMID_SHIFT (48LLU) | ||
171 | #define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT) | ||
172 | 167 | ||
173 | /* Hyp Syndrome Register (HSR) bits */ | 168 | /* Hyp Syndrome Register (HSR) bits */ |
174 | #define HSR_EC_SHIFT (26) | 169 | #define HSR_EC_SHIFT (26) |
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h index 9b32f76bb0dd..10f662498eb7 100644 --- a/arch/arm/include/asm/mmu_context.h +++ b/arch/arm/include/asm/mmu_context.h | |||
@@ -61,6 +61,7 @@ static inline void check_and_switch_context(struct mm_struct *mm, | |||
61 | cpu_switch_mm(mm->pgd, mm); | 61 | cpu_switch_mm(mm->pgd, mm); |
62 | } | 62 | } |
63 | 63 | ||
64 | #ifndef MODULE | ||
64 | #define finish_arch_post_lock_switch \ | 65 | #define finish_arch_post_lock_switch \ |
65 | finish_arch_post_lock_switch | 66 | finish_arch_post_lock_switch |
66 | static inline void finish_arch_post_lock_switch(void) | 67 | static inline void finish_arch_post_lock_switch(void) |
@@ -82,6 +83,7 @@ static inline void finish_arch_post_lock_switch(void) | |||
82 | preempt_enable_no_resched(); | 83 | preempt_enable_no_resched(); |
83 | } | 84 | } |
84 | } | 85 | } |
86 | #endif /* !MODULE */ | ||
85 | 87 | ||
86 | #endif /* CONFIG_MMU */ | 88 | #endif /* CONFIG_MMU */ |
87 | 89 | ||
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c index f36b5b1acd1f..05b2f8294968 100644 --- a/arch/arm/kvm/handle_exit.c +++ b/arch/arm/kvm/handle_exit.c | |||
@@ -45,7 +45,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
45 | 45 | ||
46 | ret = kvm_psci_call(vcpu); | 46 | ret = kvm_psci_call(vcpu); |
47 | if (ret < 0) { | 47 | if (ret < 0) { |
48 | kvm_inject_undefined(vcpu); | 48 | vcpu_set_reg(vcpu, 0, ~0UL); |
49 | return 1; | 49 | return 1; |
50 | } | 50 | } |
51 | 51 | ||
@@ -54,7 +54,16 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
54 | 54 | ||
55 | static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) | 55 | static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) |
56 | { | 56 | { |
57 | kvm_inject_undefined(vcpu); | 57 | /* |
58 | * "If an SMC instruction executed at Non-secure EL1 is | ||
59 | * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a | ||
60 | * Trap exception, not a Secure Monitor Call exception [...]" | ||
61 | * | ||
62 | * We need to advance the PC after the trap, as it would | ||
63 | * otherwise return to the same address... | ||
64 | */ | ||
65 | vcpu_set_reg(vcpu, 0, ~0UL); | ||
66 | kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); | ||
58 | return 1; | 67 | return 1; |
59 | } | 68 | } |
60 | 69 | ||
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c index 3a10c9f1d0a4..387ee2a11e36 100644 --- a/arch/arm/kvm/mmio.c +++ b/arch/arm/kvm/mmio.c | |||
@@ -113,7 +113,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
113 | } | 113 | } |
114 | 114 | ||
115 | trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr, | 115 | trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr, |
116 | data); | 116 | &data); |
117 | data = vcpu_data_host_to_guest(vcpu, data, len); | 117 | data = vcpu_data_host_to_guest(vcpu, data, len); |
118 | vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data); | 118 | vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data); |
119 | } | 119 | } |
@@ -189,14 +189,14 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, | |||
189 | data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt), | 189 | data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt), |
190 | len); | 190 | len); |
191 | 191 | ||
192 | trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data); | 192 | trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, &data); |
193 | mmio_write_buf(data_buf, len, data); | 193 | mmio_write_buf(data_buf, len, data); |
194 | 194 | ||
195 | ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len, | 195 | ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len, |
196 | data_buf); | 196 | data_buf); |
197 | } else { | 197 | } else { |
198 | trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len, | 198 | trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len, |
199 | fault_ipa, 0); | 199 | fault_ipa, NULL); |
200 | 200 | ||
201 | ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len, | 201 | ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len, |
202 | data_buf); | 202 | data_buf); |
diff --git a/arch/arm/mach-pxa/tosa-bt.c b/arch/arm/mach-pxa/tosa-bt.c index e0a53208880a..b59a7a2df4e3 100644 --- a/arch/arm/mach-pxa/tosa-bt.c +++ b/arch/arm/mach-pxa/tosa-bt.c | |||
@@ -132,3 +132,7 @@ static struct platform_driver tosa_bt_driver = { | |||
132 | }, | 132 | }, |
133 | }; | 133 | }; |
134 | module_platform_driver(tosa_bt_driver); | 134 | module_platform_driver(tosa_bt_driver); |
135 | |||
136 | MODULE_LICENSE("GPL"); | ||
137 | MODULE_AUTHOR("Dmitry Baryshkov"); | ||
138 | MODULE_DESCRIPTION("Bluetooth built-in chip control"); | ||
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 9bfd334f6e9b..fb86f4d89db1 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -815,13 +815,31 @@ static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_add | |||
815 | __arm_dma_free(dev, size, cpu_addr, handle, attrs, true); | 815 | __arm_dma_free(dev, size, cpu_addr, handle, attrs, true); |
816 | } | 816 | } |
817 | 817 | ||
818 | /* | ||
819 | * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems | ||
820 | * that the intention is to allow exporting memory allocated via the | ||
821 | * coherent DMA APIs through the dma_buf API, which only accepts a | ||
822 | * scattertable. This presents a couple of problems: | ||
823 | * 1. Not all memory allocated via the coherent DMA APIs is backed by | ||
824 | * a struct page | ||
825 | * 2. Passing coherent DMA memory into the streaming APIs is not allowed | ||
826 | * as we will try to flush the memory through a different alias to that | ||
827 | * actually being used (and the flushes are redundant.) | ||
828 | */ | ||
818 | int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt, | 829 | int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt, |
819 | void *cpu_addr, dma_addr_t handle, size_t size, | 830 | void *cpu_addr, dma_addr_t handle, size_t size, |
820 | struct dma_attrs *attrs) | 831 | struct dma_attrs *attrs) |
821 | { | 832 | { |
822 | struct page *page = pfn_to_page(dma_to_pfn(dev, handle)); | 833 | unsigned long pfn = dma_to_pfn(dev, handle); |
834 | struct page *page; | ||
823 | int ret; | 835 | int ret; |
824 | 836 | ||
837 | /* If the PFN is not valid, we do not have a struct page */ | ||
838 | if (!pfn_valid(pfn)) | ||
839 | return -ENXIO; | ||
840 | |||
841 | page = pfn_to_page(pfn); | ||
842 | |||
825 | ret = sg_alloc_table(sgt, 1, GFP_KERNEL); | 843 | ret = sg_alloc_table(sgt, 1, GFP_KERNEL); |
826 | if (unlikely(ret)) | 844 | if (unlikely(ret)) |
827 | return ret; | 845 | return ret; |
diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c index a4ec240ee7ba..3eb018fa1a1f 100644 --- a/arch/arm/probes/kprobes/core.c +++ b/arch/arm/probes/kprobes/core.c | |||
@@ -433,6 +433,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs) | |||
433 | struct hlist_node *tmp; | 433 | struct hlist_node *tmp; |
434 | unsigned long flags, orig_ret_address = 0; | 434 | unsigned long flags, orig_ret_address = 0; |
435 | unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; | 435 | unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; |
436 | kprobe_opcode_t *correct_ret_addr = NULL; | ||
436 | 437 | ||
437 | INIT_HLIST_HEAD(&empty_rp); | 438 | INIT_HLIST_HEAD(&empty_rp); |
438 | kretprobe_hash_lock(current, &head, &flags); | 439 | kretprobe_hash_lock(current, &head, &flags); |
@@ -455,14 +456,34 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs) | |||
455 | /* another task is sharing our hash bucket */ | 456 | /* another task is sharing our hash bucket */ |
456 | continue; | 457 | continue; |
457 | 458 | ||
459 | orig_ret_address = (unsigned long)ri->ret_addr; | ||
460 | |||
461 | if (orig_ret_address != trampoline_address) | ||
462 | /* | ||
463 | * This is the real return address. Any other | ||
464 | * instances associated with this task are for | ||
465 | * other calls deeper on the call stack | ||
466 | */ | ||
467 | break; | ||
468 | } | ||
469 | |||
470 | kretprobe_assert(ri, orig_ret_address, trampoline_address); | ||
471 | |||
472 | correct_ret_addr = ri->ret_addr; | ||
473 | hlist_for_each_entry_safe(ri, tmp, head, hlist) { | ||
474 | if (ri->task != current) | ||
475 | /* another task is sharing our hash bucket */ | ||
476 | continue; | ||
477 | |||
478 | orig_ret_address = (unsigned long)ri->ret_addr; | ||
458 | if (ri->rp && ri->rp->handler) { | 479 | if (ri->rp && ri->rp->handler) { |
459 | __this_cpu_write(current_kprobe, &ri->rp->kp); | 480 | __this_cpu_write(current_kprobe, &ri->rp->kp); |
460 | get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE; | 481 | get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE; |
482 | ri->ret_addr = correct_ret_addr; | ||
461 | ri->rp->handler(ri, regs); | 483 | ri->rp->handler(ri, regs); |
462 | __this_cpu_write(current_kprobe, NULL); | 484 | __this_cpu_write(current_kprobe, NULL); |
463 | } | 485 | } |
464 | 486 | ||
465 | orig_ret_address = (unsigned long)ri->ret_addr; | ||
466 | recycle_rp_inst(ri, &empty_rp); | 487 | recycle_rp_inst(ri, &empty_rp); |
467 | 488 | ||
468 | if (orig_ret_address != trampoline_address) | 489 | if (orig_ret_address != trampoline_address) |
@@ -474,7 +495,6 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs) | |||
474 | break; | 495 | break; |
475 | } | 496 | } |
476 | 497 | ||
477 | kretprobe_assert(ri, orig_ret_address, trampoline_address); | ||
478 | kretprobe_hash_unlock(current, &flags); | 498 | kretprobe_hash_unlock(current, &flags); |
479 | 499 | ||
480 | hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { | 500 | hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { |
diff --git a/arch/arm/probes/kprobes/test-core.c b/arch/arm/probes/kprobes/test-core.c index 9775de22e2ff..a48354de1aa1 100644 --- a/arch/arm/probes/kprobes/test-core.c +++ b/arch/arm/probes/kprobes/test-core.c | |||
@@ -976,7 +976,10 @@ static void coverage_end(void) | |||
976 | void __naked __kprobes_test_case_start(void) | 976 | void __naked __kprobes_test_case_start(void) |
977 | { | 977 | { |
978 | __asm__ __volatile__ ( | 978 | __asm__ __volatile__ ( |
979 | "stmdb sp!, {r4-r11} \n\t" | 979 | "mov r2, sp \n\t" |
980 | "bic r3, r2, #7 \n\t" | ||
981 | "mov sp, r3 \n\t" | ||
982 | "stmdb sp!, {r2-r11} \n\t" | ||
980 | "sub sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t" | 983 | "sub sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t" |
981 | "bic r0, lr, #1 @ r0 = inline data \n\t" | 984 | "bic r0, lr, #1 @ r0 = inline data \n\t" |
982 | "mov r1, sp \n\t" | 985 | "mov r1, sp \n\t" |
@@ -996,7 +999,8 @@ void __naked __kprobes_test_case_end_32(void) | |||
996 | "movne pc, r0 \n\t" | 999 | "movne pc, r0 \n\t" |
997 | "mov r0, r4 \n\t" | 1000 | "mov r0, r4 \n\t" |
998 | "add sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t" | 1001 | "add sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t" |
999 | "ldmia sp!, {r4-r11} \n\t" | 1002 | "ldmia sp!, {r2-r11} \n\t" |
1003 | "mov sp, r2 \n\t" | ||
1000 | "mov pc, r0 \n\t" | 1004 | "mov pc, r0 \n\t" |
1001 | ); | 1005 | ); |
1002 | } | 1006 | } |
@@ -1012,7 +1016,8 @@ void __naked __kprobes_test_case_end_16(void) | |||
1012 | "bxne r0 \n\t" | 1016 | "bxne r0 \n\t" |
1013 | "mov r0, r4 \n\t" | 1017 | "mov r0, r4 \n\t" |
1014 | "add sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t" | 1018 | "add sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t" |
1015 | "ldmia sp!, {r4-r11} \n\t" | 1019 | "ldmia sp!, {r2-r11} \n\t" |
1020 | "mov sp, r2 \n\t" | ||
1016 | "bx r0 \n\t" | 1021 | "bx r0 \n\t" |
1017 | ); | 1022 | ); |
1018 | } | 1023 | } |
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 190471794853..e8c8d5b69886 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig | |||
@@ -59,6 +59,7 @@ config ARM64 | |||
59 | select HAVE_ARCH_SECCOMP_FILTER | 59 | select HAVE_ARCH_SECCOMP_FILTER |
60 | select HAVE_ARCH_TRACEHOOK | 60 | select HAVE_ARCH_TRACEHOOK |
61 | select HAVE_BPF_JIT | 61 | select HAVE_BPF_JIT |
62 | select HAVE_EBPF_JIT | ||
62 | select HAVE_C_RECORDMCOUNT | 63 | select HAVE_C_RECORDMCOUNT |
63 | select HAVE_CC_STACKPROTECTOR | 64 | select HAVE_CC_STACKPROTECTOR |
64 | select HAVE_CMPXCHG_DOUBLE | 65 | select HAVE_CMPXCHG_DOUBLE |
@@ -687,6 +688,18 @@ config FORCE_MAX_ZONEORDER | |||
687 | However for 4K, we choose a higher default value, 11 as opposed to 10, giving us | 688 | However for 4K, we choose a higher default value, 11 as opposed to 10, giving us |
688 | 4M allocations matching the default size used by generic code. | 689 | 4M allocations matching the default size used by generic code. |
689 | 690 | ||
691 | config UNMAP_KERNEL_AT_EL0 | ||
692 | bool "Unmap kernel when running in userspace (aka \"KAISER\")" if EXPERT | ||
693 | default y | ||
694 | help | ||
695 | Speculation attacks against some high-performance processors can | ||
696 | be used to bypass MMU permission checks and leak kernel data to | ||
697 | userspace. This can be defended against by unmapping the kernel | ||
698 | when running in userspace, mapping it back in on exception entry | ||
699 | via a trampoline page in the vector table. | ||
700 | |||
701 | If unsure, say Y. | ||
702 | |||
690 | menuconfig ARMV8_DEPRECATED | 703 | menuconfig ARMV8_DEPRECATED |
691 | bool "Emulate deprecated/obsolete ARMv8 instructions" | 704 | bool "Emulate deprecated/obsolete ARMv8 instructions" |
692 | depends on COMPAT | 705 | depends on COMPAT |
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index e450bb6d21bd..5f6c8345c0e6 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h | |||
@@ -398,17 +398,4 @@ alternative_endif | |||
398 | mrs \rd, sp_el0 | 398 | mrs \rd, sp_el0 |
399 | .endm | 399 | .endm |
400 | 400 | ||
401 | /* | ||
402 | * Errata workaround post TTBR0_EL1 update. | ||
403 | */ | ||
404 | .macro post_ttbr0_update_workaround | ||
405 | #ifdef CONFIG_CAVIUM_ERRATUM_27456 | ||
406 | alternative_if ARM64_WORKAROUND_CAVIUM_27456 | ||
407 | ic iallu | ||
408 | dsb nsh | ||
409 | isb | ||
410 | alternative_else_nop_endif | ||
411 | #endif | ||
412 | .endm | ||
413 | |||
414 | #endif /* __ASM_ASSEMBLER_H */ | 401 | #endif /* __ASM_ASSEMBLER_H */ |
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 8e1f826caf99..2c6497a042b1 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h | |||
@@ -33,10 +33,11 @@ | |||
33 | #define ARM64_HAS_NO_HW_PREFETCH 8 | 33 | #define ARM64_HAS_NO_HW_PREFETCH 8 |
34 | #define ARM64_HAS_UAO 9 | 34 | #define ARM64_HAS_UAO 9 |
35 | #define ARM64_ALT_PAN_NOT_UAO 10 | 35 | #define ARM64_ALT_PAN_NOT_UAO 10 |
36 | #define ARM64_HAS_VIRT_HOST_EXTN 11 | ||
37 | #define ARM64_WORKAROUND_CAVIUM_27456 12 | ||
38 | #define ARM64_UNMAP_KERNEL_AT_EL0 23 | ||
36 | 39 | ||
37 | #define ARM64_WORKAROUND_CAVIUM_27456 11 | 40 | #define ARM64_NCAPS 24 |
38 | #define ARM64_HAS_VIRT_HOST_EXTN 12 | ||
39 | #define ARM64_NCAPS 13 | ||
40 | 41 | ||
41 | #ifndef __ASSEMBLY__ | 42 | #ifndef __ASSEMBLY__ |
42 | 43 | ||
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h index 932f5a56d1a6..48e317e4a32d 100644 --- a/arch/arm64/include/asm/efi.h +++ b/arch/arm64/include/asm/efi.h | |||
@@ -76,12 +76,14 @@ static inline void efi_set_pgd(struct mm_struct *mm) | |||
76 | if (mm != current->active_mm) { | 76 | if (mm != current->active_mm) { |
77 | /* | 77 | /* |
78 | * Update the current thread's saved ttbr0 since it is | 78 | * Update the current thread's saved ttbr0 since it is |
79 | * restored as part of a return from exception. Set | 79 | * restored as part of a return from exception. Enable |
80 | * the hardware TTBR0_EL1 using cpu_switch_mm() | 80 | * access to the valid TTBR0_EL1 and invoke the errata |
81 | * directly to enable potential errata workarounds. | 81 | * workaround directly since there is no return from |
82 | * exception when invoking the EFI run-time services. | ||
82 | */ | 83 | */ |
83 | update_saved_ttbr0(current, mm); | 84 | update_saved_ttbr0(current, mm); |
84 | cpu_switch_mm(mm->pgd, mm); | 85 | uaccess_ttbr0_enable(); |
86 | post_ttbr_update_workaround(); | ||
85 | } else { | 87 | } else { |
86 | /* | 88 | /* |
87 | * Defer the switch to the current thread's TTBR0_EL1 | 89 | * Defer the switch to the current thread's TTBR0_EL1 |
@@ -89,7 +91,7 @@ static inline void efi_set_pgd(struct mm_struct *mm) | |||
89 | * thread's saved ttbr0 corresponding to its active_mm | 91 | * thread's saved ttbr0 corresponding to its active_mm |
90 | * (if different from init_mm). | 92 | * (if different from init_mm). |
91 | */ | 93 | */ |
92 | cpu_set_reserved_ttbr0(); | 94 | uaccess_ttbr0_disable(); |
93 | if (current->active_mm != &init_mm) | 95 | if (current->active_mm != &init_mm) |
94 | update_saved_ttbr0(current, current->active_mm); | 96 | update_saved_ttbr0(current, current->active_mm); |
95 | } | 97 | } |
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h index f772e15c4766..2d4e9c26f8f6 100644 --- a/arch/arm64/include/asm/esr.h +++ b/arch/arm64/include/asm/esr.h | |||
@@ -109,6 +109,46 @@ | |||
109 | ((ESR_ELx_EC_BRK64 << ESR_ELx_EC_SHIFT) | ESR_ELx_IL | \ | 109 | ((ESR_ELx_EC_BRK64 << ESR_ELx_EC_SHIFT) | ESR_ELx_IL | \ |
110 | ((imm) & 0xffff)) | 110 | ((imm) & 0xffff)) |
111 | 111 | ||
112 | /* ISS field definitions for System instruction traps */ | ||
113 | #define ESR_ELx_SYS64_ISS_RES0_SHIFT 22 | ||
114 | #define ESR_ELx_SYS64_ISS_RES0_MASK (UL(0x7) << ESR_ELx_SYS64_ISS_RES0_SHIFT) | ||
115 | #define ESR_ELx_SYS64_ISS_DIR_MASK 0x1 | ||
116 | #define ESR_ELx_SYS64_ISS_DIR_READ 0x1 | ||
117 | #define ESR_ELx_SYS64_ISS_DIR_WRITE 0x0 | ||
118 | |||
119 | #define ESR_ELx_SYS64_ISS_RT_SHIFT 5 | ||
120 | #define ESR_ELx_SYS64_ISS_RT_MASK (UL(0x1f) << ESR_ELx_SYS64_ISS_RT_SHIFT) | ||
121 | #define ESR_ELx_SYS64_ISS_CRM_SHIFT 1 | ||
122 | #define ESR_ELx_SYS64_ISS_CRM_MASK (UL(0xf) << ESR_ELx_SYS64_ISS_CRM_SHIFT) | ||
123 | #define ESR_ELx_SYS64_ISS_CRN_SHIFT 10 | ||
124 | #define ESR_ELx_SYS64_ISS_CRN_MASK (UL(0xf) << ESR_ELx_SYS64_ISS_CRN_SHIFT) | ||
125 | #define ESR_ELx_SYS64_ISS_OP1_SHIFT 14 | ||
126 | #define ESR_ELx_SYS64_ISS_OP1_MASK (UL(0x7) << ESR_ELx_SYS64_ISS_OP1_SHIFT) | ||
127 | #define ESR_ELx_SYS64_ISS_OP2_SHIFT 17 | ||
128 | #define ESR_ELx_SYS64_ISS_OP2_MASK (UL(0x7) << ESR_ELx_SYS64_ISS_OP2_SHIFT) | ||
129 | #define ESR_ELx_SYS64_ISS_OP0_SHIFT 20 | ||
130 | #define ESR_ELx_SYS64_ISS_OP0_MASK (UL(0x3) << ESR_ELx_SYS64_ISS_OP0_SHIFT) | ||
131 | #define ESR_ELx_SYS64_ISS_SYS_MASK (ESR_ELx_SYS64_ISS_OP0_MASK | \ | ||
132 | ESR_ELx_SYS64_ISS_OP1_MASK | \ | ||
133 | ESR_ELx_SYS64_ISS_OP2_MASK | \ | ||
134 | ESR_ELx_SYS64_ISS_CRN_MASK | \ | ||
135 | ESR_ELx_SYS64_ISS_CRM_MASK) | ||
136 | #define ESR_ELx_SYS64_ISS_SYS_VAL(op0, op1, op2, crn, crm) \ | ||
137 | (((op0) << ESR_ELx_SYS64_ISS_OP0_SHIFT) | \ | ||
138 | ((op1) << ESR_ELx_SYS64_ISS_OP1_SHIFT) | \ | ||
139 | ((op2) << ESR_ELx_SYS64_ISS_OP2_SHIFT) | \ | ||
140 | ((crn) << ESR_ELx_SYS64_ISS_CRN_SHIFT) | \ | ||
141 | ((crm) << ESR_ELx_SYS64_ISS_CRM_SHIFT)) | ||
142 | |||
143 | #define ESR_ELx_SYS64_ISS_SYS_OP_MASK (ESR_ELx_SYS64_ISS_SYS_MASK | \ | ||
144 | ESR_ELx_SYS64_ISS_DIR_MASK) | ||
145 | |||
146 | #define ESR_ELx_SYS64_ISS_SYS_CNTVCT (ESR_ELx_SYS64_ISS_SYS_VAL(3, 3, 2, 14, 0) | \ | ||
147 | ESR_ELx_SYS64_ISS_DIR_READ) | ||
148 | |||
149 | #define ESR_ELx_SYS64_ISS_SYS_CNTFRQ (ESR_ELx_SYS64_ISS_SYS_VAL(3, 3, 0, 14, 0) | \ | ||
150 | ESR_ELx_SYS64_ISS_DIR_READ) | ||
151 | |||
112 | #ifndef __ASSEMBLY__ | 152 | #ifndef __ASSEMBLY__ |
113 | #include <asm/types.h> | 153 | #include <asm/types.h> |
114 | 154 | ||
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h index 1a617d46fce9..03a1e908b8e9 100644 --- a/arch/arm64/include/asm/fixmap.h +++ b/arch/arm64/include/asm/fixmap.h | |||
@@ -50,6 +50,11 @@ enum fixed_addresses { | |||
50 | 50 | ||
51 | FIX_EARLYCON_MEM_BASE, | 51 | FIX_EARLYCON_MEM_BASE, |
52 | FIX_TEXT_POKE0, | 52 | FIX_TEXT_POKE0, |
53 | #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 | ||
54 | FIX_ENTRY_TRAMP_DATA, | ||
55 | FIX_ENTRY_TRAMP_TEXT, | ||
56 | #define TRAMP_VALIAS (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT)) | ||
57 | #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ | ||
53 | __end_of_permanent_fixed_addresses, | 58 | __end_of_permanent_fixed_addresses, |
54 | 59 | ||
55 | /* | 60 | /* |
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h index 7803343e5881..77a27af01371 100644 --- a/arch/arm64/include/asm/kernel-pgtable.h +++ b/arch/arm64/include/asm/kernel-pgtable.h | |||
@@ -78,8 +78,16 @@ | |||
78 | /* | 78 | /* |
79 | * Initial memory map attributes. | 79 | * Initial memory map attributes. |
80 | */ | 80 | */ |
81 | #define SWAPPER_PTE_FLAGS (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) | 81 | #define _SWAPPER_PTE_FLAGS (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) |
82 | #define SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) | 82 | #define _SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) |
83 | |||
84 | #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 | ||
85 | #define SWAPPER_PTE_FLAGS (_SWAPPER_PTE_FLAGS | PTE_NG) | ||
86 | #define SWAPPER_PMD_FLAGS (_SWAPPER_PMD_FLAGS | PMD_SECT_NG) | ||
87 | #else | ||
88 | #define SWAPPER_PTE_FLAGS _SWAPPER_PTE_FLAGS | ||
89 | #define SWAPPER_PMD_FLAGS _SWAPPER_PMD_FLAGS | ||
90 | #endif | ||
83 | 91 | ||
84 | #if ARM64_SWAPPER_USES_SECTION_MAPS | 92 | #if ARM64_SWAPPER_USES_SECTION_MAPS |
85 | #define SWAPPER_MM_MMUFLAGS (PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS) | 93 | #define SWAPPER_MM_MMUFLAGS (PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS) |
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index 5472251c8e6c..f7448b6afe58 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h | |||
@@ -16,6 +16,11 @@ | |||
16 | #ifndef __ASM_MMU_H | 16 | #ifndef __ASM_MMU_H |
17 | #define __ASM_MMU_H | 17 | #define __ASM_MMU_H |
18 | 18 | ||
19 | #define USER_ASID_FLAG (UL(1) << 48) | ||
20 | #define TTBR_ASID_MASK (UL(0xffff) << 48) | ||
21 | |||
22 | #ifndef __ASSEMBLY__ | ||
23 | |||
19 | typedef struct { | 24 | typedef struct { |
20 | atomic64_t id; | 25 | atomic64_t id; |
21 | void *vdso; | 26 | void *vdso; |
@@ -28,6 +33,12 @@ typedef struct { | |||
28 | */ | 33 | */ |
29 | #define ASID(mm) ((mm)->context.id.counter & 0xffff) | 34 | #define ASID(mm) ((mm)->context.id.counter & 0xffff) |
30 | 35 | ||
36 | static inline bool arm64_kernel_unmapped_at_el0(void) | ||
37 | { | ||
38 | return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) && | ||
39 | cpus_have_cap(ARM64_UNMAP_KERNEL_AT_EL0); | ||
40 | } | ||
41 | |||
31 | extern void paging_init(void); | 42 | extern void paging_init(void); |
32 | extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt); | 43 | extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt); |
33 | extern void init_mem_pgprot(void); | 44 | extern void init_mem_pgprot(void); |
@@ -36,4 +47,5 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, | |||
36 | pgprot_t prot, bool allow_block_mappings); | 47 | pgprot_t prot, bool allow_block_mappings); |
37 | extern void *fixmap_remap_fdt(phys_addr_t dt_phys); | 48 | extern void *fixmap_remap_fdt(phys_addr_t dt_phys); |
38 | 49 | ||
50 | #endif /* !__ASSEMBLY__ */ | ||
39 | #endif | 51 | #endif |
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index e53d30c6f779..5c1b168a3c22 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h | |||
@@ -59,6 +59,13 @@ static inline void cpu_set_reserved_ttbr0(void) | |||
59 | : "r" (ttbr)); | 59 | : "r" (ttbr)); |
60 | } | 60 | } |
61 | 61 | ||
62 | static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm) | ||
63 | { | ||
64 | BUG_ON(pgd == swapper_pg_dir); | ||
65 | cpu_set_reserved_ttbr0(); | ||
66 | cpu_do_switch_mm(virt_to_phys(pgd),mm); | ||
67 | } | ||
68 | |||
62 | /* | 69 | /* |
63 | * TCR.T0SZ value to use when the ID map is active. Usually equals | 70 | * TCR.T0SZ value to use when the ID map is active. Usually equals |
64 | * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in | 71 | * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in |
@@ -179,9 +186,10 @@ static inline void update_saved_ttbr0(struct task_struct *tsk, | |||
179 | struct mm_struct *mm) | 186 | struct mm_struct *mm) |
180 | { | 187 | { |
181 | if (system_uses_ttbr0_pan()) { | 188 | if (system_uses_ttbr0_pan()) { |
189 | u64 ttbr; | ||
182 | BUG_ON(mm->pgd == swapper_pg_dir); | 190 | BUG_ON(mm->pgd == swapper_pg_dir); |
183 | task_thread_info(tsk)->ttbr0 = | 191 | ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48; |
184 | virt_to_phys(mm->pgd) | ASID(mm) << 48; | 192 | WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr); |
185 | } | 193 | } |
186 | } | 194 | } |
187 | #else | 195 | #else |
@@ -228,4 +236,6 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
228 | #define deactivate_mm(tsk,mm) do { } while (0) | 236 | #define deactivate_mm(tsk,mm) do { } while (0) |
229 | #define activate_mm(prev,next) switch_mm(prev, next, current) | 237 | #define activate_mm(prev,next) switch_mm(prev, next, current) |
230 | 238 | ||
239 | void post_ttbr_update_workaround(void); | ||
240 | |||
231 | #endif | 241 | #endif |
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index 9786f770088d..d7890c0f2d3d 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h | |||
@@ -224,6 +224,8 @@ | |||
224 | #define TCR_TG1_16K (UL(1) << 30) | 224 | #define TCR_TG1_16K (UL(1) << 30) |
225 | #define TCR_TG1_4K (UL(2) << 30) | 225 | #define TCR_TG1_4K (UL(2) << 30) |
226 | #define TCR_TG1_64K (UL(3) << 30) | 226 | #define TCR_TG1_64K (UL(3) << 30) |
227 | |||
228 | #define TCR_A1 (UL(1) << 22) | ||
227 | #define TCR_ASID16 (UL(1) << 36) | 229 | #define TCR_ASID16 (UL(1) << 36) |
228 | #define TCR_TBI0 (UL(1) << 37) | 230 | #define TCR_TBI0 (UL(1) << 37) |
229 | #define TCR_HA (UL(1) << 39) | 231 | #define TCR_HA (UL(1) << 39) |
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 9a09ccf7122d..7519016e9846 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h | |||
@@ -61,8 +61,16 @@ extern void __pmd_error(const char *file, int line, unsigned long val); | |||
61 | extern void __pud_error(const char *file, int line, unsigned long val); | 61 | extern void __pud_error(const char *file, int line, unsigned long val); |
62 | extern void __pgd_error(const char *file, int line, unsigned long val); | 62 | extern void __pgd_error(const char *file, int line, unsigned long val); |
63 | 63 | ||
64 | #define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) | 64 | #define _PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) |
65 | #define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) | 65 | #define _PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) |
66 | |||
67 | #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 | ||
68 | #define PROT_DEFAULT (_PROT_DEFAULT | PTE_NG) | ||
69 | #define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_SECT_NG) | ||
70 | #else | ||
71 | #define PROT_DEFAULT _PROT_DEFAULT | ||
72 | #define PROT_SECT_DEFAULT _PROT_SECT_DEFAULT | ||
73 | #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ | ||
66 | 74 | ||
67 | #define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE)) | 75 | #define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE)) |
68 | #define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE)) | 76 | #define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE)) |
@@ -75,6 +83,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val); | |||
75 | #define PROT_SECT_NORMAL_EXEC (PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL)) | 83 | #define PROT_SECT_NORMAL_EXEC (PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL)) |
76 | 84 | ||
77 | #define _PAGE_DEFAULT (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL)) | 85 | #define _PAGE_DEFAULT (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL)) |
86 | #define _HYP_PAGE_DEFAULT (_PAGE_DEFAULT & ~PTE_NG) | ||
78 | 87 | ||
79 | #define PAGE_KERNEL __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE) | 88 | #define PAGE_KERNEL __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE) |
80 | #define PAGE_KERNEL_RO __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY) | 89 | #define PAGE_KERNEL_RO __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY) |
@@ -82,13 +91,13 @@ extern void __pgd_error(const char *file, int line, unsigned long val); | |||
82 | #define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE) | 91 | #define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE) |
83 | #define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT) | 92 | #define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT) |
84 | 93 | ||
85 | #define PAGE_HYP __pgprot(_PAGE_DEFAULT | PTE_HYP) | 94 | #define PAGE_HYP __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP) |
86 | #define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP) | 95 | #define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP) |
87 | 96 | ||
88 | #define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY) | 97 | #define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY) |
89 | #define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN) | 98 | #define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN) |
90 | 99 | ||
91 | #define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_PXN | PTE_UXN) | 100 | #define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_NG | PTE_PXN | PTE_UXN) |
92 | #define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE) | 101 | #define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE) |
93 | #define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE) | 102 | #define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE) |
94 | #define PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) | 103 | #define PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) |
@@ -706,6 +715,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, | |||
706 | 715 | ||
707 | extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; | 716 | extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; |
708 | extern pgd_t idmap_pg_dir[PTRS_PER_PGD]; | 717 | extern pgd_t idmap_pg_dir[PTRS_PER_PGD]; |
718 | extern pgd_t tramp_pg_dir[PTRS_PER_PGD]; | ||
709 | 719 | ||
710 | /* | 720 | /* |
711 | * Encode and decode a swap entry: | 721 | * Encode and decode a swap entry: |
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h index 14ad6e4e87d1..16cef2e8449e 100644 --- a/arch/arm64/include/asm/proc-fns.h +++ b/arch/arm64/include/asm/proc-fns.h | |||
@@ -35,12 +35,6 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr); | |||
35 | 35 | ||
36 | #include <asm/memory.h> | 36 | #include <asm/memory.h> |
37 | 37 | ||
38 | #define cpu_switch_mm(pgd,mm) \ | ||
39 | do { \ | ||
40 | BUG_ON(pgd == swapper_pg_dir); \ | ||
41 | cpu_do_switch_mm(virt_to_phys(pgd),mm); \ | ||
42 | } while (0) | ||
43 | |||
44 | #endif /* __ASSEMBLY__ */ | 38 | #endif /* __ASSEMBLY__ */ |
45 | #endif /* __KERNEL__ */ | 39 | #endif /* __KERNEL__ */ |
46 | #endif /* __ASM_PROCFNS_H */ | 40 | #endif /* __ASM_PROCFNS_H */ |
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index b460ae28e346..ad6bd8b26ada 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h | |||
@@ -23,6 +23,30 @@ | |||
23 | 23 | ||
24 | #include <linux/sched.h> | 24 | #include <linux/sched.h> |
25 | #include <asm/cputype.h> | 25 | #include <asm/cputype.h> |
26 | #include <asm/mmu.h> | ||
27 | |||
28 | /* | ||
29 | * Raw TLBI operations. | ||
30 | * | ||
31 | * Where necessary, use the __tlbi() macro to avoid asm() | ||
32 | * boilerplate. Drivers and most kernel code should use the TLB | ||
33 | * management routines in preference to the macro below. | ||
34 | * | ||
35 | * The macro can be used as __tlbi(op) or __tlbi(op, arg), depending | ||
36 | * on whether a particular TLBI operation takes an argument or | ||
37 | * not. The macros handles invoking the asm with or without the | ||
38 | * register argument as appropriate. | ||
39 | */ | ||
40 | #define __TLBI_0(op, arg) asm ("tlbi " #op) | ||
41 | #define __TLBI_1(op, arg) asm ("tlbi " #op ", %0" : : "r" (arg)) | ||
42 | #define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg) | ||
43 | |||
44 | #define __tlbi(op, ...) __TLBI_N(op, ##__VA_ARGS__, 1, 0) | ||
45 | |||
46 | #define __tlbi_user(op, arg) do { \ | ||
47 | if (arm64_kernel_unmapped_at_el0()) \ | ||
48 | __tlbi(op, (arg) | USER_ASID_FLAG); \ | ||
49 | } while (0) | ||
26 | 50 | ||
27 | /* | 51 | /* |
28 | * TLB Management | 52 | * TLB Management |
@@ -66,7 +90,7 @@ | |||
66 | static inline void local_flush_tlb_all(void) | 90 | static inline void local_flush_tlb_all(void) |
67 | { | 91 | { |
68 | dsb(nshst); | 92 | dsb(nshst); |
69 | asm("tlbi vmalle1"); | 93 | __tlbi(vmalle1); |
70 | dsb(nsh); | 94 | dsb(nsh); |
71 | isb(); | 95 | isb(); |
72 | } | 96 | } |
@@ -74,7 +98,7 @@ static inline void local_flush_tlb_all(void) | |||
74 | static inline void flush_tlb_all(void) | 98 | static inline void flush_tlb_all(void) |
75 | { | 99 | { |
76 | dsb(ishst); | 100 | dsb(ishst); |
77 | asm("tlbi vmalle1is"); | 101 | __tlbi(vmalle1is); |
78 | dsb(ish); | 102 | dsb(ish); |
79 | isb(); | 103 | isb(); |
80 | } | 104 | } |
@@ -84,7 +108,8 @@ static inline void flush_tlb_mm(struct mm_struct *mm) | |||
84 | unsigned long asid = ASID(mm) << 48; | 108 | unsigned long asid = ASID(mm) << 48; |
85 | 109 | ||
86 | dsb(ishst); | 110 | dsb(ishst); |
87 | asm("tlbi aside1is, %0" : : "r" (asid)); | 111 | __tlbi(aside1is, asid); |
112 | __tlbi_user(aside1is, asid); | ||
88 | dsb(ish); | 113 | dsb(ish); |
89 | } | 114 | } |
90 | 115 | ||
@@ -94,7 +119,8 @@ static inline void flush_tlb_page(struct vm_area_struct *vma, | |||
94 | unsigned long addr = uaddr >> 12 | (ASID(vma->vm_mm) << 48); | 119 | unsigned long addr = uaddr >> 12 | (ASID(vma->vm_mm) << 48); |
95 | 120 | ||
96 | dsb(ishst); | 121 | dsb(ishst); |
97 | asm("tlbi vale1is, %0" : : "r" (addr)); | 122 | __tlbi(vale1is, addr); |
123 | __tlbi_user(vale1is, addr); | ||
98 | dsb(ish); | 124 | dsb(ish); |
99 | } | 125 | } |
100 | 126 | ||
@@ -121,10 +147,13 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma, | |||
121 | 147 | ||
122 | dsb(ishst); | 148 | dsb(ishst); |
123 | for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) { | 149 | for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) { |
124 | if (last_level) | 150 | if (last_level) { |
125 | asm("tlbi vale1is, %0" : : "r"(addr)); | 151 | __tlbi(vale1is, addr); |
126 | else | 152 | __tlbi_user(vale1is, addr); |
127 | asm("tlbi vae1is, %0" : : "r"(addr)); | 153 | } else { |
154 | __tlbi(vae1is, addr); | ||
155 | __tlbi_user(vae1is, addr); | ||
156 | } | ||
128 | } | 157 | } |
129 | dsb(ish); | 158 | dsb(ish); |
130 | } | 159 | } |
@@ -149,7 +178,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end | |||
149 | 178 | ||
150 | dsb(ishst); | 179 | dsb(ishst); |
151 | for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) | 180 | for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) |
152 | asm("tlbi vaae1is, %0" : : "r"(addr)); | 181 | __tlbi(vaae1is, addr); |
153 | dsb(ish); | 182 | dsb(ish); |
154 | isb(); | 183 | isb(); |
155 | } | 184 | } |
@@ -163,7 +192,8 @@ static inline void __flush_tlb_pgtable(struct mm_struct *mm, | |||
163 | { | 192 | { |
164 | unsigned long addr = uaddr >> 12 | (ASID(mm) << 48); | 193 | unsigned long addr = uaddr >> 12 | (ASID(mm) << 48); |
165 | 194 | ||
166 | asm("tlbi vae1is, %0" : : "r" (addr)); | 195 | __tlbi(vae1is, addr); |
196 | __tlbi_user(vae1is, addr); | ||
167 | dsb(ish); | 197 | dsb(ish); |
168 | } | 198 | } |
169 | 199 | ||
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 064cef9ae2d1..d39d8bde42d7 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h | |||
@@ -20,6 +20,7 @@ | |||
20 | 20 | ||
21 | #include <asm/alternative.h> | 21 | #include <asm/alternative.h> |
22 | #include <asm/kernel-pgtable.h> | 22 | #include <asm/kernel-pgtable.h> |
23 | #include <asm/mmu.h> | ||
23 | #include <asm/sysreg.h> | 24 | #include <asm/sysreg.h> |
24 | 25 | ||
25 | #ifndef __ASSEMBLY__ | 26 | #ifndef __ASSEMBLY__ |
@@ -142,17 +143,23 @@ static inline void set_fs(mm_segment_t fs) | |||
142 | #ifdef CONFIG_ARM64_SW_TTBR0_PAN | 143 | #ifdef CONFIG_ARM64_SW_TTBR0_PAN |
143 | static inline void __uaccess_ttbr0_disable(void) | 144 | static inline void __uaccess_ttbr0_disable(void) |
144 | { | 145 | { |
145 | unsigned long ttbr; | 146 | unsigned long flags, ttbr; |
146 | 147 | ||
148 | local_irq_save(flags); | ||
149 | ttbr = read_sysreg(ttbr1_el1); | ||
150 | ttbr &= ~TTBR_ASID_MASK; | ||
147 | /* reserved_ttbr0 placed at the end of swapper_pg_dir */ | 151 | /* reserved_ttbr0 placed at the end of swapper_pg_dir */ |
148 | ttbr = read_sysreg(ttbr1_el1) + SWAPPER_DIR_SIZE; | 152 | write_sysreg(ttbr + SWAPPER_DIR_SIZE, ttbr0_el1); |
149 | write_sysreg(ttbr, ttbr0_el1); | 153 | isb(); |
154 | /* Set reserved ASID */ | ||
155 | write_sysreg(ttbr, ttbr1_el1); | ||
150 | isb(); | 156 | isb(); |
157 | local_irq_restore(flags); | ||
151 | } | 158 | } |
152 | 159 | ||
153 | static inline void __uaccess_ttbr0_enable(void) | 160 | static inline void __uaccess_ttbr0_enable(void) |
154 | { | 161 | { |
155 | unsigned long flags; | 162 | unsigned long flags, ttbr0, ttbr1; |
156 | 163 | ||
157 | /* | 164 | /* |
158 | * Disable interrupts to avoid preemption between reading the 'ttbr0' | 165 | * Disable interrupts to avoid preemption between reading the 'ttbr0' |
@@ -160,7 +167,17 @@ static inline void __uaccess_ttbr0_enable(void) | |||
160 | * roll-over and an update of 'ttbr0'. | 167 | * roll-over and an update of 'ttbr0'. |
161 | */ | 168 | */ |
162 | local_irq_save(flags); | 169 | local_irq_save(flags); |
163 | write_sysreg(current_thread_info()->ttbr0, ttbr0_el1); | 170 | ttbr0 = READ_ONCE(current_thread_info()->ttbr0); |
171 | |||
172 | /* Restore active ASID */ | ||
173 | ttbr1 = read_sysreg(ttbr1_el1); | ||
174 | ttbr1 &= ~TTBR_ASID_MASK; /* safety measure */ | ||
175 | ttbr1 |= ttbr0 & TTBR_ASID_MASK; | ||
176 | write_sysreg(ttbr1, ttbr1_el1); | ||
177 | isb(); | ||
178 | |||
179 | /* Restore user page table */ | ||
180 | write_sysreg(ttbr0, ttbr0_el1); | ||
164 | isb(); | 181 | isb(); |
165 | local_irq_restore(flags); | 182 | local_irq_restore(flags); |
166 | } | 183 | } |
@@ -439,51 +456,62 @@ extern __must_check long strnlen_user(const char __user *str, long n); | |||
439 | #ifdef CONFIG_ARM64_SW_TTBR0_PAN | 456 | #ifdef CONFIG_ARM64_SW_TTBR0_PAN |
440 | .macro __uaccess_ttbr0_disable, tmp1 | 457 | .macro __uaccess_ttbr0_disable, tmp1 |
441 | mrs \tmp1, ttbr1_el1 // swapper_pg_dir | 458 | mrs \tmp1, ttbr1_el1 // swapper_pg_dir |
459 | bic \tmp1, \tmp1, #TTBR_ASID_MASK | ||
442 | add \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir | 460 | add \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir |
443 | msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1 | 461 | msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1 |
444 | isb | 462 | isb |
463 | sub \tmp1, \tmp1, #SWAPPER_DIR_SIZE | ||
464 | msr ttbr1_el1, \tmp1 // set reserved ASID | ||
465 | isb | ||
445 | .endm | 466 | .endm |
446 | 467 | ||
447 | .macro __uaccess_ttbr0_enable, tmp1 | 468 | .macro __uaccess_ttbr0_enable, tmp1, tmp2 |
448 | get_thread_info \tmp1 | 469 | get_thread_info \tmp1 |
449 | ldr \tmp1, [\tmp1, #TSK_TI_TTBR0] // load saved TTBR0_EL1 | 470 | ldr \tmp1, [\tmp1, #TSK_TI_TTBR0] // load saved TTBR0_EL1 |
471 | mrs \tmp2, ttbr1_el1 | ||
472 | extr \tmp2, \tmp2, \tmp1, #48 | ||
473 | ror \tmp2, \tmp2, #16 | ||
474 | msr ttbr1_el1, \tmp2 // set the active ASID | ||
475 | isb | ||
450 | msr ttbr0_el1, \tmp1 // set the non-PAN TTBR0_EL1 | 476 | msr ttbr0_el1, \tmp1 // set the non-PAN TTBR0_EL1 |
451 | isb | 477 | isb |
452 | .endm | 478 | .endm |
453 | 479 | ||
454 | .macro uaccess_ttbr0_disable, tmp1 | 480 | .macro uaccess_ttbr0_disable, tmp1, tmp2 |
455 | alternative_if_not ARM64_HAS_PAN | 481 | alternative_if_not ARM64_HAS_PAN |
482 | save_and_disable_irq \tmp2 // avoid preemption | ||
456 | __uaccess_ttbr0_disable \tmp1 | 483 | __uaccess_ttbr0_disable \tmp1 |
484 | restore_irq \tmp2 | ||
457 | alternative_else_nop_endif | 485 | alternative_else_nop_endif |
458 | .endm | 486 | .endm |
459 | 487 | ||
460 | .macro uaccess_ttbr0_enable, tmp1, tmp2 | 488 | .macro uaccess_ttbr0_enable, tmp1, tmp2, tmp3 |
461 | alternative_if_not ARM64_HAS_PAN | 489 | alternative_if_not ARM64_HAS_PAN |
462 | save_and_disable_irq \tmp2 // avoid preemption | 490 | save_and_disable_irq \tmp3 // avoid preemption |
463 | __uaccess_ttbr0_enable \tmp1 | 491 | __uaccess_ttbr0_enable \tmp1, \tmp2 |
464 | restore_irq \tmp2 | 492 | restore_irq \tmp3 |
465 | alternative_else_nop_endif | 493 | alternative_else_nop_endif |
466 | .endm | 494 | .endm |
467 | #else | 495 | #else |
468 | .macro uaccess_ttbr0_disable, tmp1 | 496 | .macro uaccess_ttbr0_disable, tmp1, tmp2 |
469 | .endm | 497 | .endm |
470 | 498 | ||
471 | .macro uaccess_ttbr0_enable, tmp1, tmp2 | 499 | .macro uaccess_ttbr0_enable, tmp1, tmp2, tmp3 |
472 | .endm | 500 | .endm |
473 | #endif | 501 | #endif |
474 | 502 | ||
475 | /* | 503 | /* |
476 | * These macros are no-ops when UAO is present. | 504 | * These macros are no-ops when UAO is present. |
477 | */ | 505 | */ |
478 | .macro uaccess_disable_not_uao, tmp1 | 506 | .macro uaccess_disable_not_uao, tmp1, tmp2 |
479 | uaccess_ttbr0_disable \tmp1 | 507 | uaccess_ttbr0_disable \tmp1, \tmp2 |
480 | alternative_if ARM64_ALT_PAN_NOT_UAO | 508 | alternative_if ARM64_ALT_PAN_NOT_UAO |
481 | SET_PSTATE_PAN(1) | 509 | SET_PSTATE_PAN(1) |
482 | alternative_else_nop_endif | 510 | alternative_else_nop_endif |
483 | .endm | 511 | .endm |
484 | 512 | ||
485 | .macro uaccess_enable_not_uao, tmp1, tmp2 | 513 | .macro uaccess_enable_not_uao, tmp1, tmp2, tmp3 |
486 | uaccess_ttbr0_enable \tmp1, \tmp2 | 514 | uaccess_ttbr0_enable \tmp1, \tmp2, \tmp3 |
487 | alternative_if ARM64_ALT_PAN_NOT_UAO | 515 | alternative_if ARM64_ALT_PAN_NOT_UAO |
488 | SET_PSTATE_PAN(0) | 516 | SET_PSTATE_PAN(0) |
489 | alternative_else_nop_endif | 517 | alternative_else_nop_endif |
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 67ebe708e30c..4e6331562d7c 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c | |||
@@ -22,7 +22,7 @@ | |||
22 | #include <linux/mm.h> | 22 | #include <linux/mm.h> |
23 | #include <linux/dma-mapping.h> | 23 | #include <linux/dma-mapping.h> |
24 | #include <linux/kvm_host.h> | 24 | #include <linux/kvm_host.h> |
25 | #include <linux/suspend.h> | 25 | #include <asm/fixmap.h> |
26 | #include <asm/thread_info.h> | 26 | #include <asm/thread_info.h> |
27 | #include <asm/memory.h> | 27 | #include <asm/memory.h> |
28 | #include <asm/smp_plat.h> | 28 | #include <asm/smp_plat.h> |
@@ -152,14 +152,13 @@ int main(void) | |||
152 | DEFINE(CPU_CTX_SP, offsetof(struct cpu_suspend_ctx, sp)); | 152 | DEFINE(CPU_CTX_SP, offsetof(struct cpu_suspend_ctx, sp)); |
153 | DEFINE(MPIDR_HASH_MASK, offsetof(struct mpidr_hash, mask)); | 153 | DEFINE(MPIDR_HASH_MASK, offsetof(struct mpidr_hash, mask)); |
154 | DEFINE(MPIDR_HASH_SHIFTS, offsetof(struct mpidr_hash, shift_aff)); | 154 | DEFINE(MPIDR_HASH_SHIFTS, offsetof(struct mpidr_hash, shift_aff)); |
155 | DEFINE(SLEEP_STACK_DATA_SYSTEM_REGS, offsetof(struct sleep_stack_data, system_regs)); | 155 | DEFINE(SLEEP_SAVE_SP_SZ, sizeof(struct sleep_save_sp)); |
156 | DEFINE(SLEEP_STACK_DATA_CALLEE_REGS, offsetof(struct sleep_stack_data, callee_saved_regs)); | 156 | DEFINE(SLEEP_SAVE_SP_PHYS, offsetof(struct sleep_save_sp, save_ptr_stash_phys)); |
157 | DEFINE(SLEEP_SAVE_SP_VIRT, offsetof(struct sleep_save_sp, save_ptr_stash)); | ||
157 | #endif | 158 | #endif |
158 | DEFINE(ARM_SMCCC_RES_X0_OFFS, offsetof(struct arm_smccc_res, a0)); | ||
159 | DEFINE(ARM_SMCCC_RES_X2_OFFS, offsetof(struct arm_smccc_res, a2)); | ||
160 | BLANK(); | 159 | BLANK(); |
161 | DEFINE(HIBERN_PBE_ORIG, offsetof(struct pbe, orig_address)); | 160 | #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 |
162 | DEFINE(HIBERN_PBE_ADDR, offsetof(struct pbe, address)); | 161 | DEFINE(TRAMP_VALIAS, TRAMP_VALIAS); |
163 | DEFINE(HIBERN_PBE_NEXT, offsetof(struct pbe, next)); | 162 | #endif |
164 | return 0; | 163 | return 0; |
165 | } | 164 | } |
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 53fab76d3c39..590dde2d014b 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c | |||
@@ -650,10 +650,38 @@ static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry) | |||
650 | return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX, rv_min, rv_max); | 650 | return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX, rv_min, rv_max); |
651 | } | 651 | } |
652 | 652 | ||
653 | static bool runs_at_el2(const struct arm64_cpu_capabilities *entry) | 653 | #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 |
654 | static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */ | ||
655 | |||
656 | static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry) | ||
657 | { | ||
658 | /* Forced on command line? */ | ||
659 | if (__kpti_forced) { | ||
660 | pr_info_once("kernel page table isolation forced %s by command line option\n", | ||
661 | __kpti_forced > 0 ? "ON" : "OFF"); | ||
662 | return __kpti_forced > 0; | ||
663 | } | ||
664 | |||
665 | /* Useful for KASLR robustness */ | ||
666 | if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) | ||
667 | return true; | ||
668 | |||
669 | return false; | ||
670 | } | ||
671 | |||
672 | static int __init parse_kpti(char *str) | ||
654 | { | 673 | { |
655 | return is_kernel_in_hyp_mode(); | 674 | bool enabled; |
675 | int ret = strtobool(str, &enabled); | ||
676 | |||
677 | if (ret) | ||
678 | return ret; | ||
679 | |||
680 | __kpti_forced = enabled ? 1 : -1; | ||
681 | return 0; | ||
656 | } | 682 | } |
683 | __setup("kpti=", parse_kpti); | ||
684 | #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ | ||
657 | 685 | ||
658 | static const struct arm64_cpu_capabilities arm64_features[] = { | 686 | static const struct arm64_cpu_capabilities arm64_features[] = { |
659 | { | 687 | { |
@@ -707,11 +735,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = { | |||
707 | .matches = cpufeature_pan_not_uao, | 735 | .matches = cpufeature_pan_not_uao, |
708 | }, | 736 | }, |
709 | #endif /* CONFIG_ARM64_PAN */ | 737 | #endif /* CONFIG_ARM64_PAN */ |
738 | #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 | ||
710 | { | 739 | { |
711 | .desc = "Virtualization Host Extensions", | 740 | .capability = ARM64_UNMAP_KERNEL_AT_EL0, |
712 | .capability = ARM64_HAS_VIRT_HOST_EXTN, | 741 | .matches = unmap_kernel_at_el0, |
713 | .matches = runs_at_el2, | ||
714 | }, | 742 | }, |
743 | #endif | ||
715 | {}, | 744 | {}, |
716 | }; | 745 | }; |
717 | 746 | ||
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 9ff717d93653..739ec3b359b0 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <asm/esr.h> | 29 | #include <asm/esr.h> |
30 | #include <asm/irq.h> | 30 | #include <asm/irq.h> |
31 | #include <asm/memory.h> | 31 | #include <asm/memory.h> |
32 | #include <asm/mmu.h> | ||
32 | #include <asm/ptrace.h> | 33 | #include <asm/ptrace.h> |
33 | #include <asm/thread_info.h> | 34 | #include <asm/thread_info.h> |
34 | #include <asm/uaccess.h> | 35 | #include <asm/uaccess.h> |
@@ -70,8 +71,31 @@ | |||
70 | #define BAD_FIQ 2 | 71 | #define BAD_FIQ 2 |
71 | #define BAD_ERROR 3 | 72 | #define BAD_ERROR 3 |
72 | 73 | ||
73 | .macro kernel_entry, el, regsize = 64 | 74 | .macro kernel_ventry, el, label, regsize = 64 |
75 | .align 7 | ||
76 | #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 | ||
77 | alternative_if ARM64_UNMAP_KERNEL_AT_EL0 | ||
78 | .if \el == 0 | ||
79 | .if \regsize == 64 | ||
80 | mrs x30, tpidrro_el0 | ||
81 | msr tpidrro_el0, xzr | ||
82 | .else | ||
83 | mov x30, xzr | ||
84 | .endif | ||
85 | .endif | ||
86 | alternative_else_nop_endif | ||
87 | #endif | ||
88 | |||
74 | sub sp, sp, #S_FRAME_SIZE | 89 | sub sp, sp, #S_FRAME_SIZE |
90 | b el\()\el\()_\label | ||
91 | .endm | ||
92 | |||
93 | .macro tramp_alias, dst, sym | ||
94 | mov_q \dst, TRAMP_VALIAS | ||
95 | add \dst, \dst, #(\sym - .entry.tramp.text) | ||
96 | .endm | ||
97 | |||
98 | .macro kernel_entry, el, regsize = 64 | ||
75 | .if \regsize == 32 | 99 | .if \regsize == 32 |
76 | mov w0, w0 // zero upper 32 bits of x0 | 100 | mov w0, w0 // zero upper 32 bits of x0 |
77 | .endif | 101 | .endif |
@@ -141,7 +165,7 @@ alternative_else_nop_endif | |||
141 | 165 | ||
142 | .if \el != 0 | 166 | .if \el != 0 |
143 | mrs x21, ttbr0_el1 | 167 | mrs x21, ttbr0_el1 |
144 | tst x21, #0xffff << 48 // Check for the reserved ASID | 168 | tst x21, #TTBR_ASID_MASK // Check for the reserved ASID |
145 | orr x23, x23, #PSR_PAN_BIT // Set the emulated PAN in the saved SPSR | 169 | orr x23, x23, #PSR_PAN_BIT // Set the emulated PAN in the saved SPSR |
146 | b.eq 1f // TTBR0 access already disabled | 170 | b.eq 1f // TTBR0 access already disabled |
147 | and x23, x23, #~PSR_PAN_BIT // Clear the emulated PAN in the saved SPSR | 171 | and x23, x23, #~PSR_PAN_BIT // Clear the emulated PAN in the saved SPSR |
@@ -208,7 +232,7 @@ alternative_else_nop_endif | |||
208 | tbnz x22, #22, 1f // Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set | 232 | tbnz x22, #22, 1f // Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set |
209 | .endif | 233 | .endif |
210 | 234 | ||
211 | __uaccess_ttbr0_enable x0 | 235 | __uaccess_ttbr0_enable x0, x1 |
212 | 236 | ||
213 | .if \el == 0 | 237 | .if \el == 0 |
214 | /* | 238 | /* |
@@ -217,7 +241,7 @@ alternative_else_nop_endif | |||
217 | * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache | 241 | * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache |
218 | * corruption). | 242 | * corruption). |
219 | */ | 243 | */ |
220 | post_ttbr0_update_workaround | 244 | bl post_ttbr_update_workaround |
221 | .endif | 245 | .endif |
222 | 1: | 246 | 1: |
223 | .if \el != 0 | 247 | .if \el != 0 |
@@ -229,24 +253,20 @@ alternative_else_nop_endif | |||
229 | .if \el == 0 | 253 | .if \el == 0 |
230 | ldr x23, [sp, #S_SP] // load return stack pointer | 254 | ldr x23, [sp, #S_SP] // load return stack pointer |
231 | msr sp_el0, x23 | 255 | msr sp_el0, x23 |
256 | tst x22, #PSR_MODE32_BIT // native task? | ||
257 | b.eq 3f | ||
258 | |||
232 | #ifdef CONFIG_ARM64_ERRATUM_845719 | 259 | #ifdef CONFIG_ARM64_ERRATUM_845719 |
233 | alternative_if_not ARM64_WORKAROUND_845719 | 260 | alternative_if ARM64_WORKAROUND_845719 |
234 | nop | ||
235 | nop | ||
236 | #ifdef CONFIG_PID_IN_CONTEXTIDR | ||
237 | nop | ||
238 | #endif | ||
239 | alternative_else | ||
240 | tbz x22, #4, 1f | ||
241 | #ifdef CONFIG_PID_IN_CONTEXTIDR | 261 | #ifdef CONFIG_PID_IN_CONTEXTIDR |
242 | mrs x29, contextidr_el1 | 262 | mrs x29, contextidr_el1 |
243 | msr contextidr_el1, x29 | 263 | msr contextidr_el1, x29 |
244 | #else | 264 | #else |
245 | msr contextidr_el1, xzr | 265 | msr contextidr_el1, xzr |
246 | #endif | 266 | #endif |
247 | 1: | 267 | alternative_else_nop_endif |
248 | alternative_endif | ||
249 | #endif | 268 | #endif |
269 | 3: | ||
250 | .endif | 270 | .endif |
251 | 271 | ||
252 | msr elr_el1, x21 // set up the return data | 272 | msr elr_el1, x21 // set up the return data |
@@ -268,7 +288,21 @@ alternative_endif | |||
268 | ldp x28, x29, [sp, #16 * 14] | 288 | ldp x28, x29, [sp, #16 * 14] |
269 | ldr lr, [sp, #S_LR] | 289 | ldr lr, [sp, #S_LR] |
270 | add sp, sp, #S_FRAME_SIZE // restore sp | 290 | add sp, sp, #S_FRAME_SIZE // restore sp |
271 | eret // return to kernel | 291 | |
292 | .if \el == 0 | ||
293 | alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0 | ||
294 | #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 | ||
295 | bne 4f | ||
296 | msr far_el1, x30 | ||
297 | tramp_alias x30, tramp_exit_native | ||
298 | br x30 | ||
299 | 4: | ||
300 | tramp_alias x30, tramp_exit_compat | ||
301 | br x30 | ||
302 | #endif | ||
303 | .else | ||
304 | eret | ||
305 | .endif | ||
272 | .endm | 306 | .endm |
273 | 307 | ||
274 | .macro irq_stack_entry | 308 | .macro irq_stack_entry |
@@ -346,31 +380,31 @@ tsk .req x28 // current thread_info | |||
346 | 380 | ||
347 | .align 11 | 381 | .align 11 |
348 | ENTRY(vectors) | 382 | ENTRY(vectors) |
349 | ventry el1_sync_invalid // Synchronous EL1t | 383 | kernel_ventry 1, sync_invalid // Synchronous EL1t |
350 | ventry el1_irq_invalid // IRQ EL1t | 384 | kernel_ventry 1, irq_invalid // IRQ EL1t |
351 | ventry el1_fiq_invalid // FIQ EL1t | 385 | kernel_ventry 1, fiq_invalid // FIQ EL1t |
352 | ventry el1_error_invalid // Error EL1t | 386 | kernel_ventry 1, error_invalid // Error EL1t |
353 | 387 | ||
354 | ventry el1_sync // Synchronous EL1h | 388 | kernel_ventry 1, sync // Synchronous EL1h |
355 | ventry el1_irq // IRQ EL1h | 389 | kernel_ventry 1, irq // IRQ EL1h |
356 | ventry el1_fiq_invalid // FIQ EL1h | 390 | kernel_ventry 1, fiq_invalid // FIQ EL1h |
357 | ventry el1_error_invalid // Error EL1h | 391 | kernel_ventry 1, error_invalid // Error EL1h |
358 | 392 | ||
359 | ventry el0_sync // Synchronous 64-bit EL0 | 393 | kernel_ventry 0, sync // Synchronous 64-bit EL0 |
360 | ventry el0_irq // IRQ 64-bit EL0 | 394 | kernel_ventry 0, irq // IRQ 64-bit EL0 |
361 | ventry el0_fiq_invalid // FIQ 64-bit EL0 | 395 | kernel_ventry 0, fiq_invalid // FIQ 64-bit EL0 |
362 | ventry el0_error_invalid // Error 64-bit EL0 | 396 | kernel_ventry 0, error_invalid // Error 64-bit EL0 |
363 | 397 | ||
364 | #ifdef CONFIG_COMPAT | 398 | #ifdef CONFIG_COMPAT |
365 | ventry el0_sync_compat // Synchronous 32-bit EL0 | 399 | kernel_ventry 0, sync_compat, 32 // Synchronous 32-bit EL0 |
366 | ventry el0_irq_compat // IRQ 32-bit EL0 | 400 | kernel_ventry 0, irq_compat, 32 // IRQ 32-bit EL0 |
367 | ventry el0_fiq_invalid_compat // FIQ 32-bit EL0 | 401 | kernel_ventry 0, fiq_invalid_compat, 32 // FIQ 32-bit EL0 |
368 | ventry el0_error_invalid_compat // Error 32-bit EL0 | 402 | kernel_ventry 0, error_invalid_compat, 32 // Error 32-bit EL0 |
369 | #else | 403 | #else |
370 | ventry el0_sync_invalid // Synchronous 32-bit EL0 | 404 | kernel_ventry 0, sync_invalid, 32 // Synchronous 32-bit EL0 |
371 | ventry el0_irq_invalid // IRQ 32-bit EL0 | 405 | kernel_ventry 0, irq_invalid, 32 // IRQ 32-bit EL0 |
372 | ventry el0_fiq_invalid // FIQ 32-bit EL0 | 406 | kernel_ventry 0, fiq_invalid, 32 // FIQ 32-bit EL0 |
373 | ventry el0_error_invalid // Error 32-bit EL0 | 407 | kernel_ventry 0, error_invalid, 32 // Error 32-bit EL0 |
374 | #endif | 408 | #endif |
375 | END(vectors) | 409 | END(vectors) |
376 | 410 | ||
@@ -572,7 +606,7 @@ el0_sync: | |||
572 | cmp x24, #ESR_ELx_EC_FP_EXC64 // FP/ASIMD exception | 606 | cmp x24, #ESR_ELx_EC_FP_EXC64 // FP/ASIMD exception |
573 | b.eq el0_fpsimd_exc | 607 | b.eq el0_fpsimd_exc |
574 | cmp x24, #ESR_ELx_EC_SYS64 // configurable trap | 608 | cmp x24, #ESR_ELx_EC_SYS64 // configurable trap |
575 | b.eq el0_undef | 609 | b.eq el0_sys |
576 | cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception | 610 | cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception |
577 | b.eq el0_sp_pc | 611 | b.eq el0_sp_pc |
578 | cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception | 612 | cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception |
@@ -700,6 +734,16 @@ el0_undef: | |||
700 | mov x0, sp | 734 | mov x0, sp |
701 | bl do_undefinstr | 735 | bl do_undefinstr |
702 | b ret_to_user | 736 | b ret_to_user |
737 | el0_sys: | ||
738 | /* | ||
739 | * System instructions, for trapped cache maintenance instructions | ||
740 | */ | ||
741 | enable_dbg_and_irq | ||
742 | ct_user_exit | ||
743 | mov x0, x25 | ||
744 | mov x1, sp | ||
745 | bl do_sysinstr | ||
746 | b ret_to_user | ||
703 | el0_dbg: | 747 | el0_dbg: |
704 | /* | 748 | /* |
705 | * Debug exception handling | 749 | * Debug exception handling |
@@ -911,7 +955,118 @@ __ni_sys_trace: | |||
911 | bl do_ni_syscall | 955 | bl do_ni_syscall |
912 | b __sys_trace_return | 956 | b __sys_trace_return |
913 | 957 | ||
914 | .popsection // .entry.text | 958 | #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 |
959 | /* | ||
960 | * Exception vectors trampoline. | ||
961 | */ | ||
962 | .pushsection ".entry.tramp.text", "ax" | ||
963 | |||
964 | .macro tramp_map_kernel, tmp | ||
965 | mrs \tmp, ttbr1_el1 | ||
966 | sub \tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE) | ||
967 | bic \tmp, \tmp, #USER_ASID_FLAG | ||
968 | msr ttbr1_el1, \tmp | ||
969 | #ifdef CONFIG_ARCH_MSM8996 | ||
970 | /* ASID already in \tmp[63:48] */ | ||
971 | movk \tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12) | ||
972 | movk \tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12) | ||
973 | /* 2MB boundary containing the vectors, so we nobble the walk cache */ | ||
974 | movk \tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12) | ||
975 | isb | ||
976 | tlbi vae1, \tmp | ||
977 | dsb nsh | ||
978 | #endif /* CONFIG_ARCH_MSM8996 */ | ||
979 | .endm | ||
980 | |||
981 | .macro tramp_unmap_kernel, tmp | ||
982 | mrs \tmp, ttbr1_el1 | ||
983 | add \tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE) | ||
984 | orr \tmp, \tmp, #USER_ASID_FLAG | ||
985 | msr ttbr1_el1, \tmp | ||
986 | /* | ||
987 | * We avoid running the post_ttbr_update_workaround here because the | ||
988 | * user and kernel ASIDs don't have conflicting mappings, so any | ||
989 | * "blessing" as described in: | ||
990 | * | ||
991 | * http://lkml.kernel.org/r/56BB848A.6060603@caviumnetworks.com | ||
992 | * | ||
993 | * will not hurt correctness. Whilst this may partially defeat the | ||
994 | * point of using split ASIDs in the first place, it avoids | ||
995 | * the hit of invalidating the entire I-cache on every return to | ||
996 | * userspace. | ||
997 | */ | ||
998 | .endm | ||
999 | |||
1000 | .macro tramp_ventry, regsize = 64 | ||
1001 | .align 7 | ||
1002 | 1: | ||
1003 | .if \regsize == 64 | ||
1004 | msr tpidrro_el0, x30 // Restored in kernel_ventry | ||
1005 | .endif | ||
1006 | bl 2f | ||
1007 | b . | ||
1008 | 2: | ||
1009 | tramp_map_kernel x30 | ||
1010 | #ifdef CONFIG_RANDOMIZE_BASE | ||
1011 | adr x30, tramp_vectors + PAGE_SIZE | ||
1012 | #ifndef CONFIG_ARCH_MSM8996 | ||
1013 | isb | ||
1014 | #endif | ||
1015 | ldr x30, [x30] | ||
1016 | #else | ||
1017 | ldr x30, =vectors | ||
1018 | #endif | ||
1019 | prfm plil1strm, [x30, #(1b - tramp_vectors)] | ||
1020 | msr vbar_el1, x30 | ||
1021 | add x30, x30, #(1b - tramp_vectors) | ||
1022 | isb | ||
1023 | ret | ||
1024 | .endm | ||
1025 | |||
1026 | .macro tramp_exit, regsize = 64 | ||
1027 | adr x30, tramp_vectors | ||
1028 | msr vbar_el1, x30 | ||
1029 | tramp_unmap_kernel x30 | ||
1030 | .if \regsize == 64 | ||
1031 | mrs x30, far_el1 | ||
1032 | .endif | ||
1033 | eret | ||
1034 | .endm | ||
1035 | |||
1036 | .align 11 | ||
1037 | ENTRY(tramp_vectors) | ||
1038 | .space 0x400 | ||
1039 | |||
1040 | tramp_ventry | ||
1041 | tramp_ventry | ||
1042 | tramp_ventry | ||
1043 | tramp_ventry | ||
1044 | |||
1045 | tramp_ventry 32 | ||
1046 | tramp_ventry 32 | ||
1047 | tramp_ventry 32 | ||
1048 | tramp_ventry 32 | ||
1049 | END(tramp_vectors) | ||
1050 | |||
1051 | ENTRY(tramp_exit_native) | ||
1052 | tramp_exit | ||
1053 | END(tramp_exit_native) | ||
1054 | |||
1055 | ENTRY(tramp_exit_compat) | ||
1056 | tramp_exit 32 | ||
1057 | END(tramp_exit_compat) | ||
1058 | |||
1059 | .ltorg | ||
1060 | .popsection // .entry.tramp.text | ||
1061 | #ifdef CONFIG_RANDOMIZE_BASE | ||
1062 | .pushsection ".rodata", "a" | ||
1063 | .align PAGE_SHIFT | ||
1064 | .globl __entry_tramp_data_start | ||
1065 | __entry_tramp_data_start: | ||
1066 | .quad vectors | ||
1067 | .popsection // .rodata | ||
1068 | #endif /* CONFIG_RANDOMIZE_BASE */ | ||
1069 | #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ | ||
915 | 1070 | ||
916 | /* | 1071 | /* |
917 | * Special system call wrappers. | 1072 | * Special system call wrappers. |
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 5ac462f222f8..dfbabc0e274b 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c | |||
@@ -376,19 +376,17 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, | |||
376 | 376 | ||
377 | static void tls_thread_switch(struct task_struct *next) | 377 | static void tls_thread_switch(struct task_struct *next) |
378 | { | 378 | { |
379 | unsigned long tpidr, tpidrro; | 379 | unsigned long tpidr; |
380 | 380 | ||
381 | asm("mrs %0, tpidr_el0" : "=r" (tpidr)); | 381 | asm("mrs %0, tpidr_el0" : "=r" (tpidr)); |
382 | *task_user_tls(current) = tpidr; | 382 | *task_user_tls(current) = tpidr; |
383 | 383 | ||
384 | tpidr = *task_user_tls(next); | 384 | if (is_compat_thread(task_thread_info(next))) |
385 | tpidrro = is_compat_thread(task_thread_info(next)) ? | 385 | write_sysreg(next->thread.tp_value, tpidrro_el0); |
386 | next->thread.tp_value : 0; | 386 | else if (!arm64_kernel_unmapped_at_el0()) |
387 | write_sysreg(0, tpidrro_el0); | ||
387 | 388 | ||
388 | asm( | 389 | write_sysreg(*task_user_tls(next), tpidr_el0); |
389 | " msr tpidr_el0, %0\n" | ||
390 | " msr tpidrro_el0, %1" | ||
391 | : : "r" (tpidr), "r" (tpidrro)); | ||
392 | } | 390 | } |
393 | 391 | ||
394 | /* Restore the UAO state depending on next's addr_limit */ | 392 | /* Restore the UAO state depending on next's addr_limit */ |
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 9278a2bf9596..d421b5681754 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/syscalls.h> | 33 | #include <linux/syscalls.h> |
34 | 34 | ||
35 | #include <asm/atomic.h> | 35 | #include <asm/atomic.h> |
36 | #include <asm/barrier.h> | ||
36 | #include <asm/bug.h> | 37 | #include <asm/bug.h> |
37 | #include <asm/debug-monitors.h> | 38 | #include <asm/debug-monitors.h> |
38 | #include <asm/esr.h> | 39 | #include <asm/esr.h> |
@@ -404,6 +405,38 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs) | |||
404 | arm64_notify_die("Oops - undefined instruction", regs, &info, 0); | 405 | arm64_notify_die("Oops - undefined instruction", regs, &info, 0); |
405 | } | 406 | } |
406 | 407 | ||
408 | static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs) | ||
409 | { | ||
410 | int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT; | ||
411 | |||
412 | isb(); | ||
413 | if (rt != 31) | ||
414 | regs->regs[rt] = arch_counter_get_cntvct(); | ||
415 | regs->pc += 4; | ||
416 | } | ||
417 | |||
418 | static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs) | ||
419 | { | ||
420 | int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT; | ||
421 | |||
422 | if (rt != 31) | ||
423 | regs->regs[rt] = read_sysreg(cntfrq_el0); | ||
424 | regs->pc += 4; | ||
425 | } | ||
426 | |||
427 | asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs) | ||
428 | { | ||
429 | if ((esr & ESR_ELx_SYS64_ISS_SYS_OP_MASK) == ESR_ELx_SYS64_ISS_SYS_CNTVCT) { | ||
430 | cntvct_read_handler(esr, regs); | ||
431 | return; | ||
432 | } else if ((esr & ESR_ELx_SYS64_ISS_SYS_OP_MASK) == ESR_ELx_SYS64_ISS_SYS_CNTFRQ) { | ||
433 | cntfrq_read_handler(esr, regs); | ||
434 | return; | ||
435 | } | ||
436 | |||
437 | do_undefinstr(regs); | ||
438 | } | ||
439 | |||
407 | long compat_arm_syscall(struct pt_regs *regs); | 440 | long compat_arm_syscall(struct pt_regs *regs); |
408 | 441 | ||
409 | asmlinkage long do_ni_syscall(struct pt_regs *regs) | 442 | asmlinkage long do_ni_syscall(struct pt_regs *regs) |
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index eb8a7ebe31c3..c49760d5b6c9 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S | |||
@@ -46,14 +46,15 @@ jiffies = jiffies_64; | |||
46 | *(.idmap.text) \ | 46 | *(.idmap.text) \ |
47 | VMLINUX_SYMBOL(__idmap_text_end) = .; | 47 | VMLINUX_SYMBOL(__idmap_text_end) = .; |
48 | 48 | ||
49 | #ifdef CONFIG_HIBERNATION | 49 | #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 |
50 | #define HIBERNATE_TEXT \ | 50 | #define TRAMP_TEXT \ |
51 | . = ALIGN(SZ_4K); \ | 51 | . = ALIGN(PAGE_SIZE); \ |
52 | VMLINUX_SYMBOL(__hibernate_exit_text_start) = .;\ | 52 | VMLINUX_SYMBOL(__entry_tramp_text_start) = .; \ |
53 | *(.hibernate_exit.text) \ | 53 | *(.entry.tramp.text) \ |
54 | VMLINUX_SYMBOL(__hibernate_exit_text_end) = .; | 54 | . = ALIGN(PAGE_SIZE); \ |
55 | VMLINUX_SYMBOL(__entry_tramp_text_end) = .; | ||
55 | #else | 56 | #else |
56 | #define HIBERNATE_TEXT | 57 | #define TRAMP_TEXT |
57 | #endif | 58 | #endif |
58 | 59 | ||
59 | /* | 60 | /* |
@@ -125,7 +126,7 @@ SECTIONS | |||
125 | KPROBES_TEXT | 126 | KPROBES_TEXT |
126 | HYPERVISOR_TEXT | 127 | HYPERVISOR_TEXT |
127 | IDMAP_TEXT | 128 | IDMAP_TEXT |
128 | HIBERNATE_TEXT | 129 | TRAMP_TEXT |
129 | *(.fixup) | 130 | *(.fixup) |
130 | *(.gnu.warning) | 131 | *(.gnu.warning) |
131 | . = ALIGN(16); | 132 | . = ALIGN(16); |
@@ -200,6 +201,11 @@ SECTIONS | |||
200 | . += RESERVED_TTBR0_SIZE; | 201 | . += RESERVED_TTBR0_SIZE; |
201 | #endif | 202 | #endif |
202 | 203 | ||
204 | #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 | ||
205 | tramp_pg_dir = .; | ||
206 | . += PAGE_SIZE; | ||
207 | #endif | ||
208 | |||
203 | _end = .; | 209 | _end = .; |
204 | 210 | ||
205 | STABS_DEBUG | 211 | STABS_DEBUG |
@@ -220,6 +226,10 @@ ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1)) | |||
220 | <= SZ_4K, "Hibernate exit text too big or misaligned") | 226 | <= SZ_4K, "Hibernate exit text too big or misaligned") |
221 | #endif | 227 | #endif |
222 | 228 | ||
229 | #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 | ||
230 | ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE, | ||
231 | "Entry trampoline text too big") | ||
232 | #endif | ||
223 | /* | 233 | /* |
224 | * If padding is applied before .head.text, virt<->phys conversions will fail. | 234 | * If padding is applied before .head.text, virt<->phys conversions will fail. |
225 | */ | 235 | */ |
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index e47f9bc71079..51abbd1d98e3 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c | |||
@@ -43,7 +43,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
43 | 43 | ||
44 | ret = kvm_psci_call(vcpu); | 44 | ret = kvm_psci_call(vcpu); |
45 | if (ret < 0) { | 45 | if (ret < 0) { |
46 | kvm_inject_undefined(vcpu); | 46 | vcpu_set_reg(vcpu, 0, ~0UL); |
47 | return 1; | 47 | return 1; |
48 | } | 48 | } |
49 | 49 | ||
@@ -52,7 +52,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
52 | 52 | ||
53 | static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) | 53 | static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) |
54 | { | 54 | { |
55 | kvm_inject_undefined(vcpu); | 55 | vcpu_set_reg(vcpu, 0, ~0UL); |
56 | return 1; | 56 | return 1; |
57 | } | 57 | } |
58 | 58 | ||
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S index d7150e30438a..07c7ad97ee28 100644 --- a/arch/arm64/lib/clear_user.S +++ b/arch/arm64/lib/clear_user.S | |||
@@ -30,7 +30,7 @@ | |||
30 | * Alignment fixed up by hardware. | 30 | * Alignment fixed up by hardware. |
31 | */ | 31 | */ |
32 | ENTRY(__clear_user) | 32 | ENTRY(__clear_user) |
33 | uaccess_enable_not_uao x2, x3 | 33 | uaccess_enable_not_uao x2, x3, x4 |
34 | mov x2, x1 // save the size for fixup return | 34 | mov x2, x1 // save the size for fixup return |
35 | subs x1, x1, #8 | 35 | subs x1, x1, #8 |
36 | b.mi 2f | 36 | b.mi 2f |
@@ -50,7 +50,7 @@ uao_user_alternative 9f, strh, sttrh, wzr, x0, 2 | |||
50 | b.mi 5f | 50 | b.mi 5f |
51 | uao_user_alternative 9f, strb, sttrb, wzr, x0, 0 | 51 | uao_user_alternative 9f, strb, sttrb, wzr, x0, 0 |
52 | 5: mov x0, #0 | 52 | 5: mov x0, #0 |
53 | uaccess_disable_not_uao x2 | 53 | uaccess_disable_not_uao x2, x3 |
54 | ret | 54 | ret |
55 | ENDPROC(__clear_user) | 55 | ENDPROC(__clear_user) |
56 | 56 | ||
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S index 90154f3f7f2a..683adc358be7 100644 --- a/arch/arm64/lib/copy_from_user.S +++ b/arch/arm64/lib/copy_from_user.S | |||
@@ -64,10 +64,10 @@ | |||
64 | 64 | ||
65 | end .req x5 | 65 | end .req x5 |
66 | ENTRY(__arch_copy_from_user) | 66 | ENTRY(__arch_copy_from_user) |
67 | uaccess_enable_not_uao x3, x4 | 67 | uaccess_enable_not_uao x3, x4, x5 |
68 | add end, x0, x2 | 68 | add end, x0, x2 |
69 | #include "copy_template.S" | 69 | #include "copy_template.S" |
70 | uaccess_disable_not_uao x3 | 70 | uaccess_disable_not_uao x3, x4 |
71 | mov x0, #0 // Nothing to copy | 71 | mov x0, #0 // Nothing to copy |
72 | ret | 72 | ret |
73 | ENDPROC(__arch_copy_from_user) | 73 | ENDPROC(__arch_copy_from_user) |
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S index 718b1c4e2f85..e8bfaf19f778 100644 --- a/arch/arm64/lib/copy_in_user.S +++ b/arch/arm64/lib/copy_in_user.S | |||
@@ -65,10 +65,10 @@ | |||
65 | 65 | ||
66 | end .req x5 | 66 | end .req x5 |
67 | ENTRY(__copy_in_user) | 67 | ENTRY(__copy_in_user) |
68 | uaccess_enable_not_uao x3, x4 | 68 | uaccess_enable_not_uao x3, x4, x5 |
69 | add end, x0, x2 | 69 | add end, x0, x2 |
70 | #include "copy_template.S" | 70 | #include "copy_template.S" |
71 | uaccess_disable_not_uao x3 | 71 | uaccess_disable_not_uao x3, x4 |
72 | mov x0, #0 | 72 | mov x0, #0 |
73 | ret | 73 | ret |
74 | ENDPROC(__copy_in_user) | 74 | ENDPROC(__copy_in_user) |
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S index e99e31c9acac..f6cfcc0441de 100644 --- a/arch/arm64/lib/copy_to_user.S +++ b/arch/arm64/lib/copy_to_user.S | |||
@@ -63,10 +63,10 @@ | |||
63 | 63 | ||
64 | end .req x5 | 64 | end .req x5 |
65 | ENTRY(__arch_copy_to_user) | 65 | ENTRY(__arch_copy_to_user) |
66 | uaccess_enable_not_uao x3, x4 | 66 | uaccess_enable_not_uao x3, x4, x5 |
67 | add end, x0, x2 | 67 | add end, x0, x2 |
68 | #include "copy_template.S" | 68 | #include "copy_template.S" |
69 | uaccess_disable_not_uao x3 | 69 | uaccess_disable_not_uao x3, x4 |
70 | mov x0, #0 | 70 | mov x0, #0 |
71 | ret | 71 | ret |
72 | ENDPROC(__arch_copy_to_user) | 72 | ENDPROC(__arch_copy_to_user) |
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S index 3be2cda5dbda..e5091d9cceb6 100644 --- a/arch/arm64/mm/cache.S +++ b/arch/arm64/mm/cache.S | |||
@@ -49,7 +49,7 @@ ENTRY(flush_icache_range) | |||
49 | * - end - virtual end address of region | 49 | * - end - virtual end address of region |
50 | */ | 50 | */ |
51 | ENTRY(__flush_cache_user_range) | 51 | ENTRY(__flush_cache_user_range) |
52 | uaccess_ttbr0_enable x2, x3 | 52 | uaccess_ttbr0_enable x2, x3, x4 |
53 | dcache_line_size x2, x3 | 53 | dcache_line_size x2, x3 |
54 | sub x3, x2, #1 | 54 | sub x3, x2, #1 |
55 | bic x4, x0, x3 | 55 | bic x4, x0, x3 |
@@ -72,7 +72,7 @@ USER(9f, ic ivau, x4 ) // invalidate I line PoU | |||
72 | isb | 72 | isb |
73 | mov x0, #0 | 73 | mov x0, #0 |
74 | 1: | 74 | 1: |
75 | uaccess_ttbr0_disable x1 | 75 | uaccess_ttbr0_disable x1, x2 |
76 | ret | 76 | ret |
77 | 9: | 77 | 9: |
78 | mov x0, #-EFAULT | 78 | mov x0, #-EFAULT |
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index 25128089c386..b3d4a7153617 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c | |||
@@ -38,7 +38,16 @@ static cpumask_t tlb_flush_pending; | |||
38 | 38 | ||
39 | #define ASID_MASK (~GENMASK(asid_bits - 1, 0)) | 39 | #define ASID_MASK (~GENMASK(asid_bits - 1, 0)) |
40 | #define ASID_FIRST_VERSION (1UL << asid_bits) | 40 | #define ASID_FIRST_VERSION (1UL << asid_bits) |
41 | #define NUM_USER_ASIDS ASID_FIRST_VERSION | 41 | |
42 | #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 | ||
43 | #define NUM_USER_ASIDS (ASID_FIRST_VERSION >> 1) | ||
44 | #define asid2idx(asid) (((asid) & ~ASID_MASK) >> 1) | ||
45 | #define idx2asid(idx) (((idx) << 1) & ~ASID_MASK) | ||
46 | #else | ||
47 | #define NUM_USER_ASIDS (ASID_FIRST_VERSION) | ||
48 | #define asid2idx(asid) ((asid) & ~ASID_MASK) | ||
49 | #define idx2asid(idx) asid2idx(idx) | ||
50 | #endif | ||
42 | 51 | ||
43 | static void flush_context(unsigned int cpu) | 52 | static void flush_context(unsigned int cpu) |
44 | { | 53 | { |
@@ -65,7 +74,7 @@ static void flush_context(unsigned int cpu) | |||
65 | */ | 74 | */ |
66 | if (asid == 0) | 75 | if (asid == 0) |
67 | asid = per_cpu(reserved_asids, i); | 76 | asid = per_cpu(reserved_asids, i); |
68 | __set_bit(asid & ~ASID_MASK, asid_map); | 77 | __set_bit(asid2idx(asid), asid_map); |
69 | per_cpu(reserved_asids, i) = asid; | 78 | per_cpu(reserved_asids, i) = asid; |
70 | } | 79 | } |
71 | 80 | ||
@@ -120,16 +129,16 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) | |||
120 | * We had a valid ASID in a previous life, so try to re-use | 129 | * We had a valid ASID in a previous life, so try to re-use |
121 | * it if possible. | 130 | * it if possible. |
122 | */ | 131 | */ |
123 | asid &= ~ASID_MASK; | 132 | if (!__test_and_set_bit(asid2idx(asid), asid_map)) |
124 | if (!__test_and_set_bit(asid, asid_map)) | ||
125 | return newasid; | 133 | return newasid; |
126 | } | 134 | } |
127 | 135 | ||
128 | /* | 136 | /* |
129 | * Allocate a free ASID. If we can't find one, take a note of the | 137 | * Allocate a free ASID. If we can't find one, take a note of the |
130 | * currently active ASIDs and mark the TLBs as requiring flushes. | 138 | * currently active ASIDs and mark the TLBs as requiring flushes. We |
131 | * We always count from ASID #1, as we use ASID #0 when setting a | 139 | * always count from ASID #2 (index 1), as we use ASID #0 when setting |
132 | * reserved TTBR0 for the init_mm. | 140 | * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd |
141 | * pairs. | ||
133 | */ | 142 | */ |
134 | asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx); | 143 | asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx); |
135 | if (asid != NUM_USER_ASIDS) | 144 | if (asid != NUM_USER_ASIDS) |
@@ -146,7 +155,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) | |||
146 | set_asid: | 155 | set_asid: |
147 | __set_bit(asid, asid_map); | 156 | __set_bit(asid, asid_map); |
148 | cur_idx = asid; | 157 | cur_idx = asid; |
149 | return asid | generation; | 158 | return idx2asid(asid) | generation; |
150 | } | 159 | } |
151 | 160 | ||
152 | void check_and_switch_context(struct mm_struct *mm, unsigned int cpu) | 161 | void check_and_switch_context(struct mm_struct *mm, unsigned int cpu) |
@@ -190,6 +199,15 @@ switch_mm_fastpath: | |||
190 | cpu_switch_mm(mm->pgd, mm); | 199 | cpu_switch_mm(mm->pgd, mm); |
191 | } | 200 | } |
192 | 201 | ||
202 | /* Errata workaround post TTBRx_EL1 update. */ | ||
203 | asmlinkage void post_ttbr_update_workaround(void) | ||
204 | { | ||
205 | asm(ALTERNATIVE("nop; nop; nop", | ||
206 | "ic iallu; dsb nsh; isb", | ||
207 | ARM64_WORKAROUND_CAVIUM_27456, | ||
208 | CONFIG_CAVIUM_ERRATUM_27456)); | ||
209 | } | ||
210 | |||
193 | static int asids_init(void) | 211 | static int asids_init(void) |
194 | { | 212 | { |
195 | int fld = cpuid_feature_extract_field(read_cpuid(SYS_ID_AA64MMFR0_EL1), 4); | 213 | int fld = cpuid_feature_extract_field(read_cpuid(SYS_ID_AA64MMFR0_EL1), 4); |
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 74ac8a90ba3f..0d3306ff2065 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c | |||
@@ -427,6 +427,7 @@ void __init arm64_memblock_init(void) | |||
427 | 427 | ||
428 | reserve_elfcorehdr(); | 428 | reserve_elfcorehdr(); |
429 | 429 | ||
430 | high_memory = __va(memblock_end_of_DRAM() - 1) + 1; | ||
430 | dma_contiguous_reserve(arm64_dma_phys_limit); | 431 | dma_contiguous_reserve(arm64_dma_phys_limit); |
431 | 432 | ||
432 | memblock_allow_resize(); | 433 | memblock_allow_resize(); |
@@ -451,7 +452,6 @@ void __init bootmem_init(void) | |||
451 | sparse_init(); | 452 | sparse_init(); |
452 | zone_sizes_init(min, max); | 453 | zone_sizes_init(min, max); |
453 | 454 | ||
454 | high_memory = __va((max << PAGE_SHIFT) - 1) + 1; | ||
455 | max_pfn = max_low_pfn = max; | 455 | max_pfn = max_low_pfn = max; |
456 | } | 456 | } |
457 | 457 | ||
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index f8a48efac468..77069315a4f0 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c | |||
@@ -483,6 +483,37 @@ static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end, | |||
483 | vm_area_add_early(vma); | 483 | vm_area_add_early(vma); |
484 | } | 484 | } |
485 | 485 | ||
486 | #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 | ||
487 | static int __init map_entry_trampoline(void) | ||
488 | { | ||
489 | extern char __entry_tramp_text_start[]; | ||
490 | |||
491 | pgprot_t prot = PAGE_KERNEL_EXEC; | ||
492 | phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start); | ||
493 | |||
494 | /* The trampoline is always mapped and can therefore be global */ | ||
495 | pgprot_val(prot) &= ~PTE_NG; | ||
496 | |||
497 | /* Map only the text into the trampoline page table */ | ||
498 | memset(tramp_pg_dir, 0, PGD_SIZE); | ||
499 | __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE, | ||
500 | prot, late_pgtable_alloc); | ||
501 | |||
502 | /* Map both the text and data into the kernel page table */ | ||
503 | __set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot); | ||
504 | if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { | ||
505 | extern char __entry_tramp_data_start[]; | ||
506 | |||
507 | __set_fixmap(FIX_ENTRY_TRAMP_DATA, | ||
508 | __pa_symbol(__entry_tramp_data_start), | ||
509 | PAGE_KERNEL_RO); | ||
510 | } | ||
511 | |||
512 | return 0; | ||
513 | } | ||
514 | core_initcall(map_entry_trampoline); | ||
515 | #endif | ||
516 | |||
486 | /* | 517 | /* |
487 | * Create fine-grained mappings for the kernel. | 518 | * Create fine-grained mappings for the kernel. |
488 | */ | 519 | */ |
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 0e7324d8d2ce..85f3c0c68992 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S | |||
@@ -136,12 +136,17 @@ ENDPROC(cpu_do_resume) | |||
136 | * - pgd_phys - physical address of new TTB | 136 | * - pgd_phys - physical address of new TTB |
137 | */ | 137 | */ |
138 | ENTRY(cpu_do_switch_mm) | 138 | ENTRY(cpu_do_switch_mm) |
139 | mrs x2, ttbr1_el1 | ||
139 | mmid x1, x1 // get mm->context.id | 140 | mmid x1, x1 // get mm->context.id |
140 | bfi x0, x1, #48, #16 // set the ASID | 141 | #ifdef CONFIG_ARM64_SW_TTBR0_PAN |
141 | msr ttbr0_el1, x0 // set TTBR0 | 142 | bfi x0, x1, #48, #16 // set the ASID field in TTBR0 |
143 | #endif | ||
144 | bfi x2, x1, #48, #16 // set the ASID | ||
145 | msr ttbr1_el1, x2 // in TTBR1 (since TCR.A1 is set) | ||
142 | isb | 146 | isb |
143 | post_ttbr0_update_workaround | 147 | msr ttbr0_el1, x0 // now update TTBR0 |
144 | ret | 148 | isb |
149 | b post_ttbr_update_workaround // Back to C code... | ||
145 | ENDPROC(cpu_do_switch_mm) | 150 | ENDPROC(cpu_do_switch_mm) |
146 | 151 | ||
147 | .pushsection ".idmap.text", "ax" | 152 | .pushsection ".idmap.text", "ax" |
@@ -221,7 +226,7 @@ ENTRY(__cpu_setup) | |||
221 | * both user and kernel. | 226 | * both user and kernel. |
222 | */ | 227 | */ |
223 | ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \ | 228 | ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \ |
224 | TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0 | 229 | TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0 | TCR_A1 |
225 | tcr_set_idmap_t0sz x10, x9 | 230 | tcr_set_idmap_t0sz x10, x9 |
226 | 231 | ||
227 | /* | 232 | /* |
diff --git a/arch/arm64/xen/hypercall.S b/arch/arm64/xen/hypercall.S index b96db5dafec4..a396beb7829b 100644 --- a/arch/arm64/xen/hypercall.S +++ b/arch/arm64/xen/hypercall.S | |||
@@ -98,12 +98,12 @@ ENTRY(privcmd_call) | |||
98 | * need the explicit uaccess_enable/disable if the TTBR0 PAN emulation | 98 | * need the explicit uaccess_enable/disable if the TTBR0 PAN emulation |
99 | * is enabled (it implies that hardware UAO and PAN disabled). | 99 | * is enabled (it implies that hardware UAO and PAN disabled). |
100 | */ | 100 | */ |
101 | uaccess_ttbr0_enable x6, x7 | 101 | uaccess_ttbr0_enable x6, x7, x8 |
102 | hvc XEN_IMM | 102 | hvc XEN_IMM |
103 | 103 | ||
104 | /* | 104 | /* |
105 | * Disable userspace access from kernel once the hyp call completed. | 105 | * Disable userspace access from kernel once the hyp call completed. |
106 | */ | 106 | */ |
107 | uaccess_ttbr0_disable x6 | 107 | uaccess_ttbr0_disable x6, x7 |
108 | ret | 108 | ret |
109 | ENDPROC(privcmd_call); | 109 | ENDPROC(privcmd_call); |
diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c index 3446b6fb3acb..9da4e2292fc7 100644 --- a/arch/mips/ar7/platform.c +++ b/arch/mips/ar7/platform.c | |||
@@ -576,7 +576,7 @@ static int __init ar7_register_uarts(void) | |||
576 | uart_port.type = PORT_AR7; | 576 | uart_port.type = PORT_AR7; |
577 | uart_port.uartclk = clk_get_rate(bus_clk) / 2; | 577 | uart_port.uartclk = clk_get_rate(bus_clk) / 2; |
578 | uart_port.iotype = UPIO_MEM32; | 578 | uart_port.iotype = UPIO_MEM32; |
579 | uart_port.flags = UPF_FIXED_TYPE; | 579 | uart_port.flags = UPF_FIXED_TYPE | UPF_BOOT_AUTOCONF; |
580 | uart_port.regshift = 2; | 580 | uart_port.regshift = 2; |
581 | 581 | ||
582 | uart_port.line = 0; | 582 | uart_port.line = 0; |
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 163b3449a8de..fcbc4e57d765 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c | |||
@@ -664,6 +664,18 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value) | |||
664 | unsigned long switch_count; | 664 | unsigned long switch_count; |
665 | struct task_struct *t; | 665 | struct task_struct *t; |
666 | 666 | ||
667 | /* If nothing to change, return right away, successfully. */ | ||
668 | if (value == mips_get_process_fp_mode(task)) | ||
669 | return 0; | ||
670 | |||
671 | /* Only accept a mode change if 64-bit FP enabled for o32. */ | ||
672 | if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT)) | ||
673 | return -EOPNOTSUPP; | ||
674 | |||
675 | /* And only for o32 tasks. */ | ||
676 | if (IS_ENABLED(CONFIG_64BIT) && !test_thread_flag(TIF_32BIT_REGS)) | ||
677 | return -EOPNOTSUPP; | ||
678 | |||
667 | /* Check the value is valid */ | 679 | /* Check the value is valid */ |
668 | if (value & ~known_bits) | 680 | if (value & ~known_bits) |
669 | return -EOPNOTSUPP; | 681 | return -EOPNOTSUPP; |
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index a3f38e6b7ea1..c3d2d2c05fdb 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c | |||
@@ -439,63 +439,160 @@ static int gpr64_set(struct task_struct *target, | |||
439 | 439 | ||
440 | #endif /* CONFIG_64BIT */ | 440 | #endif /* CONFIG_64BIT */ |
441 | 441 | ||
442 | /* | ||
443 | * Copy the floating-point context to the supplied NT_PRFPREG buffer, | ||
444 | * !CONFIG_CPU_HAS_MSA variant. FP context's general register slots | ||
445 | * correspond 1:1 to buffer slots. Only general registers are copied. | ||
446 | */ | ||
447 | static int fpr_get_fpa(struct task_struct *target, | ||
448 | unsigned int *pos, unsigned int *count, | ||
449 | void **kbuf, void __user **ubuf) | ||
450 | { | ||
451 | return user_regset_copyout(pos, count, kbuf, ubuf, | ||
452 | &target->thread.fpu, | ||
453 | 0, NUM_FPU_REGS * sizeof(elf_fpreg_t)); | ||
454 | } | ||
455 | |||
456 | /* | ||
457 | * Copy the floating-point context to the supplied NT_PRFPREG buffer, | ||
458 | * CONFIG_CPU_HAS_MSA variant. Only lower 64 bits of FP context's | ||
459 | * general register slots are copied to buffer slots. Only general | ||
460 | * registers are copied. | ||
461 | */ | ||
462 | static int fpr_get_msa(struct task_struct *target, | ||
463 | unsigned int *pos, unsigned int *count, | ||
464 | void **kbuf, void __user **ubuf) | ||
465 | { | ||
466 | unsigned int i; | ||
467 | u64 fpr_val; | ||
468 | int err; | ||
469 | |||
470 | BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t)); | ||
471 | for (i = 0; i < NUM_FPU_REGS; i++) { | ||
472 | fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0); | ||
473 | err = user_regset_copyout(pos, count, kbuf, ubuf, | ||
474 | &fpr_val, i * sizeof(elf_fpreg_t), | ||
475 | (i + 1) * sizeof(elf_fpreg_t)); | ||
476 | if (err) | ||
477 | return err; | ||
478 | } | ||
479 | |||
480 | return 0; | ||
481 | } | ||
482 | |||
483 | /* | ||
484 | * Copy the floating-point context to the supplied NT_PRFPREG buffer. | ||
485 | * Choose the appropriate helper for general registers, and then copy | ||
486 | * the FCSR register separately. | ||
487 | */ | ||
442 | static int fpr_get(struct task_struct *target, | 488 | static int fpr_get(struct task_struct *target, |
443 | const struct user_regset *regset, | 489 | const struct user_regset *regset, |
444 | unsigned int pos, unsigned int count, | 490 | unsigned int pos, unsigned int count, |
445 | void *kbuf, void __user *ubuf) | 491 | void *kbuf, void __user *ubuf) |
446 | { | 492 | { |
447 | unsigned i; | 493 | const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t); |
448 | int err; | 494 | int err; |
449 | u64 fpr_val; | ||
450 | 495 | ||
451 | /* XXX fcr31 */ | 496 | if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t)) |
497 | err = fpr_get_fpa(target, &pos, &count, &kbuf, &ubuf); | ||
498 | else | ||
499 | err = fpr_get_msa(target, &pos, &count, &kbuf, &ubuf); | ||
500 | if (err) | ||
501 | return err; | ||
452 | 502 | ||
453 | if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t)) | 503 | err = user_regset_copyout(&pos, &count, &kbuf, &ubuf, |
454 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, | 504 | &target->thread.fpu.fcr31, |
455 | &target->thread.fpu, | 505 | fcr31_pos, fcr31_pos + sizeof(u32)); |
456 | 0, sizeof(elf_fpregset_t)); | ||
457 | 506 | ||
458 | for (i = 0; i < NUM_FPU_REGS; i++) { | 507 | return err; |
459 | fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0); | 508 | } |
460 | err = user_regset_copyout(&pos, &count, &kbuf, &ubuf, | 509 | |
461 | &fpr_val, i * sizeof(elf_fpreg_t), | 510 | /* |
462 | (i + 1) * sizeof(elf_fpreg_t)); | 511 | * Copy the supplied NT_PRFPREG buffer to the floating-point context, |
512 | * !CONFIG_CPU_HAS_MSA variant. Buffer slots correspond 1:1 to FP | ||
513 | * context's general register slots. Only general registers are copied. | ||
514 | */ | ||
515 | static int fpr_set_fpa(struct task_struct *target, | ||
516 | unsigned int *pos, unsigned int *count, | ||
517 | const void **kbuf, const void __user **ubuf) | ||
518 | { | ||
519 | return user_regset_copyin(pos, count, kbuf, ubuf, | ||
520 | &target->thread.fpu, | ||
521 | 0, NUM_FPU_REGS * sizeof(elf_fpreg_t)); | ||
522 | } | ||
523 | |||
524 | /* | ||
525 | * Copy the supplied NT_PRFPREG buffer to the floating-point context, | ||
526 | * CONFIG_CPU_HAS_MSA variant. Buffer slots are copied to lower 64 | ||
527 | * bits only of FP context's general register slots. Only general | ||
528 | * registers are copied. | ||
529 | */ | ||
530 | static int fpr_set_msa(struct task_struct *target, | ||
531 | unsigned int *pos, unsigned int *count, | ||
532 | const void **kbuf, const void __user **ubuf) | ||
533 | { | ||
534 | unsigned int i; | ||
535 | u64 fpr_val; | ||
536 | int err; | ||
537 | |||
538 | BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t)); | ||
539 | for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) { | ||
540 | err = user_regset_copyin(pos, count, kbuf, ubuf, | ||
541 | &fpr_val, i * sizeof(elf_fpreg_t), | ||
542 | (i + 1) * sizeof(elf_fpreg_t)); | ||
463 | if (err) | 543 | if (err) |
464 | return err; | 544 | return err; |
545 | set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val); | ||
465 | } | 546 | } |
466 | 547 | ||
467 | return 0; | 548 | return 0; |
468 | } | 549 | } |
469 | 550 | ||
551 | /* | ||
552 | * Copy the supplied NT_PRFPREG buffer to the floating-point context. | ||
553 | * Choose the appropriate helper for general registers, and then copy | ||
554 | * the FCSR register separately. | ||
555 | * | ||
556 | * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0', | ||
557 | * which is supposed to have been guaranteed by the kernel before | ||
558 | * calling us, e.g. in `ptrace_regset'. We enforce that requirement, | ||
559 | * so that we can safely avoid preinitializing temporaries for | ||
560 | * partial register writes. | ||
561 | */ | ||
470 | static int fpr_set(struct task_struct *target, | 562 | static int fpr_set(struct task_struct *target, |
471 | const struct user_regset *regset, | 563 | const struct user_regset *regset, |
472 | unsigned int pos, unsigned int count, | 564 | unsigned int pos, unsigned int count, |
473 | const void *kbuf, const void __user *ubuf) | 565 | const void *kbuf, const void __user *ubuf) |
474 | { | 566 | { |
475 | unsigned i; | 567 | const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t); |
568 | u32 fcr31; | ||
476 | int err; | 569 | int err; |
477 | u64 fpr_val; | ||
478 | 570 | ||
479 | /* XXX fcr31 */ | 571 | BUG_ON(count % sizeof(elf_fpreg_t)); |
572 | |||
573 | if (pos + count > sizeof(elf_fpregset_t)) | ||
574 | return -EIO; | ||
480 | 575 | ||
481 | init_fp_ctx(target); | 576 | init_fp_ctx(target); |
482 | 577 | ||
483 | if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t)) | 578 | if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t)) |
484 | return user_regset_copyin(&pos, &count, &kbuf, &ubuf, | 579 | err = fpr_set_fpa(target, &pos, &count, &kbuf, &ubuf); |
485 | &target->thread.fpu, | 580 | else |
486 | 0, sizeof(elf_fpregset_t)); | 581 | err = fpr_set_msa(target, &pos, &count, &kbuf, &ubuf); |
582 | if (err) | ||
583 | return err; | ||
487 | 584 | ||
488 | BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t)); | 585 | if (count > 0) { |
489 | for (i = 0; i < NUM_FPU_REGS && count >= sizeof(elf_fpreg_t); i++) { | ||
490 | err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, | 586 | err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, |
491 | &fpr_val, i * sizeof(elf_fpreg_t), | 587 | &fcr31, |
492 | (i + 1) * sizeof(elf_fpreg_t)); | 588 | fcr31_pos, fcr31_pos + sizeof(u32)); |
493 | if (err) | 589 | if (err) |
494 | return err; | 590 | return err; |
495 | set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val); | 591 | |
592 | ptrace_setfcr31(target, fcr31); | ||
496 | } | 593 | } |
497 | 594 | ||
498 | return 0; | 595 | return err; |
499 | } | 596 | } |
500 | 597 | ||
501 | enum mips_regset { | 598 | enum mips_regset { |
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index dd058aa8a3b5..89d05de8040a 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c | |||
@@ -1777,7 +1777,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, | |||
1777 | SPFROMREG(fs, MIPSInst_FS(ir)); | 1777 | SPFROMREG(fs, MIPSInst_FS(ir)); |
1778 | SPFROMREG(fd, MIPSInst_FD(ir)); | 1778 | SPFROMREG(fd, MIPSInst_FD(ir)); |
1779 | rv.s = ieee754sp_maddf(fd, fs, ft); | 1779 | rv.s = ieee754sp_maddf(fd, fs, ft); |
1780 | break; | 1780 | goto copcsr; |
1781 | } | 1781 | } |
1782 | 1782 | ||
1783 | case fmsubf_op: { | 1783 | case fmsubf_op: { |
@@ -1790,7 +1790,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, | |||
1790 | SPFROMREG(fs, MIPSInst_FS(ir)); | 1790 | SPFROMREG(fs, MIPSInst_FS(ir)); |
1791 | SPFROMREG(fd, MIPSInst_FD(ir)); | 1791 | SPFROMREG(fd, MIPSInst_FD(ir)); |
1792 | rv.s = ieee754sp_msubf(fd, fs, ft); | 1792 | rv.s = ieee754sp_msubf(fd, fs, ft); |
1793 | break; | 1793 | goto copcsr; |
1794 | } | 1794 | } |
1795 | 1795 | ||
1796 | case frint_op: { | 1796 | case frint_op: { |
@@ -1814,7 +1814,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, | |||
1814 | SPFROMREG(fs, MIPSInst_FS(ir)); | 1814 | SPFROMREG(fs, MIPSInst_FS(ir)); |
1815 | rv.w = ieee754sp_2008class(fs); | 1815 | rv.w = ieee754sp_2008class(fs); |
1816 | rfmt = w_fmt; | 1816 | rfmt = w_fmt; |
1817 | break; | 1817 | goto copcsr; |
1818 | } | 1818 | } |
1819 | 1819 | ||
1820 | case fmin_op: { | 1820 | case fmin_op: { |
@@ -1826,7 +1826,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, | |||
1826 | SPFROMREG(ft, MIPSInst_FT(ir)); | 1826 | SPFROMREG(ft, MIPSInst_FT(ir)); |
1827 | SPFROMREG(fs, MIPSInst_FS(ir)); | 1827 | SPFROMREG(fs, MIPSInst_FS(ir)); |
1828 | rv.s = ieee754sp_fmin(fs, ft); | 1828 | rv.s = ieee754sp_fmin(fs, ft); |
1829 | break; | 1829 | goto copcsr; |
1830 | } | 1830 | } |
1831 | 1831 | ||
1832 | case fmina_op: { | 1832 | case fmina_op: { |
@@ -1838,7 +1838,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, | |||
1838 | SPFROMREG(ft, MIPSInst_FT(ir)); | 1838 | SPFROMREG(ft, MIPSInst_FT(ir)); |
1839 | SPFROMREG(fs, MIPSInst_FS(ir)); | 1839 | SPFROMREG(fs, MIPSInst_FS(ir)); |
1840 | rv.s = ieee754sp_fmina(fs, ft); | 1840 | rv.s = ieee754sp_fmina(fs, ft); |
1841 | break; | 1841 | goto copcsr; |
1842 | } | 1842 | } |
1843 | 1843 | ||
1844 | case fmax_op: { | 1844 | case fmax_op: { |
@@ -1850,7 +1850,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, | |||
1850 | SPFROMREG(ft, MIPSInst_FT(ir)); | 1850 | SPFROMREG(ft, MIPSInst_FT(ir)); |
1851 | SPFROMREG(fs, MIPSInst_FS(ir)); | 1851 | SPFROMREG(fs, MIPSInst_FS(ir)); |
1852 | rv.s = ieee754sp_fmax(fs, ft); | 1852 | rv.s = ieee754sp_fmax(fs, ft); |
1853 | break; | 1853 | goto copcsr; |
1854 | } | 1854 | } |
1855 | 1855 | ||
1856 | case fmaxa_op: { | 1856 | case fmaxa_op: { |
@@ -1862,7 +1862,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, | |||
1862 | SPFROMREG(ft, MIPSInst_FT(ir)); | 1862 | SPFROMREG(ft, MIPSInst_FT(ir)); |
1863 | SPFROMREG(fs, MIPSInst_FS(ir)); | 1863 | SPFROMREG(fs, MIPSInst_FS(ir)); |
1864 | rv.s = ieee754sp_fmaxa(fs, ft); | 1864 | rv.s = ieee754sp_fmaxa(fs, ft); |
1865 | break; | 1865 | goto copcsr; |
1866 | } | 1866 | } |
1867 | 1867 | ||
1868 | case fabs_op: | 1868 | case fabs_op: |
@@ -2095,7 +2095,7 @@ copcsr: | |||
2095 | DPFROMREG(fs, MIPSInst_FS(ir)); | 2095 | DPFROMREG(fs, MIPSInst_FS(ir)); |
2096 | DPFROMREG(fd, MIPSInst_FD(ir)); | 2096 | DPFROMREG(fd, MIPSInst_FD(ir)); |
2097 | rv.d = ieee754dp_maddf(fd, fs, ft); | 2097 | rv.d = ieee754dp_maddf(fd, fs, ft); |
2098 | break; | 2098 | goto copcsr; |
2099 | } | 2099 | } |
2100 | 2100 | ||
2101 | case fmsubf_op: { | 2101 | case fmsubf_op: { |
@@ -2108,7 +2108,7 @@ copcsr: | |||
2108 | DPFROMREG(fs, MIPSInst_FS(ir)); | 2108 | DPFROMREG(fs, MIPSInst_FS(ir)); |
2109 | DPFROMREG(fd, MIPSInst_FD(ir)); | 2109 | DPFROMREG(fd, MIPSInst_FD(ir)); |
2110 | rv.d = ieee754dp_msubf(fd, fs, ft); | 2110 | rv.d = ieee754dp_msubf(fd, fs, ft); |
2111 | break; | 2111 | goto copcsr; |
2112 | } | 2112 | } |
2113 | 2113 | ||
2114 | case frint_op: { | 2114 | case frint_op: { |
@@ -2132,7 +2132,7 @@ copcsr: | |||
2132 | DPFROMREG(fs, MIPSInst_FS(ir)); | 2132 | DPFROMREG(fs, MIPSInst_FS(ir)); |
2133 | rv.w = ieee754dp_2008class(fs); | 2133 | rv.w = ieee754dp_2008class(fs); |
2134 | rfmt = w_fmt; | 2134 | rfmt = w_fmt; |
2135 | break; | 2135 | goto copcsr; |
2136 | } | 2136 | } |
2137 | 2137 | ||
2138 | case fmin_op: { | 2138 | case fmin_op: { |
@@ -2144,7 +2144,7 @@ copcsr: | |||
2144 | DPFROMREG(ft, MIPSInst_FT(ir)); | 2144 | DPFROMREG(ft, MIPSInst_FT(ir)); |
2145 | DPFROMREG(fs, MIPSInst_FS(ir)); | 2145 | DPFROMREG(fs, MIPSInst_FS(ir)); |
2146 | rv.d = ieee754dp_fmin(fs, ft); | 2146 | rv.d = ieee754dp_fmin(fs, ft); |
2147 | break; | 2147 | goto copcsr; |
2148 | } | 2148 | } |
2149 | 2149 | ||
2150 | case fmina_op: { | 2150 | case fmina_op: { |
@@ -2156,7 +2156,7 @@ copcsr: | |||
2156 | DPFROMREG(ft, MIPSInst_FT(ir)); | 2156 | DPFROMREG(ft, MIPSInst_FT(ir)); |
2157 | DPFROMREG(fs, MIPSInst_FS(ir)); | 2157 | DPFROMREG(fs, MIPSInst_FS(ir)); |
2158 | rv.d = ieee754dp_fmina(fs, ft); | 2158 | rv.d = ieee754dp_fmina(fs, ft); |
2159 | break; | 2159 | goto copcsr; |
2160 | } | 2160 | } |
2161 | 2161 | ||
2162 | case fmax_op: { | 2162 | case fmax_op: { |
@@ -2168,7 +2168,7 @@ copcsr: | |||
2168 | DPFROMREG(ft, MIPSInst_FT(ir)); | 2168 | DPFROMREG(ft, MIPSInst_FT(ir)); |
2169 | DPFROMREG(fs, MIPSInst_FS(ir)); | 2169 | DPFROMREG(fs, MIPSInst_FS(ir)); |
2170 | rv.d = ieee754dp_fmax(fs, ft); | 2170 | rv.d = ieee754dp_fmax(fs, ft); |
2171 | break; | 2171 | goto copcsr; |
2172 | } | 2172 | } |
2173 | 2173 | ||
2174 | case fmaxa_op: { | 2174 | case fmaxa_op: { |
@@ -2180,7 +2180,7 @@ copcsr: | |||
2180 | DPFROMREG(ft, MIPSInst_FT(ir)); | 2180 | DPFROMREG(ft, MIPSInst_FT(ir)); |
2181 | DPFROMREG(fs, MIPSInst_FS(ir)); | 2181 | DPFROMREG(fs, MIPSInst_FS(ir)); |
2182 | rv.d = ieee754dp_fmaxa(fs, ft); | 2182 | rv.d = ieee754dp_fmaxa(fs, ft); |
2183 | break; | 2183 | goto copcsr; |
2184 | } | 2184 | } |
2185 | 2185 | ||
2186 | case fabs_op: | 2186 | case fabs_op: |
diff --git a/arch/mn10300/mm/misalignment.c b/arch/mn10300/mm/misalignment.c index b9920b1edd5a..70cef54dc40f 100644 --- a/arch/mn10300/mm/misalignment.c +++ b/arch/mn10300/mm/misalignment.c | |||
@@ -437,7 +437,7 @@ transfer_failed: | |||
437 | 437 | ||
438 | info.si_signo = SIGSEGV; | 438 | info.si_signo = SIGSEGV; |
439 | info.si_errno = 0; | 439 | info.si_errno = 0; |
440 | info.si_code = 0; | 440 | info.si_code = SEGV_MAPERR; |
441 | info.si_addr = (void *) regs->pc; | 441 | info.si_addr = (void *) regs->pc; |
442 | force_sig_info(SIGSEGV, &info, current); | 442 | force_sig_info(SIGSEGV, &info, current); |
443 | return; | 443 | return; |
diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c index 3d3f6062f49c..605a284922fb 100644 --- a/arch/openrisc/kernel/traps.c +++ b/arch/openrisc/kernel/traps.c | |||
@@ -302,12 +302,12 @@ asmlinkage void do_unaligned_access(struct pt_regs *regs, unsigned long address) | |||
302 | siginfo_t info; | 302 | siginfo_t info; |
303 | 303 | ||
304 | if (user_mode(regs)) { | 304 | if (user_mode(regs)) { |
305 | /* Send a SIGSEGV */ | 305 | /* Send a SIGBUS */ |
306 | info.si_signo = SIGSEGV; | 306 | info.si_signo = SIGBUS; |
307 | info.si_errno = 0; | 307 | info.si_errno = 0; |
308 | /* info.si_code has been set above */ | 308 | info.si_code = BUS_ADRALN; |
309 | info.si_addr = (void *)address; | 309 | info.si_addr = (void __user *)address; |
310 | force_sig_info(SIGSEGV, &info, current); | 310 | force_sig_info(SIGBUS, &info, current); |
311 | } else { | 311 | } else { |
312 | printk("KERNEL: Unaligned Access 0x%.8lx\n", address); | 312 | printk("KERNEL: Unaligned Access 0x%.8lx\n", address); |
313 | show_registers(regs); | 313 | show_registers(regs); |
diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h index 8121aa6db2ff..51bb6b8eade6 100644 --- a/arch/parisc/include/asm/ldcw.h +++ b/arch/parisc/include/asm/ldcw.h | |||
@@ -11,6 +11,7 @@ | |||
11 | for the semaphore. */ | 11 | for the semaphore. */ |
12 | 12 | ||
13 | #define __PA_LDCW_ALIGNMENT 16 | 13 | #define __PA_LDCW_ALIGNMENT 16 |
14 | #define __PA_LDCW_ALIGN_ORDER 4 | ||
14 | #define __ldcw_align(a) ({ \ | 15 | #define __ldcw_align(a) ({ \ |
15 | unsigned long __ret = (unsigned long) &(a)->lock[0]; \ | 16 | unsigned long __ret = (unsigned long) &(a)->lock[0]; \ |
16 | __ret = (__ret + __PA_LDCW_ALIGNMENT - 1) \ | 17 | __ret = (__ret + __PA_LDCW_ALIGNMENT - 1) \ |
@@ -28,6 +29,7 @@ | |||
28 | ldcd). */ | 29 | ldcd). */ |
29 | 30 | ||
30 | #define __PA_LDCW_ALIGNMENT 4 | 31 | #define __PA_LDCW_ALIGNMENT 4 |
32 | #define __PA_LDCW_ALIGN_ORDER 2 | ||
31 | #define __ldcw_align(a) (&(a)->slock) | 33 | #define __ldcw_align(a) (&(a)->slock) |
32 | #define __LDCW "ldcw,co" | 34 | #define __LDCW "ldcw,co" |
33 | 35 | ||
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index 623496c11756..5dc831955de5 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <asm/pgtable.h> | 35 | #include <asm/pgtable.h> |
36 | #include <asm/signal.h> | 36 | #include <asm/signal.h> |
37 | #include <asm/unistd.h> | 37 | #include <asm/unistd.h> |
38 | #include <asm/ldcw.h> | ||
38 | #include <asm/thread_info.h> | 39 | #include <asm/thread_info.h> |
39 | 40 | ||
40 | #include <linux/linkage.h> | 41 | #include <linux/linkage.h> |
@@ -46,6 +47,14 @@ | |||
46 | #endif | 47 | #endif |
47 | 48 | ||
48 | .import pa_tlb_lock,data | 49 | .import pa_tlb_lock,data |
50 | .macro load_pa_tlb_lock reg | ||
51 | #if __PA_LDCW_ALIGNMENT > 4 | ||
52 | load32 PA(pa_tlb_lock) + __PA_LDCW_ALIGNMENT-1, \reg | ||
53 | depi 0,31,__PA_LDCW_ALIGN_ORDER, \reg | ||
54 | #else | ||
55 | load32 PA(pa_tlb_lock), \reg | ||
56 | #endif | ||
57 | .endm | ||
49 | 58 | ||
50 | /* space_to_prot macro creates a prot id from a space id */ | 59 | /* space_to_prot macro creates a prot id from a space id */ |
51 | 60 | ||
@@ -457,7 +466,7 @@ | |||
457 | .macro tlb_lock spc,ptp,pte,tmp,tmp1,fault | 466 | .macro tlb_lock spc,ptp,pte,tmp,tmp1,fault |
458 | #ifdef CONFIG_SMP | 467 | #ifdef CONFIG_SMP |
459 | cmpib,COND(=),n 0,\spc,2f | 468 | cmpib,COND(=),n 0,\spc,2f |
460 | load32 PA(pa_tlb_lock),\tmp | 469 | load_pa_tlb_lock \tmp |
461 | 1: LDCW 0(\tmp),\tmp1 | 470 | 1: LDCW 0(\tmp),\tmp1 |
462 | cmpib,COND(=) 0,\tmp1,1b | 471 | cmpib,COND(=) 0,\tmp1,1b |
463 | nop | 472 | nop |
@@ -480,7 +489,7 @@ | |||
480 | /* Release pa_tlb_lock lock. */ | 489 | /* Release pa_tlb_lock lock. */ |
481 | .macro tlb_unlock1 spc,tmp | 490 | .macro tlb_unlock1 spc,tmp |
482 | #ifdef CONFIG_SMP | 491 | #ifdef CONFIG_SMP |
483 | load32 PA(pa_tlb_lock),\tmp | 492 | load_pa_tlb_lock \tmp |
484 | tlb_unlock0 \spc,\tmp | 493 | tlb_unlock0 \spc,\tmp |
485 | #endif | 494 | #endif |
486 | .endm | 495 | .endm |
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S index a4761b772406..16073f472118 100644 --- a/arch/parisc/kernel/pacache.S +++ b/arch/parisc/kernel/pacache.S | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <asm/assembly.h> | 36 | #include <asm/assembly.h> |
37 | #include <asm/pgtable.h> | 37 | #include <asm/pgtable.h> |
38 | #include <asm/cache.h> | 38 | #include <asm/cache.h> |
39 | #include <asm/ldcw.h> | ||
39 | #include <linux/linkage.h> | 40 | #include <linux/linkage.h> |
40 | 41 | ||
41 | .text | 42 | .text |
@@ -333,8 +334,12 @@ ENDPROC(flush_data_cache_local) | |||
333 | 334 | ||
334 | .macro tlb_lock la,flags,tmp | 335 | .macro tlb_lock la,flags,tmp |
335 | #ifdef CONFIG_SMP | 336 | #ifdef CONFIG_SMP |
336 | ldil L%pa_tlb_lock,%r1 | 337 | #if __PA_LDCW_ALIGNMENT > 4 |
337 | ldo R%pa_tlb_lock(%r1),\la | 338 | load32 pa_tlb_lock + __PA_LDCW_ALIGNMENT-1, \la |
339 | depi 0,31,__PA_LDCW_ALIGN_ORDER, \la | ||
340 | #else | ||
341 | load32 pa_tlb_lock, \la | ||
342 | #endif | ||
338 | rsm PSW_SM_I,\flags | 343 | rsm PSW_SM_I,\flags |
339 | 1: LDCW 0(\la),\tmp | 344 | 1: LDCW 0(\la),\tmp |
340 | cmpib,<>,n 0,\tmp,3f | 345 | cmpib,<>,n 0,\tmp,3f |
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index c628f47a9052..755eb1275dbb 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig | |||
@@ -129,13 +129,14 @@ config PPC | |||
129 | select IRQ_FORCED_THREADING | 129 | select IRQ_FORCED_THREADING |
130 | select HAVE_RCU_TABLE_FREE if SMP | 130 | select HAVE_RCU_TABLE_FREE if SMP |
131 | select HAVE_SYSCALL_TRACEPOINTS | 131 | select HAVE_SYSCALL_TRACEPOINTS |
132 | select HAVE_BPF_JIT | 132 | select HAVE_BPF_JIT if CPU_BIG_ENDIAN |
133 | select HAVE_ARCH_JUMP_LABEL | 133 | select HAVE_ARCH_JUMP_LABEL |
134 | select ARCH_HAVE_NMI_SAFE_CMPXCHG | 134 | select ARCH_HAVE_NMI_SAFE_CMPXCHG |
135 | select ARCH_HAS_GCOV_PROFILE_ALL | 135 | select ARCH_HAS_GCOV_PROFILE_ALL |
136 | select GENERIC_SMP_IDLE_THREAD | 136 | select GENERIC_SMP_IDLE_THREAD |
137 | select GENERIC_CMOS_UPDATE | 137 | select GENERIC_CMOS_UPDATE |
138 | select GENERIC_TIME_VSYSCALL_OLD | 138 | select GENERIC_TIME_VSYSCALL_OLD |
139 | select GENERIC_CPU_VULNERABILITIES if PPC_BOOK3S_64 | ||
139 | select GENERIC_CLOCKEVENTS | 140 | select GENERIC_CLOCKEVENTS |
140 | select GENERIC_CLOCKEVENTS_BROADCAST if SMP | 141 | select GENERIC_CLOCKEVENTS_BROADCAST if SMP |
141 | select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST | 142 | select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST |
diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h index a703452d67b6..555e22d5e07f 100644 --- a/arch/powerpc/include/asm/exception-64e.h +++ b/arch/powerpc/include/asm/exception-64e.h | |||
@@ -209,5 +209,11 @@ exc_##label##_book3e: | |||
209 | ori r3,r3,vector_offset@l; \ | 209 | ori r3,r3,vector_offset@l; \ |
210 | mtspr SPRN_IVOR##vector_number,r3; | 210 | mtspr SPRN_IVOR##vector_number,r3; |
211 | 211 | ||
212 | #define RFI_TO_KERNEL \ | ||
213 | rfi | ||
214 | |||
215 | #define RFI_TO_USER \ | ||
216 | rfi | ||
217 | |||
212 | #endif /* _ASM_POWERPC_EXCEPTION_64E_H */ | 218 | #endif /* _ASM_POWERPC_EXCEPTION_64E_H */ |
213 | 219 | ||
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index 77f52b26dad6..9bddbec441b8 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h | |||
@@ -50,6 +50,59 @@ | |||
50 | #define EX_PPR 88 /* SMT thread status register (priority) */ | 50 | #define EX_PPR 88 /* SMT thread status register (priority) */ |
51 | #define EX_CTR 96 | 51 | #define EX_CTR 96 |
52 | 52 | ||
53 | /* | ||
54 | * Macros for annotating the expected destination of (h)rfid | ||
55 | * | ||
56 | * The nop instructions allow us to insert one or more instructions to flush the | ||
57 | * L1-D cache when returning to userspace or a guest. | ||
58 | */ | ||
59 | #define RFI_FLUSH_SLOT \ | ||
60 | RFI_FLUSH_FIXUP_SECTION; \ | ||
61 | nop; \ | ||
62 | nop; \ | ||
63 | nop | ||
64 | |||
65 | #define RFI_TO_KERNEL \ | ||
66 | rfid | ||
67 | |||
68 | #define RFI_TO_USER \ | ||
69 | RFI_FLUSH_SLOT; \ | ||
70 | rfid; \ | ||
71 | b rfi_flush_fallback | ||
72 | |||
73 | #define RFI_TO_USER_OR_KERNEL \ | ||
74 | RFI_FLUSH_SLOT; \ | ||
75 | rfid; \ | ||
76 | b rfi_flush_fallback | ||
77 | |||
78 | #define RFI_TO_GUEST \ | ||
79 | RFI_FLUSH_SLOT; \ | ||
80 | rfid; \ | ||
81 | b rfi_flush_fallback | ||
82 | |||
83 | #define HRFI_TO_KERNEL \ | ||
84 | hrfid | ||
85 | |||
86 | #define HRFI_TO_USER \ | ||
87 | RFI_FLUSH_SLOT; \ | ||
88 | hrfid; \ | ||
89 | b hrfi_flush_fallback | ||
90 | |||
91 | #define HRFI_TO_USER_OR_KERNEL \ | ||
92 | RFI_FLUSH_SLOT; \ | ||
93 | hrfid; \ | ||
94 | b hrfi_flush_fallback | ||
95 | |||
96 | #define HRFI_TO_GUEST \ | ||
97 | RFI_FLUSH_SLOT; \ | ||
98 | hrfid; \ | ||
99 | b hrfi_flush_fallback | ||
100 | |||
101 | #define HRFI_TO_UNKNOWN \ | ||
102 | RFI_FLUSH_SLOT; \ | ||
103 | hrfid; \ | ||
104 | b hrfi_flush_fallback | ||
105 | |||
53 | #ifdef CONFIG_RELOCATABLE | 106 | #ifdef CONFIG_RELOCATABLE |
54 | #define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \ | 107 | #define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \ |
55 | ld r12,PACAKBASE(r13); /* get high part of &label */ \ | 108 | ld r12,PACAKBASE(r13); /* get high part of &label */ \ |
@@ -191,7 +244,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943) | |||
191 | mtspr SPRN_##h##SRR0,r12; \ | 244 | mtspr SPRN_##h##SRR0,r12; \ |
192 | mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \ | 245 | mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \ |
193 | mtspr SPRN_##h##SRR1,r10; \ | 246 | mtspr SPRN_##h##SRR1,r10; \ |
194 | h##rfid; \ | 247 | h##RFI_TO_KERNEL; \ |
195 | b . /* prevent speculative execution */ | 248 | b . /* prevent speculative execution */ |
196 | #define EXCEPTION_PROLOG_PSERIES_1(label, h) \ | 249 | #define EXCEPTION_PROLOG_PSERIES_1(label, h) \ |
197 | __EXCEPTION_PROLOG_PSERIES_1(label, h) | 250 | __EXCEPTION_PROLOG_PSERIES_1(label, h) |
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h index 9a67a38bf7b9..7068bafbb2d6 100644 --- a/arch/powerpc/include/asm/feature-fixups.h +++ b/arch/powerpc/include/asm/feature-fixups.h | |||
@@ -184,4 +184,19 @@ label##3: \ | |||
184 | FTR_ENTRY_OFFSET label##1b-label##3b; \ | 184 | FTR_ENTRY_OFFSET label##1b-label##3b; \ |
185 | .popsection; | 185 | .popsection; |
186 | 186 | ||
187 | #define RFI_FLUSH_FIXUP_SECTION \ | ||
188 | 951: \ | ||
189 | .pushsection __rfi_flush_fixup,"a"; \ | ||
190 | .align 2; \ | ||
191 | 952: \ | ||
192 | FTR_ENTRY_OFFSET 951b-952b; \ | ||
193 | .popsection; | ||
194 | |||
195 | |||
196 | #ifndef __ASSEMBLY__ | ||
197 | |||
198 | extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup; | ||
199 | |||
200 | #endif | ||
201 | |||
187 | #endif /* __ASM_POWERPC_FEATURE_FIXUPS_H */ | 202 | #endif /* __ASM_POWERPC_FEATURE_FIXUPS_H */ |
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index 85bc8c0d257b..449bbb87c257 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h | |||
@@ -239,6 +239,7 @@ | |||
239 | #define H_GET_HCA_INFO 0x1B8 | 239 | #define H_GET_HCA_INFO 0x1B8 |
240 | #define H_GET_PERF_COUNT 0x1BC | 240 | #define H_GET_PERF_COUNT 0x1BC |
241 | #define H_MANAGE_TRACE 0x1C0 | 241 | #define H_MANAGE_TRACE 0x1C0 |
242 | #define H_GET_CPU_CHARACTERISTICS 0x1C8 | ||
242 | #define H_FREE_LOGICAL_LAN_BUFFER 0x1D4 | 243 | #define H_FREE_LOGICAL_LAN_BUFFER 0x1D4 |
243 | #define H_QUERY_INT_STATE 0x1E4 | 244 | #define H_QUERY_INT_STATE 0x1E4 |
244 | #define H_POLL_PENDING 0x1D8 | 245 | #define H_POLL_PENDING 0x1D8 |
@@ -285,7 +286,19 @@ | |||
285 | #define H_SET_MODE_RESOURCE_ADDR_TRANS_MODE 3 | 286 | #define H_SET_MODE_RESOURCE_ADDR_TRANS_MODE 3 |
286 | #define H_SET_MODE_RESOURCE_LE 4 | 287 | #define H_SET_MODE_RESOURCE_LE 4 |
287 | 288 | ||
289 | /* H_GET_CPU_CHARACTERISTICS return values */ | ||
290 | #define H_CPU_CHAR_SPEC_BAR_ORI31 (1ull << 63) // IBM bit 0 | ||
291 | #define H_CPU_CHAR_BCCTRL_SERIALISED (1ull << 62) // IBM bit 1 | ||
292 | #define H_CPU_CHAR_L1D_FLUSH_ORI30 (1ull << 61) // IBM bit 2 | ||
293 | #define H_CPU_CHAR_L1D_FLUSH_TRIG2 (1ull << 60) // IBM bit 3 | ||
294 | #define H_CPU_CHAR_L1D_THREAD_PRIV (1ull << 59) // IBM bit 4 | ||
295 | |||
296 | #define H_CPU_BEHAV_FAVOUR_SECURITY (1ull << 63) // IBM bit 0 | ||
297 | #define H_CPU_BEHAV_L1D_FLUSH_PR (1ull << 62) // IBM bit 1 | ||
298 | #define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ull << 61) // IBM bit 2 | ||
299 | |||
288 | #ifndef __ASSEMBLY__ | 300 | #ifndef __ASSEMBLY__ |
301 | #include <linux/types.h> | ||
289 | 302 | ||
290 | /** | 303 | /** |
291 | * plpar_hcall_norets: - Make a pseries hypervisor call with no return arguments | 304 | * plpar_hcall_norets: - Make a pseries hypervisor call with no return arguments |
@@ -423,6 +436,11 @@ extern long pseries_big_endian_exceptions(void); | |||
423 | 436 | ||
424 | #endif /* CONFIG_PPC_PSERIES */ | 437 | #endif /* CONFIG_PPC_PSERIES */ |
425 | 438 | ||
439 | struct h_cpu_char_result { | ||
440 | u64 character; | ||
441 | u64 behaviour; | ||
442 | }; | ||
443 | |||
426 | #endif /* __ASSEMBLY__ */ | 444 | #endif /* __ASSEMBLY__ */ |
427 | #endif /* __KERNEL__ */ | 445 | #endif /* __KERNEL__ */ |
428 | #endif /* _ASM_POWERPC_HVCALL_H */ | 446 | #endif /* _ASM_POWERPC_HVCALL_H */ |
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index 70bd4381f8e6..45e2aefece16 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h | |||
@@ -192,6 +192,16 @@ struct paca_struct { | |||
192 | #endif | 192 | #endif |
193 | struct kvmppc_host_state kvm_hstate; | 193 | struct kvmppc_host_state kvm_hstate; |
194 | #endif | 194 | #endif |
195 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
196 | /* | ||
197 | * rfi fallback flush must be in its own cacheline to prevent | ||
198 | * other paca data leaking into the L1d | ||
199 | */ | ||
200 | u64 exrfi[13] __aligned(0x80); | ||
201 | void *rfi_flush_fallback_area; | ||
202 | u64 l1d_flush_congruence; | ||
203 | u64 l1d_flush_sets; | ||
204 | #endif | ||
195 | }; | 205 | }; |
196 | 206 | ||
197 | extern struct paca_struct *paca; | 207 | extern struct paca_struct *paca; |
diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h index 67859edbf8fd..6e05cb397a5c 100644 --- a/arch/powerpc/include/asm/plpar_wrappers.h +++ b/arch/powerpc/include/asm/plpar_wrappers.h | |||
@@ -323,4 +323,18 @@ static inline long plapr_set_watchpoint0(unsigned long dawr0, unsigned long dawr | |||
323 | return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_DAWR, dawr0, dawrx0); | 323 | return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_DAWR, dawr0, dawrx0); |
324 | } | 324 | } |
325 | 325 | ||
326 | static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p) | ||
327 | { | ||
328 | unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; | ||
329 | long rc; | ||
330 | |||
331 | rc = plpar_hcall(H_GET_CPU_CHARACTERISTICS, retbuf); | ||
332 | if (rc == H_SUCCESS) { | ||
333 | p->character = retbuf[0]; | ||
334 | p->behaviour = retbuf[1]; | ||
335 | } | ||
336 | |||
337 | return rc; | ||
338 | } | ||
339 | |||
326 | #endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */ | 340 | #endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */ |
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index dd0fc18d8103..160bb2311bbb 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h | |||
@@ -224,6 +224,16 @@ name: \ | |||
224 | .globl name; \ | 224 | .globl name; \ |
225 | name: | 225 | name: |
226 | 226 | ||
227 | #define _KPROBE_TOC(name) \ | ||
228 | .section ".kprobes.text","a"; \ | ||
229 | .align 2 ; \ | ||
230 | .type name,@function; \ | ||
231 | .globl name; \ | ||
232 | name: \ | ||
233 | 0: addis r2,r12,(.TOC.-0b)@ha; \ | ||
234 | addi r2,r2,(.TOC.-0b)@l; \ | ||
235 | .localentry name,.-name | ||
236 | |||
227 | #define DOTSYM(a) a | 237 | #define DOTSYM(a) a |
228 | 238 | ||
229 | #else | 239 | #else |
@@ -261,6 +271,8 @@ name: \ | |||
261 | .type GLUE(.,name),@function; \ | 271 | .type GLUE(.,name),@function; \ |
262 | GLUE(.,name): | 272 | GLUE(.,name): |
263 | 273 | ||
274 | #define _KPROBE_TOC(n) _KPROBE(n) | ||
275 | |||
264 | #define DOTSYM(a) GLUE(.,a) | 276 | #define DOTSYM(a) GLUE(.,a) |
265 | 277 | ||
266 | #endif | 278 | #endif |
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index e9d384cbd021..7916b56f2e60 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h | |||
@@ -26,6 +26,19 @@ void initmem_init(void); | |||
26 | void setup_panic(void); | 26 | void setup_panic(void); |
27 | #define ARCH_PANIC_TIMEOUT 180 | 27 | #define ARCH_PANIC_TIMEOUT 180 |
28 | 28 | ||
29 | void rfi_flush_enable(bool enable); | ||
30 | |||
31 | /* These are bit flags */ | ||
32 | enum l1d_flush_type { | ||
33 | L1D_FLUSH_NONE = 0x1, | ||
34 | L1D_FLUSH_FALLBACK = 0x2, | ||
35 | L1D_FLUSH_ORI = 0x4, | ||
36 | L1D_FLUSH_MTTRIG = 0x8, | ||
37 | }; | ||
38 | |||
39 | void __init setup_rfi_flush(enum l1d_flush_type, bool enable); | ||
40 | void do_rfi_flush_fixups(enum l1d_flush_type types); | ||
41 | |||
29 | #endif /* !__ASSEMBLY__ */ | 42 | #endif /* !__ASSEMBLY__ */ |
30 | 43 | ||
31 | #endif /* _ASM_POWERPC_SETUP_H */ | 44 | #endif /* _ASM_POWERPC_SETUP_H */ |
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 40da69163d51..d92705e3a0c1 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c | |||
@@ -243,6 +243,10 @@ int main(void) | |||
243 | #ifdef CONFIG_PPC_BOOK3S_64 | 243 | #ifdef CONFIG_PPC_BOOK3S_64 |
244 | DEFINE(PACAMCEMERGSP, offsetof(struct paca_struct, mc_emergency_sp)); | 244 | DEFINE(PACAMCEMERGSP, offsetof(struct paca_struct, mc_emergency_sp)); |
245 | DEFINE(PACA_IN_MCE, offsetof(struct paca_struct, in_mce)); | 245 | DEFINE(PACA_IN_MCE, offsetof(struct paca_struct, in_mce)); |
246 | DEFINE(PACA_RFI_FLUSH_FALLBACK_AREA, offsetof(struct paca_struct, rfi_flush_fallback_area)); | ||
247 | DEFINE(PACA_EXRFI, offsetof(struct paca_struct, exrfi)); | ||
248 | DEFINE(PACA_L1D_FLUSH_CONGRUENCE, offsetof(struct paca_struct, l1d_flush_congruence)); | ||
249 | DEFINE(PACA_L1D_FLUSH_SETS, offsetof(struct paca_struct, l1d_flush_sets)); | ||
246 | #endif | 250 | #endif |
247 | DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id)); | 251 | DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id)); |
248 | DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state)); | 252 | DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state)); |
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index f6fd0332c3a2..2837232bbffb 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S | |||
@@ -36,6 +36,11 @@ | |||
36 | #include <asm/hw_irq.h> | 36 | #include <asm/hw_irq.h> |
37 | #include <asm/context_tracking.h> | 37 | #include <asm/context_tracking.h> |
38 | #include <asm/tm.h> | 38 | #include <asm/tm.h> |
39 | #ifdef CONFIG_PPC_BOOK3S | ||
40 | #include <asm/exception-64s.h> | ||
41 | #else | ||
42 | #include <asm/exception-64e.h> | ||
43 | #endif | ||
39 | 44 | ||
40 | /* | 45 | /* |
41 | * System calls. | 46 | * System calls. |
@@ -225,13 +230,23 @@ END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) | |||
225 | ACCOUNT_CPU_USER_EXIT(r11, r12) | 230 | ACCOUNT_CPU_USER_EXIT(r11, r12) |
226 | HMT_MEDIUM_LOW_HAS_PPR | 231 | HMT_MEDIUM_LOW_HAS_PPR |
227 | ld r13,GPR13(r1) /* only restore r13 if returning to usermode */ | 232 | ld r13,GPR13(r1) /* only restore r13 if returning to usermode */ |
233 | ld r2,GPR2(r1) | ||
234 | ld r1,GPR1(r1) | ||
235 | mtlr r4 | ||
236 | mtcr r5 | ||
237 | mtspr SPRN_SRR0,r7 | ||
238 | mtspr SPRN_SRR1,r8 | ||
239 | RFI_TO_USER | ||
240 | b . /* prevent speculative execution */ | ||
241 | |||
242 | /* exit to kernel */ | ||
228 | 1: ld r2,GPR2(r1) | 243 | 1: ld r2,GPR2(r1) |
229 | ld r1,GPR1(r1) | 244 | ld r1,GPR1(r1) |
230 | mtlr r4 | 245 | mtlr r4 |
231 | mtcr r5 | 246 | mtcr r5 |
232 | mtspr SPRN_SRR0,r7 | 247 | mtspr SPRN_SRR0,r7 |
233 | mtspr SPRN_SRR1,r8 | 248 | mtspr SPRN_SRR1,r8 |
234 | RFI | 249 | RFI_TO_KERNEL |
235 | b . /* prevent speculative execution */ | 250 | b . /* prevent speculative execution */ |
236 | 251 | ||
237 | syscall_error: | 252 | syscall_error: |
@@ -353,8 +368,7 @@ tabort_syscall: | |||
353 | mtmsrd r10, 1 | 368 | mtmsrd r10, 1 |
354 | mtspr SPRN_SRR0, r11 | 369 | mtspr SPRN_SRR0, r11 |
355 | mtspr SPRN_SRR1, r12 | 370 | mtspr SPRN_SRR1, r12 |
356 | 371 | RFI_TO_USER | |
357 | rfid | ||
358 | b . /* prevent speculative execution */ | 372 | b . /* prevent speculative execution */ |
359 | #endif | 373 | #endif |
360 | 374 | ||
@@ -887,7 +901,7 @@ BEGIN_FTR_SECTION | |||
887 | END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) | 901 | END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) |
888 | ACCOUNT_CPU_USER_EXIT(r2, r4) | 902 | ACCOUNT_CPU_USER_EXIT(r2, r4) |
889 | REST_GPR(13, r1) | 903 | REST_GPR(13, r1) |
890 | 1: | 904 | |
891 | mtspr SPRN_SRR1,r3 | 905 | mtspr SPRN_SRR1,r3 |
892 | 906 | ||
893 | ld r2,_CCR(r1) | 907 | ld r2,_CCR(r1) |
@@ -900,8 +914,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) | |||
900 | ld r3,GPR3(r1) | 914 | ld r3,GPR3(r1) |
901 | ld r4,GPR4(r1) | 915 | ld r4,GPR4(r1) |
902 | ld r1,GPR1(r1) | 916 | ld r1,GPR1(r1) |
917 | RFI_TO_USER | ||
918 | b . /* prevent speculative execution */ | ||
903 | 919 | ||
904 | rfid | 920 | 1: mtspr SPRN_SRR1,r3 |
921 | |||
922 | ld r2,_CCR(r1) | ||
923 | mtcrf 0xFF,r2 | ||
924 | ld r2,_NIP(r1) | ||
925 | mtspr SPRN_SRR0,r2 | ||
926 | |||
927 | ld r0,GPR0(r1) | ||
928 | ld r2,GPR2(r1) | ||
929 | ld r3,GPR3(r1) | ||
930 | ld r4,GPR4(r1) | ||
931 | ld r1,GPR1(r1) | ||
932 | RFI_TO_KERNEL | ||
905 | b . /* prevent speculative execution */ | 933 | b . /* prevent speculative execution */ |
906 | 934 | ||
907 | #endif /* CONFIG_PPC_BOOK3E */ | 935 | #endif /* CONFIG_PPC_BOOK3E */ |
@@ -1077,7 +1105,7 @@ _GLOBAL(enter_rtas) | |||
1077 | 1105 | ||
1078 | mtspr SPRN_SRR0,r5 | 1106 | mtspr SPRN_SRR0,r5 |
1079 | mtspr SPRN_SRR1,r6 | 1107 | mtspr SPRN_SRR1,r6 |
1080 | rfid | 1108 | RFI_TO_KERNEL |
1081 | b . /* prevent speculative execution */ | 1109 | b . /* prevent speculative execution */ |
1082 | 1110 | ||
1083 | rtas_return_loc: | 1111 | rtas_return_loc: |
@@ -1102,7 +1130,7 @@ rtas_return_loc: | |||
1102 | 1130 | ||
1103 | mtspr SPRN_SRR0,r3 | 1131 | mtspr SPRN_SRR0,r3 |
1104 | mtspr SPRN_SRR1,r4 | 1132 | mtspr SPRN_SRR1,r4 |
1105 | rfid | 1133 | RFI_TO_KERNEL |
1106 | b . /* prevent speculative execution */ | 1134 | b . /* prevent speculative execution */ |
1107 | 1135 | ||
1108 | .align 3 | 1136 | .align 3 |
@@ -1173,7 +1201,7 @@ _GLOBAL(enter_prom) | |||
1173 | LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE) | 1201 | LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE) |
1174 | andc r11,r11,r12 | 1202 | andc r11,r11,r12 |
1175 | mtsrr1 r11 | 1203 | mtsrr1 r11 |
1176 | rfid | 1204 | RFI_TO_KERNEL |
1177 | #endif /* CONFIG_PPC_BOOK3E */ | 1205 | #endif /* CONFIG_PPC_BOOK3E */ |
1178 | 1206 | ||
1179 | 1: /* Return from OF */ | 1207 | 1: /* Return from OF */ |
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index b81ccc5fb32d..938a30fef031 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S | |||
@@ -46,7 +46,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \ | |||
46 | mtspr SPRN_SRR0,r10 ; \ | 46 | mtspr SPRN_SRR0,r10 ; \ |
47 | ld r10,PACAKMSR(r13) ; \ | 47 | ld r10,PACAKMSR(r13) ; \ |
48 | mtspr SPRN_SRR1,r10 ; \ | 48 | mtspr SPRN_SRR1,r10 ; \ |
49 | rfid ; \ | 49 | RFI_TO_KERNEL ; \ |
50 | b . ; /* prevent speculative execution */ | 50 | b . ; /* prevent speculative execution */ |
51 | 51 | ||
52 | #define SYSCALL_PSERIES_3 \ | 52 | #define SYSCALL_PSERIES_3 \ |
@@ -54,7 +54,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \ | |||
54 | 1: mfspr r12,SPRN_SRR1 ; \ | 54 | 1: mfspr r12,SPRN_SRR1 ; \ |
55 | xori r12,r12,MSR_LE ; \ | 55 | xori r12,r12,MSR_LE ; \ |
56 | mtspr SPRN_SRR1,r12 ; \ | 56 | mtspr SPRN_SRR1,r12 ; \ |
57 | rfid ; /* return to userspace */ \ | 57 | RFI_TO_USER ; /* return to userspace */ \ |
58 | b . ; /* prevent speculative execution */ | 58 | b . ; /* prevent speculative execution */ |
59 | 59 | ||
60 | #if defined(CONFIG_RELOCATABLE) | 60 | #if defined(CONFIG_RELOCATABLE) |
@@ -507,7 +507,7 @@ BEGIN_FTR_SECTION | |||
507 | LOAD_HANDLER(r12, machine_check_handle_early) | 507 | LOAD_HANDLER(r12, machine_check_handle_early) |
508 | 1: mtspr SPRN_SRR0,r12 | 508 | 1: mtspr SPRN_SRR0,r12 |
509 | mtspr SPRN_SRR1,r11 | 509 | mtspr SPRN_SRR1,r11 |
510 | rfid | 510 | RFI_TO_KERNEL |
511 | b . /* prevent speculative execution */ | 511 | b . /* prevent speculative execution */ |
512 | 2: | 512 | 2: |
513 | /* Stack overflow. Stay on emergency stack and panic. | 513 | /* Stack overflow. Stay on emergency stack and panic. |
@@ -601,7 +601,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR) | |||
601 | ld r11,PACA_EXGEN+EX_R11(r13) | 601 | ld r11,PACA_EXGEN+EX_R11(r13) |
602 | ld r12,PACA_EXGEN+EX_R12(r13) | 602 | ld r12,PACA_EXGEN+EX_R12(r13) |
603 | ld r13,PACA_EXGEN+EX_R13(r13) | 603 | ld r13,PACA_EXGEN+EX_R13(r13) |
604 | HRFID | 604 | HRFI_TO_UNKNOWN |
605 | b . | 605 | b . |
606 | #endif | 606 | #endif |
607 | 607 | ||
@@ -666,7 +666,7 @@ masked_##_H##interrupt: \ | |||
666 | ld r10,PACA_EXGEN+EX_R10(r13); \ | 666 | ld r10,PACA_EXGEN+EX_R10(r13); \ |
667 | ld r11,PACA_EXGEN+EX_R11(r13); \ | 667 | ld r11,PACA_EXGEN+EX_R11(r13); \ |
668 | GET_SCRATCH0(r13); \ | 668 | GET_SCRATCH0(r13); \ |
669 | ##_H##rfid; \ | 669 | ##_H##RFI_TO_KERNEL; \ |
670 | b . | 670 | b . |
671 | 671 | ||
672 | MASKED_INTERRUPT() | 672 | MASKED_INTERRUPT() |
@@ -756,7 +756,7 @@ kvmppc_skip_interrupt: | |||
756 | addi r13, r13, 4 | 756 | addi r13, r13, 4 |
757 | mtspr SPRN_SRR0, r13 | 757 | mtspr SPRN_SRR0, r13 |
758 | GET_SCRATCH0(r13) | 758 | GET_SCRATCH0(r13) |
759 | rfid | 759 | RFI_TO_KERNEL |
760 | b . | 760 | b . |
761 | 761 | ||
762 | kvmppc_skip_Hinterrupt: | 762 | kvmppc_skip_Hinterrupt: |
@@ -768,7 +768,7 @@ kvmppc_skip_Hinterrupt: | |||
768 | addi r13, r13, 4 | 768 | addi r13, r13, 4 |
769 | mtspr SPRN_HSRR0, r13 | 769 | mtspr SPRN_HSRR0, r13 |
770 | GET_SCRATCH0(r13) | 770 | GET_SCRATCH0(r13) |
771 | hrfid | 771 | HRFI_TO_KERNEL |
772 | b . | 772 | b . |
773 | #endif | 773 | #endif |
774 | 774 | ||
@@ -1439,7 +1439,7 @@ machine_check_handle_early: | |||
1439 | li r3,MSR_ME | 1439 | li r3,MSR_ME |
1440 | andc r10,r10,r3 /* Turn off MSR_ME */ | 1440 | andc r10,r10,r3 /* Turn off MSR_ME */ |
1441 | mtspr SPRN_SRR1,r10 | 1441 | mtspr SPRN_SRR1,r10 |
1442 | rfid | 1442 | RFI_TO_KERNEL |
1443 | b . | 1443 | b . |
1444 | 2: | 1444 | 2: |
1445 | /* | 1445 | /* |
@@ -1457,7 +1457,7 @@ machine_check_handle_early: | |||
1457 | */ | 1457 | */ |
1458 | bl machine_check_queue_event | 1458 | bl machine_check_queue_event |
1459 | MACHINE_CHECK_HANDLER_WINDUP | 1459 | MACHINE_CHECK_HANDLER_WINDUP |
1460 | rfid | 1460 | RFI_TO_USER_OR_KERNEL |
1461 | 9: | 1461 | 9: |
1462 | /* Deliver the machine check to host kernel in V mode. */ | 1462 | /* Deliver the machine check to host kernel in V mode. */ |
1463 | MACHINE_CHECK_HANDLER_WINDUP | 1463 | MACHINE_CHECK_HANDLER_WINDUP |
@@ -1503,6 +1503,8 @@ slb_miss_realmode: | |||
1503 | 1503 | ||
1504 | andi. r10,r12,MSR_RI /* check for unrecoverable exception */ | 1504 | andi. r10,r12,MSR_RI /* check for unrecoverable exception */ |
1505 | beq- 2f | 1505 | beq- 2f |
1506 | andi. r10,r12,MSR_PR /* check for user mode (PR != 0) */ | ||
1507 | bne 1f | ||
1506 | 1508 | ||
1507 | .machine push | 1509 | .machine push |
1508 | .machine "power4" | 1510 | .machine "power4" |
@@ -1516,7 +1518,23 @@ slb_miss_realmode: | |||
1516 | ld r11,PACA_EXSLB+EX_R11(r13) | 1518 | ld r11,PACA_EXSLB+EX_R11(r13) |
1517 | ld r12,PACA_EXSLB+EX_R12(r13) | 1519 | ld r12,PACA_EXSLB+EX_R12(r13) |
1518 | ld r13,PACA_EXSLB+EX_R13(r13) | 1520 | ld r13,PACA_EXSLB+EX_R13(r13) |
1519 | rfid | 1521 | RFI_TO_KERNEL |
1522 | b . /* prevent speculative execution */ | ||
1523 | |||
1524 | 1: | ||
1525 | .machine push | ||
1526 | .machine "power4" | ||
1527 | mtcrf 0x80,r9 | ||
1528 | mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ | ||
1529 | .machine pop | ||
1530 | |||
1531 | RESTORE_PPR_PACA(PACA_EXSLB, r9) | ||
1532 | ld r9,PACA_EXSLB+EX_R9(r13) | ||
1533 | ld r10,PACA_EXSLB+EX_R10(r13) | ||
1534 | ld r11,PACA_EXSLB+EX_R11(r13) | ||
1535 | ld r12,PACA_EXSLB+EX_R12(r13) | ||
1536 | ld r13,PACA_EXSLB+EX_R13(r13) | ||
1537 | RFI_TO_USER | ||
1520 | b . /* prevent speculative execution */ | 1538 | b . /* prevent speculative execution */ |
1521 | 1539 | ||
1522 | 2: mfspr r11,SPRN_SRR0 | 1540 | 2: mfspr r11,SPRN_SRR0 |
@@ -1525,7 +1543,7 @@ slb_miss_realmode: | |||
1525 | mtspr SPRN_SRR0,r10 | 1543 | mtspr SPRN_SRR0,r10 |
1526 | ld r10,PACAKMSR(r13) | 1544 | ld r10,PACAKMSR(r13) |
1527 | mtspr SPRN_SRR1,r10 | 1545 | mtspr SPRN_SRR1,r10 |
1528 | rfid | 1546 | RFI_TO_KERNEL |
1529 | b . | 1547 | b . |
1530 | 1548 | ||
1531 | unrecov_slb: | 1549 | unrecov_slb: |
@@ -1546,6 +1564,92 @@ power4_fixup_nap: | |||
1546 | blr | 1564 | blr |
1547 | #endif | 1565 | #endif |
1548 | 1566 | ||
1567 | .globl rfi_flush_fallback | ||
1568 | rfi_flush_fallback: | ||
1569 | SET_SCRATCH0(r13); | ||
1570 | GET_PACA(r13); | ||
1571 | std r9,PACA_EXRFI+EX_R9(r13) | ||
1572 | std r10,PACA_EXRFI+EX_R10(r13) | ||
1573 | std r11,PACA_EXRFI+EX_R11(r13) | ||
1574 | std r12,PACA_EXRFI+EX_R12(r13) | ||
1575 | std r8,PACA_EXRFI+EX_R13(r13) | ||
1576 | mfctr r9 | ||
1577 | ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) | ||
1578 | ld r11,PACA_L1D_FLUSH_SETS(r13) | ||
1579 | ld r12,PACA_L1D_FLUSH_CONGRUENCE(r13) | ||
1580 | /* | ||
1581 | * The load adresses are at staggered offsets within cachelines, | ||
1582 | * which suits some pipelines better (on others it should not | ||
1583 | * hurt). | ||
1584 | */ | ||
1585 | addi r12,r12,8 | ||
1586 | mtctr r11 | ||
1587 | DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ | ||
1588 | |||
1589 | /* order ld/st prior to dcbt stop all streams with flushing */ | ||
1590 | sync | ||
1591 | 1: li r8,0 | ||
1592 | .rept 8 /* 8-way set associative */ | ||
1593 | ldx r11,r10,r8 | ||
1594 | add r8,r8,r12 | ||
1595 | xor r11,r11,r11 // Ensure r11 is 0 even if fallback area is not | ||
1596 | add r8,r8,r11 // Add 0, this creates a dependency on the ldx | ||
1597 | .endr | ||
1598 | addi r10,r10,128 /* 128 byte cache line */ | ||
1599 | bdnz 1b | ||
1600 | |||
1601 | mtctr r9 | ||
1602 | ld r9,PACA_EXRFI+EX_R9(r13) | ||
1603 | ld r10,PACA_EXRFI+EX_R10(r13) | ||
1604 | ld r11,PACA_EXRFI+EX_R11(r13) | ||
1605 | ld r12,PACA_EXRFI+EX_R12(r13) | ||
1606 | ld r8,PACA_EXRFI+EX_R13(r13) | ||
1607 | GET_SCRATCH0(r13); | ||
1608 | rfid | ||
1609 | |||
1610 | .globl hrfi_flush_fallback | ||
1611 | hrfi_flush_fallback: | ||
1612 | SET_SCRATCH0(r13); | ||
1613 | GET_PACA(r13); | ||
1614 | std r9,PACA_EXRFI+EX_R9(r13) | ||
1615 | std r10,PACA_EXRFI+EX_R10(r13) | ||
1616 | std r11,PACA_EXRFI+EX_R11(r13) | ||
1617 | std r12,PACA_EXRFI+EX_R12(r13) | ||
1618 | std r8,PACA_EXRFI+EX_R13(r13) | ||
1619 | mfctr r9 | ||
1620 | ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) | ||
1621 | ld r11,PACA_L1D_FLUSH_SETS(r13) | ||
1622 | ld r12,PACA_L1D_FLUSH_CONGRUENCE(r13) | ||
1623 | /* | ||
1624 | * The load adresses are at staggered offsets within cachelines, | ||
1625 | * which suits some pipelines better (on others it should not | ||
1626 | * hurt). | ||
1627 | */ | ||
1628 | addi r12,r12,8 | ||
1629 | mtctr r11 | ||
1630 | DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ | ||
1631 | |||
1632 | /* order ld/st prior to dcbt stop all streams with flushing */ | ||
1633 | sync | ||
1634 | 1: li r8,0 | ||
1635 | .rept 8 /* 8-way set associative */ | ||
1636 | ldx r11,r10,r8 | ||
1637 | add r8,r8,r12 | ||
1638 | xor r11,r11,r11 // Ensure r11 is 0 even if fallback area is not | ||
1639 | add r8,r8,r11 // Add 0, this creates a dependency on the ldx | ||
1640 | .endr | ||
1641 | addi r10,r10,128 /* 128 byte cache line */ | ||
1642 | bdnz 1b | ||
1643 | |||
1644 | mtctr r9 | ||
1645 | ld r9,PACA_EXRFI+EX_R9(r13) | ||
1646 | ld r10,PACA_EXRFI+EX_R10(r13) | ||
1647 | ld r11,PACA_EXRFI+EX_R11(r13) | ||
1648 | ld r12,PACA_EXRFI+EX_R12(r13) | ||
1649 | ld r8,PACA_EXRFI+EX_R13(r13) | ||
1650 | GET_SCRATCH0(r13); | ||
1651 | hrfid | ||
1652 | |||
1549 | /* | 1653 | /* |
1550 | * Hash table stuff | 1654 | * Hash table stuff |
1551 | */ | 1655 | */ |
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index db475d41b57a..107588295b39 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S | |||
@@ -66,7 +66,7 @@ PPC64_CACHES: | |||
66 | * flush all bytes from start through stop-1 inclusive | 66 | * flush all bytes from start through stop-1 inclusive |
67 | */ | 67 | */ |
68 | 68 | ||
69 | _KPROBE(flush_icache_range) | 69 | _KPROBE_TOC(flush_icache_range) |
70 | BEGIN_FTR_SECTION | 70 | BEGIN_FTR_SECTION |
71 | PURGE_PREFETCHED_INS | 71 | PURGE_PREFETCHED_INS |
72 | blr | 72 | blr |
@@ -117,7 +117,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) | |||
117 | * | 117 | * |
118 | * flush all bytes from start to stop-1 inclusive | 118 | * flush all bytes from start to stop-1 inclusive |
119 | */ | 119 | */ |
120 | _GLOBAL(flush_dcache_range) | 120 | _GLOBAL_TOC(flush_dcache_range) |
121 | 121 | ||
122 | /* | 122 | /* |
123 | * Flush the data cache to memory | 123 | * Flush the data cache to memory |
@@ -701,31 +701,3 @@ _GLOBAL(kexec_sequence) | |||
701 | li r5,0 | 701 | li r5,0 |
702 | blr /* image->start(physid, image->start, 0); */ | 702 | blr /* image->start(physid, image->start, 0); */ |
703 | #endif /* CONFIG_KEXEC */ | 703 | #endif /* CONFIG_KEXEC */ |
704 | |||
705 | #ifdef CONFIG_MODULES | ||
706 | #if defined(_CALL_ELF) && _CALL_ELF == 2 | ||
707 | |||
708 | #ifdef CONFIG_MODVERSIONS | ||
709 | .weak __crc_TOC. | ||
710 | .section "___kcrctab+TOC.","a" | ||
711 | .globl __kcrctab_TOC. | ||
712 | __kcrctab_TOC.: | ||
713 | .llong __crc_TOC. | ||
714 | #endif | ||
715 | |||
716 | /* | ||
717 | * Export a fake .TOC. since both modpost and depmod will complain otherwise. | ||
718 | * Both modpost and depmod strip the leading . so we do the same here. | ||
719 | */ | ||
720 | .section "__ksymtab_strings","a" | ||
721 | __kstrtab_TOC.: | ||
722 | .asciz "TOC." | ||
723 | |||
724 | .section "___ksymtab+TOC.","a" | ||
725 | /* This symbol name is important: it's used by modpost to find exported syms */ | ||
726 | .globl __ksymtab_TOC. | ||
727 | __ksymtab_TOC.: | ||
728 | .llong 0 /* .value */ | ||
729 | .llong __kstrtab_TOC. | ||
730 | #endif /* ELFv2 */ | ||
731 | #endif /* MODULES */ | ||
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index e4f7d4eed20c..08b7a40de5f8 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c | |||
@@ -326,7 +326,10 @@ static void dedotify_versions(struct modversion_info *vers, | |||
326 | } | 326 | } |
327 | } | 327 | } |
328 | 328 | ||
329 | /* Undefined symbols which refer to .funcname, hack to funcname (or .TOC.) */ | 329 | /* |
330 | * Undefined symbols which refer to .funcname, hack to funcname. Make .TOC. | ||
331 | * seem to be defined (value set later). | ||
332 | */ | ||
330 | static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab) | 333 | static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab) |
331 | { | 334 | { |
332 | unsigned int i; | 335 | unsigned int i; |
@@ -334,8 +337,11 @@ static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab) | |||
334 | for (i = 1; i < numsyms; i++) { | 337 | for (i = 1; i < numsyms; i++) { |
335 | if (syms[i].st_shndx == SHN_UNDEF) { | 338 | if (syms[i].st_shndx == SHN_UNDEF) { |
336 | char *name = strtab + syms[i].st_name; | 339 | char *name = strtab + syms[i].st_name; |
337 | if (name[0] == '.') | 340 | if (name[0] == '.') { |
341 | if (strcmp(name+1, "TOC.") == 0) | ||
342 | syms[i].st_shndx = SHN_ABS; | ||
338 | syms[i].st_name++; | 343 | syms[i].st_name++; |
344 | } | ||
339 | } | 345 | } |
340 | } | 346 | } |
341 | } | 347 | } |
@@ -351,7 +357,7 @@ static Elf64_Sym *find_dot_toc(Elf64_Shdr *sechdrs, | |||
351 | numsyms = sechdrs[symindex].sh_size / sizeof(Elf64_Sym); | 357 | numsyms = sechdrs[symindex].sh_size / sizeof(Elf64_Sym); |
352 | 358 | ||
353 | for (i = 1; i < numsyms; i++) { | 359 | for (i = 1; i < numsyms; i++) { |
354 | if (syms[i].st_shndx == SHN_UNDEF | 360 | if (syms[i].st_shndx == SHN_ABS |
355 | && strcmp(strtab + syms[i].st_name, "TOC.") == 0) | 361 | && strcmp(strtab + syms[i].st_name, "TOC.") == 0) |
356 | return &syms[i]; | 362 | return &syms[i]; |
357 | } | 363 | } |
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index b7abf3cd2a67..ef5f566f3977 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
@@ -209,7 +209,8 @@ void enable_kernel_vsx(void) | |||
209 | WARN_ON(preemptible()); | 209 | WARN_ON(preemptible()); |
210 | 210 | ||
211 | #ifdef CONFIG_SMP | 211 | #ifdef CONFIG_SMP |
212 | if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) | 212 | if (current->thread.regs && |
213 | (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) | ||
213 | giveup_vsx(current); | 214 | giveup_vsx(current); |
214 | else | 215 | else |
215 | giveup_vsx(NULL); /* just enable vsx for kernel - force */ | 216 | giveup_vsx(NULL); /* just enable vsx for kernel - force */ |
@@ -231,7 +232,7 @@ void flush_vsx_to_thread(struct task_struct *tsk) | |||
231 | { | 232 | { |
232 | if (tsk->thread.regs) { | 233 | if (tsk->thread.regs) { |
233 | preempt_disable(); | 234 | preempt_disable(); |
234 | if (tsk->thread.regs->msr & MSR_VSX) { | 235 | if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) { |
235 | #ifdef CONFIG_SMP | 236 | #ifdef CONFIG_SMP |
236 | BUG_ON(tsk != current); | 237 | BUG_ON(tsk != current); |
237 | #endif | 238 | #endif |
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index a20823210ac0..df4a87eb8da4 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <linux/hugetlb.h> | 38 | #include <linux/hugetlb.h> |
39 | #include <linux/memory.h> | 39 | #include <linux/memory.h> |
40 | #include <linux/nmi.h> | 40 | #include <linux/nmi.h> |
41 | #include <linux/debugfs.h> | ||
41 | 42 | ||
42 | #include <asm/io.h> | 43 | #include <asm/io.h> |
43 | #include <asm/kdump.h> | 44 | #include <asm/kdump.h> |
@@ -834,4 +835,142 @@ static int __init disable_hardlockup_detector(void) | |||
834 | return 0; | 835 | return 0; |
835 | } | 836 | } |
836 | early_initcall(disable_hardlockup_detector); | 837 | early_initcall(disable_hardlockup_detector); |
838 | |||
839 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
840 | static enum l1d_flush_type enabled_flush_types; | ||
841 | static void *l1d_flush_fallback_area; | ||
842 | static bool no_rfi_flush; | ||
843 | bool rfi_flush; | ||
844 | |||
845 | static int __init handle_no_rfi_flush(char *p) | ||
846 | { | ||
847 | pr_info("rfi-flush: disabled on command line."); | ||
848 | no_rfi_flush = true; | ||
849 | return 0; | ||
850 | } | ||
851 | early_param("no_rfi_flush", handle_no_rfi_flush); | ||
852 | |||
853 | /* | ||
854 | * The RFI flush is not KPTI, but because users will see doco that says to use | ||
855 | * nopti we hijack that option here to also disable the RFI flush. | ||
856 | */ | ||
857 | static int __init handle_no_pti(char *p) | ||
858 | { | ||
859 | pr_info("rfi-flush: disabling due to 'nopti' on command line.\n"); | ||
860 | handle_no_rfi_flush(NULL); | ||
861 | return 0; | ||
862 | } | ||
863 | early_param("nopti", handle_no_pti); | ||
864 | |||
865 | static void do_nothing(void *unused) | ||
866 | { | ||
867 | /* | ||
868 | * We don't need to do the flush explicitly, just enter+exit kernel is | ||
869 | * sufficient, the RFI exit handlers will do the right thing. | ||
870 | */ | ||
871 | } | ||
872 | |||
873 | void rfi_flush_enable(bool enable) | ||
874 | { | ||
875 | if (rfi_flush == enable) | ||
876 | return; | ||
877 | |||
878 | if (enable) { | ||
879 | do_rfi_flush_fixups(enabled_flush_types); | ||
880 | on_each_cpu(do_nothing, NULL, 1); | ||
881 | } else | ||
882 | do_rfi_flush_fixups(L1D_FLUSH_NONE); | ||
883 | |||
884 | rfi_flush = enable; | ||
885 | } | ||
886 | |||
887 | static void init_fallback_flush(void) | ||
888 | { | ||
889 | u64 l1d_size, limit; | ||
890 | int cpu; | ||
891 | |||
892 | l1d_size = ppc64_caches.dsize; | ||
893 | limit = min(safe_stack_limit(), ppc64_rma_size); | ||
894 | |||
895 | /* | ||
896 | * Align to L1d size, and size it at 2x L1d size, to catch possible | ||
897 | * hardware prefetch runoff. We don't have a recipe for load patterns to | ||
898 | * reliably avoid the prefetcher. | ||
899 | */ | ||
900 | l1d_flush_fallback_area = __va(memblock_alloc_base(l1d_size * 2, l1d_size, limit)); | ||
901 | memset(l1d_flush_fallback_area, 0, l1d_size * 2); | ||
902 | |||
903 | for_each_possible_cpu(cpu) { | ||
904 | /* | ||
905 | * The fallback flush is currently coded for 8-way | ||
906 | * associativity. Different associativity is possible, but it | ||
907 | * will be treated as 8-way and may not evict the lines as | ||
908 | * effectively. | ||
909 | * | ||
910 | * 128 byte lines are mandatory. | ||
911 | */ | ||
912 | u64 c = l1d_size / 8; | ||
913 | |||
914 | paca[cpu].rfi_flush_fallback_area = l1d_flush_fallback_area; | ||
915 | paca[cpu].l1d_flush_congruence = c; | ||
916 | paca[cpu].l1d_flush_sets = c / 128; | ||
917 | } | ||
918 | } | ||
919 | |||
920 | void __init setup_rfi_flush(enum l1d_flush_type types, bool enable) | ||
921 | { | ||
922 | if (types & L1D_FLUSH_FALLBACK) { | ||
923 | pr_info("rfi-flush: Using fallback displacement flush\n"); | ||
924 | init_fallback_flush(); | ||
925 | } | ||
926 | |||
927 | if (types & L1D_FLUSH_ORI) | ||
928 | pr_info("rfi-flush: Using ori type flush\n"); | ||
929 | |||
930 | if (types & L1D_FLUSH_MTTRIG) | ||
931 | pr_info("rfi-flush: Using mttrig type flush\n"); | ||
932 | |||
933 | enabled_flush_types = types; | ||
934 | |||
935 | if (!no_rfi_flush) | ||
936 | rfi_flush_enable(enable); | ||
937 | } | ||
938 | |||
939 | #ifdef CONFIG_DEBUG_FS | ||
940 | static int rfi_flush_set(void *data, u64 val) | ||
941 | { | ||
942 | if (val == 1) | ||
943 | rfi_flush_enable(true); | ||
944 | else if (val == 0) | ||
945 | rfi_flush_enable(false); | ||
946 | else | ||
947 | return -EINVAL; | ||
948 | |||
949 | return 0; | ||
950 | } | ||
951 | |||
952 | static int rfi_flush_get(void *data, u64 *val) | ||
953 | { | ||
954 | *val = rfi_flush ? 1 : 0; | ||
955 | return 0; | ||
956 | } | ||
957 | |||
958 | DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n"); | ||
959 | |||
960 | static __init int rfi_flush_debugfs_init(void) | ||
961 | { | ||
962 | debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush); | ||
963 | return 0; | ||
964 | } | ||
965 | device_initcall(rfi_flush_debugfs_init); | ||
966 | #endif | ||
967 | |||
968 | ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) | ||
969 | { | ||
970 | if (rfi_flush) | ||
971 | return sprintf(buf, "Mitigation: RFI Flush\n"); | ||
972 | |||
973 | return sprintf(buf, "Vulnerable\n"); | ||
974 | } | ||
975 | #endif /* CONFIG_PPC_BOOK3S_64 */ | ||
837 | #endif | 976 | #endif |
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index d41fd0af8980..072a23a17350 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S | |||
@@ -72,6 +72,15 @@ SECTIONS | |||
72 | /* Read-only data */ | 72 | /* Read-only data */ |
73 | RODATA | 73 | RODATA |
74 | 74 | ||
75 | #ifdef CONFIG_PPC64 | ||
76 | . = ALIGN(8); | ||
77 | __rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) { | ||
78 | __start___rfi_flush_fixup = .; | ||
79 | *(__rfi_flush_fixup) | ||
80 | __stop___rfi_flush_fixup = .; | ||
81 | } | ||
82 | #endif | ||
83 | |||
75 | EXCEPTION_TABLE(0) | 84 | EXCEPTION_TABLE(0) |
76 | 85 | ||
77 | NOTES :kernel :notes | 86 | NOTES :kernel :notes |
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index ffab9269bfe4..4463718ae614 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S | |||
@@ -64,7 +64,7 @@ _GLOBAL_TOC(kvmppc_hv_entry_trampoline) | |||
64 | mtmsrd r0,1 /* clear RI in MSR */ | 64 | mtmsrd r0,1 /* clear RI in MSR */ |
65 | mtsrr0 r5 | 65 | mtsrr0 r5 |
66 | mtsrr1 r6 | 66 | mtsrr1 r6 |
67 | RFI | 67 | RFI_TO_KERNEL |
68 | 68 | ||
69 | kvmppc_call_hv_entry: | 69 | kvmppc_call_hv_entry: |
70 | ld r4, HSTATE_KVM_VCPU(r13) | 70 | ld r4, HSTATE_KVM_VCPU(r13) |
@@ -170,7 +170,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) | |||
170 | mtsrr0 r8 | 170 | mtsrr0 r8 |
171 | mtsrr1 r7 | 171 | mtsrr1 r7 |
172 | beq cr1, 13f /* machine check */ | 172 | beq cr1, 13f /* machine check */ |
173 | RFI | 173 | RFI_TO_KERNEL |
174 | 174 | ||
175 | /* On POWER7, we have external interrupts set to use HSRR0/1 */ | 175 | /* On POWER7, we have external interrupts set to use HSRR0/1 */ |
176 | 11: mtspr SPRN_HSRR0, r8 | 176 | 11: mtspr SPRN_HSRR0, r8 |
@@ -965,8 +965,7 @@ BEGIN_FTR_SECTION | |||
965 | END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) | 965 | END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) |
966 | ld r0, VCPU_GPR(R0)(r4) | 966 | ld r0, VCPU_GPR(R0)(r4) |
967 | ld r4, VCPU_GPR(R4)(r4) | 967 | ld r4, VCPU_GPR(R4)(r4) |
968 | 968 | HRFI_TO_GUEST | |
969 | hrfid | ||
970 | b . | 969 | b . |
971 | 970 | ||
972 | secondary_too_late: | 971 | secondary_too_late: |
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S index 16c4d88ba27d..a328f99a887c 100644 --- a/arch/powerpc/kvm/book3s_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_rmhandlers.S | |||
@@ -46,6 +46,9 @@ | |||
46 | 46 | ||
47 | #define FUNC(name) name | 47 | #define FUNC(name) name |
48 | 48 | ||
49 | #define RFI_TO_KERNEL RFI | ||
50 | #define RFI_TO_GUEST RFI | ||
51 | |||
49 | .macro INTERRUPT_TRAMPOLINE intno | 52 | .macro INTERRUPT_TRAMPOLINE intno |
50 | 53 | ||
51 | .global kvmppc_trampoline_\intno | 54 | .global kvmppc_trampoline_\intno |
@@ -141,7 +144,7 @@ kvmppc_handler_skip_ins: | |||
141 | GET_SCRATCH0(r13) | 144 | GET_SCRATCH0(r13) |
142 | 145 | ||
143 | /* And get back into the code */ | 146 | /* And get back into the code */ |
144 | RFI | 147 | RFI_TO_KERNEL |
145 | #endif | 148 | #endif |
146 | 149 | ||
147 | /* | 150 | /* |
@@ -164,6 +167,6 @@ _GLOBAL_TOC(kvmppc_entry_trampoline) | |||
164 | ori r5, r5, MSR_EE | 167 | ori r5, r5, MSR_EE |
165 | mtsrr0 r7 | 168 | mtsrr0 r7 |
166 | mtsrr1 r6 | 169 | mtsrr1 r6 |
167 | RFI | 170 | RFI_TO_KERNEL |
168 | 171 | ||
169 | #include "book3s_segment.S" | 172 | #include "book3s_segment.S" |
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S index ca8f174289bb..7c982956d709 100644 --- a/arch/powerpc/kvm/book3s_segment.S +++ b/arch/powerpc/kvm/book3s_segment.S | |||
@@ -156,7 +156,7 @@ no_dcbz32_on: | |||
156 | PPC_LL r9, SVCPU_R9(r3) | 156 | PPC_LL r9, SVCPU_R9(r3) |
157 | PPC_LL r3, (SVCPU_R3)(r3) | 157 | PPC_LL r3, (SVCPU_R3)(r3) |
158 | 158 | ||
159 | RFI | 159 | RFI_TO_GUEST |
160 | kvmppc_handler_trampoline_enter_end: | 160 | kvmppc_handler_trampoline_enter_end: |
161 | 161 | ||
162 | 162 | ||
@@ -389,5 +389,5 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) | |||
389 | cmpwi r12, BOOK3S_INTERRUPT_DOORBELL | 389 | cmpwi r12, BOOK3S_INTERRUPT_DOORBELL |
390 | beqa BOOK3S_INTERRUPT_DOORBELL | 390 | beqa BOOK3S_INTERRUPT_DOORBELL |
391 | 391 | ||
392 | RFI | 392 | RFI_TO_KERNEL |
393 | kvmppc_handler_trampoline_exit_end: | 393 | kvmppc_handler_trampoline_exit_end: |
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index 7ce3870d7ddd..a18d648d31a6 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <asm/code-patching.h> | 20 | #include <asm/code-patching.h> |
21 | #include <asm/page.h> | 21 | #include <asm/page.h> |
22 | #include <asm/sections.h> | 22 | #include <asm/sections.h> |
23 | #include <asm/setup.h> | ||
23 | 24 | ||
24 | 25 | ||
25 | struct fixup_entry { | 26 | struct fixup_entry { |
@@ -113,6 +114,47 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end) | |||
113 | } | 114 | } |
114 | } | 115 | } |
115 | 116 | ||
117 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
118 | void do_rfi_flush_fixups(enum l1d_flush_type types) | ||
119 | { | ||
120 | unsigned int instrs[3], *dest; | ||
121 | long *start, *end; | ||
122 | int i; | ||
123 | |||
124 | start = PTRRELOC(&__start___rfi_flush_fixup), | ||
125 | end = PTRRELOC(&__stop___rfi_flush_fixup); | ||
126 | |||
127 | instrs[0] = 0x60000000; /* nop */ | ||
128 | instrs[1] = 0x60000000; /* nop */ | ||
129 | instrs[2] = 0x60000000; /* nop */ | ||
130 | |||
131 | if (types & L1D_FLUSH_FALLBACK) | ||
132 | /* b .+16 to fallback flush */ | ||
133 | instrs[0] = 0x48000010; | ||
134 | |||
135 | i = 0; | ||
136 | if (types & L1D_FLUSH_ORI) { | ||
137 | instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */ | ||
138 | instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/ | ||
139 | } | ||
140 | |||
141 | if (types & L1D_FLUSH_MTTRIG) | ||
142 | instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */ | ||
143 | |||
144 | for (i = 0; start < end; start++, i++) { | ||
145 | dest = (void *)start + *start; | ||
146 | |||
147 | pr_devel("patching dest %lx\n", (unsigned long)dest); | ||
148 | |||
149 | patch_instruction(dest, instrs[0]); | ||
150 | patch_instruction(dest + 1, instrs[1]); | ||
151 | patch_instruction(dest + 2, instrs[2]); | ||
152 | } | ||
153 | |||
154 | printk(KERN_DEBUG "rfi-flush: patched %d locations\n", i); | ||
155 | } | ||
156 | #endif /* CONFIG_PPC_BOOK3S_64 */ | ||
157 | |||
116 | void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end) | 158 | void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end) |
117 | { | 159 | { |
118 | long *start, *end; | 160 | long *start, *end; |
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index d1e65ce545b3..b2ab164a8094 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c | |||
@@ -401,8 +401,12 @@ static __u64 power_pmu_bhrb_to(u64 addr) | |||
401 | int ret; | 401 | int ret; |
402 | __u64 target; | 402 | __u64 target; |
403 | 403 | ||
404 | if (is_kernel_addr(addr)) | 404 | if (is_kernel_addr(addr)) { |
405 | return branch_target((unsigned int *)addr); | 405 | if (probe_kernel_read(&instr, (void *)addr, sizeof(instr))) |
406 | return 0; | ||
407 | |||
408 | return branch_target(&instr); | ||
409 | } | ||
406 | 410 | ||
407 | /* Userspace: need copy instruction here then translate it */ | 411 | /* Userspace: need copy instruction here then translate it */ |
408 | pagefault_disable(); | 412 | pagefault_disable(); |
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index f48afc06ba14..30c6b3b7be90 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c | |||
@@ -35,13 +35,63 @@ | |||
35 | #include <asm/opal.h> | 35 | #include <asm/opal.h> |
36 | #include <asm/kexec.h> | 36 | #include <asm/kexec.h> |
37 | #include <asm/smp.h> | 37 | #include <asm/smp.h> |
38 | #include <asm/tm.h> | ||
39 | #include <asm/setup.h> | ||
38 | 40 | ||
39 | #include "powernv.h" | 41 | #include "powernv.h" |
40 | 42 | ||
43 | static void pnv_setup_rfi_flush(void) | ||
44 | { | ||
45 | struct device_node *np, *fw_features; | ||
46 | enum l1d_flush_type type; | ||
47 | int enable; | ||
48 | |||
49 | /* Default to fallback in case fw-features are not available */ | ||
50 | type = L1D_FLUSH_FALLBACK; | ||
51 | enable = 1; | ||
52 | |||
53 | np = of_find_node_by_name(NULL, "ibm,opal"); | ||
54 | fw_features = of_get_child_by_name(np, "fw-features"); | ||
55 | of_node_put(np); | ||
56 | |||
57 | if (fw_features) { | ||
58 | np = of_get_child_by_name(fw_features, "inst-l1d-flush-trig2"); | ||
59 | if (np && of_property_read_bool(np, "enabled")) | ||
60 | type = L1D_FLUSH_MTTRIG; | ||
61 | |||
62 | of_node_put(np); | ||
63 | |||
64 | np = of_get_child_by_name(fw_features, "inst-l1d-flush-ori30,30,0"); | ||
65 | if (np && of_property_read_bool(np, "enabled")) | ||
66 | type = L1D_FLUSH_ORI; | ||
67 | |||
68 | of_node_put(np); | ||
69 | |||
70 | /* Enable unless firmware says NOT to */ | ||
71 | enable = 2; | ||
72 | np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-hv-1-to-0"); | ||
73 | if (np && of_property_read_bool(np, "disabled")) | ||
74 | enable--; | ||
75 | |||
76 | of_node_put(np); | ||
77 | |||
78 | np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-pr-0-to-1"); | ||
79 | if (np && of_property_read_bool(np, "disabled")) | ||
80 | enable--; | ||
81 | |||
82 | of_node_put(np); | ||
83 | of_node_put(fw_features); | ||
84 | } | ||
85 | |||
86 | setup_rfi_flush(type, enable > 0); | ||
87 | } | ||
88 | |||
41 | static void __init pnv_setup_arch(void) | 89 | static void __init pnv_setup_arch(void) |
42 | { | 90 | { |
43 | set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT); | 91 | set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT); |
44 | 92 | ||
93 | pnv_setup_rfi_flush(); | ||
94 | |||
45 | /* Initialize SMP */ | 95 | /* Initialize SMP */ |
46 | pnv_smp_init(); | 96 | pnv_smp_init(); |
47 | 97 | ||
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 36df46eaba24..dd2545fc9947 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c | |||
@@ -499,6 +499,39 @@ static void __init find_and_init_phbs(void) | |||
499 | of_pci_check_probe_only(); | 499 | of_pci_check_probe_only(); |
500 | } | 500 | } |
501 | 501 | ||
502 | static void pseries_setup_rfi_flush(void) | ||
503 | { | ||
504 | struct h_cpu_char_result result; | ||
505 | enum l1d_flush_type types; | ||
506 | bool enable; | ||
507 | long rc; | ||
508 | |||
509 | /* Enable by default */ | ||
510 | enable = true; | ||
511 | |||
512 | rc = plpar_get_cpu_characteristics(&result); | ||
513 | if (rc == H_SUCCESS) { | ||
514 | types = L1D_FLUSH_NONE; | ||
515 | |||
516 | if (result.character & H_CPU_CHAR_L1D_FLUSH_TRIG2) | ||
517 | types |= L1D_FLUSH_MTTRIG; | ||
518 | if (result.character & H_CPU_CHAR_L1D_FLUSH_ORI30) | ||
519 | types |= L1D_FLUSH_ORI; | ||
520 | |||
521 | /* Use fallback if nothing set in hcall */ | ||
522 | if (types == L1D_FLUSH_NONE) | ||
523 | types = L1D_FLUSH_FALLBACK; | ||
524 | |||
525 | if (!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR)) | ||
526 | enable = false; | ||
527 | } else { | ||
528 | /* Default to fallback if case hcall is not available */ | ||
529 | types = L1D_FLUSH_FALLBACK; | ||
530 | } | ||
531 | |||
532 | setup_rfi_flush(types, enable); | ||
533 | } | ||
534 | |||
502 | static void __init pSeries_setup_arch(void) | 535 | static void __init pSeries_setup_arch(void) |
503 | { | 536 | { |
504 | set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT); | 537 | set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT); |
@@ -515,7 +548,9 @@ static void __init pSeries_setup_arch(void) | |||
515 | 548 | ||
516 | fwnmi_init(); | 549 | fwnmi_init(); |
517 | 550 | ||
518 | /* By default, only probe PCI (can be overriden by rtas_pci) */ | 551 | pseries_setup_rfi_flush(); |
552 | |||
553 | /* By default, only probe PCI (can be overridden by rtas_pci) */ | ||
519 | pci_add_flags(PCI_PROBE_ONLY); | 554 | pci_add_flags(PCI_PROBE_ONLY); |
520 | 555 | ||
521 | /* Find and initialize PCI host bridges */ | 556 | /* Find and initialize PCI host bridges */ |
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index c41094ca3b73..a62de16633f2 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -127,6 +127,7 @@ config S390 | |||
127 | select HAVE_ARCH_TRACEHOOK | 127 | select HAVE_ARCH_TRACEHOOK |
128 | select HAVE_ARCH_TRANSPARENT_HUGEPAGE | 128 | select HAVE_ARCH_TRANSPARENT_HUGEPAGE |
129 | select HAVE_BPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES | 129 | select HAVE_BPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES |
130 | select HAVE_EBPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES | ||
130 | select HAVE_CMPXCHG_DOUBLE | 131 | select HAVE_CMPXCHG_DOUBLE |
131 | select HAVE_CMPXCHG_LOCAL | 132 | select HAVE_CMPXCHG_LOCAL |
132 | select HAVE_DEBUG_KMEMLEAK | 133 | select HAVE_DEBUG_KMEMLEAK |
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c index 437e61159279..86f934255eb6 100644 --- a/arch/s390/kernel/compat_linux.c +++ b/arch/s390/kernel/compat_linux.c | |||
@@ -110,7 +110,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setregid16, u16, rgid, u16, egid) | |||
110 | 110 | ||
111 | COMPAT_SYSCALL_DEFINE1(s390_setgid16, u16, gid) | 111 | COMPAT_SYSCALL_DEFINE1(s390_setgid16, u16, gid) |
112 | { | 112 | { |
113 | return sys_setgid((gid_t)gid); | 113 | return sys_setgid(low2highgid(gid)); |
114 | } | 114 | } |
115 | 115 | ||
116 | COMPAT_SYSCALL_DEFINE2(s390_setreuid16, u16, ruid, u16, euid) | 116 | COMPAT_SYSCALL_DEFINE2(s390_setreuid16, u16, ruid, u16, euid) |
@@ -120,7 +120,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setreuid16, u16, ruid, u16, euid) | |||
120 | 120 | ||
121 | COMPAT_SYSCALL_DEFINE1(s390_setuid16, u16, uid) | 121 | COMPAT_SYSCALL_DEFINE1(s390_setuid16, u16, uid) |
122 | { | 122 | { |
123 | return sys_setuid((uid_t)uid); | 123 | return sys_setuid(low2highuid(uid)); |
124 | } | 124 | } |
125 | 125 | ||
126 | COMPAT_SYSCALL_DEFINE3(s390_setresuid16, u16, ruid, u16, euid, u16, suid) | 126 | COMPAT_SYSCALL_DEFINE3(s390_setresuid16, u16, ruid, u16, euid, u16, suid) |
@@ -173,12 +173,12 @@ COMPAT_SYSCALL_DEFINE3(s390_getresgid16, u16 __user *, rgidp, | |||
173 | 173 | ||
174 | COMPAT_SYSCALL_DEFINE1(s390_setfsuid16, u16, uid) | 174 | COMPAT_SYSCALL_DEFINE1(s390_setfsuid16, u16, uid) |
175 | { | 175 | { |
176 | return sys_setfsuid((uid_t)uid); | 176 | return sys_setfsuid(low2highuid(uid)); |
177 | } | 177 | } |
178 | 178 | ||
179 | COMPAT_SYSCALL_DEFINE1(s390_setfsgid16, u16, gid) | 179 | COMPAT_SYSCALL_DEFINE1(s390_setfsgid16, u16, gid) |
180 | { | 180 | { |
181 | return sys_setfsgid((gid_t)gid); | 181 | return sys_setfsgid(low2highgid(gid)); |
182 | } | 182 | } |
183 | 183 | ||
184 | static int groups16_to_user(u16 __user *grouplist, struct group_info *group_info) | 184 | static int groups16_to_user(u16 __user *grouplist, struct group_info *group_info) |
@@ -263,6 +263,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setgroups16, int, gidsetsize, u16 __user *, grouplis | |||
263 | return retval; | 263 | return retval; |
264 | } | 264 | } |
265 | 265 | ||
266 | groups_sort(group_info); | ||
266 | retval = set_current_groups(group_info); | 267 | retval = set_current_groups(group_info); |
267 | put_group_info(group_info); | 268 | put_group_info(group_info); |
268 | 269 | ||
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c index fb0901ec4306..f5bf24df5c6d 100644 --- a/arch/s390/kernel/machine_kexec.c +++ b/arch/s390/kernel/machine_kexec.c | |||
@@ -83,13 +83,13 @@ static int machine_kdump_pm_cb(struct notifier_block *nb, unsigned long action, | |||
83 | switch (action) { | 83 | switch (action) { |
84 | case PM_SUSPEND_PREPARE: | 84 | case PM_SUSPEND_PREPARE: |
85 | case PM_HIBERNATION_PREPARE: | 85 | case PM_HIBERNATION_PREPARE: |
86 | if (crashk_res.start) | 86 | if (kexec_crash_image) |
87 | crash_map_reserved_pages(); | 87 | arch_kexec_unprotect_crashkres(); |
88 | break; | 88 | break; |
89 | case PM_POST_SUSPEND: | 89 | case PM_POST_SUSPEND: |
90 | case PM_POST_HIBERNATION: | 90 | case PM_POST_HIBERNATION: |
91 | if (crashk_res.start) | 91 | if (kexec_crash_image) |
92 | crash_unmap_reserved_pages(); | 92 | arch_kexec_protect_crashkres(); |
93 | break; | 93 | break; |
94 | default: | 94 | default: |
95 | return NOTIFY_DONE; | 95 | return NOTIFY_DONE; |
@@ -100,6 +100,8 @@ static int machine_kdump_pm_cb(struct notifier_block *nb, unsigned long action, | |||
100 | static int __init machine_kdump_pm_init(void) | 100 | static int __init machine_kdump_pm_init(void) |
101 | { | 101 | { |
102 | pm_notifier(machine_kdump_pm_cb, 0); | 102 | pm_notifier(machine_kdump_pm_cb, 0); |
103 | /* Create initial mapping for crashkernel memory */ | ||
104 | arch_kexec_unprotect_crashkres(); | ||
103 | return 0; | 105 | return 0; |
104 | } | 106 | } |
105 | arch_initcall(machine_kdump_pm_init); | 107 | arch_initcall(machine_kdump_pm_init); |
@@ -134,6 +136,8 @@ static int kdump_csum_valid(struct kimage *image) | |||
134 | #endif | 136 | #endif |
135 | } | 137 | } |
136 | 138 | ||
139 | #ifdef CONFIG_CRASH_DUMP | ||
140 | |||
137 | /* | 141 | /* |
138 | * Map or unmap crashkernel memory | 142 | * Map or unmap crashkernel memory |
139 | */ | 143 | */ |
@@ -155,21 +159,25 @@ static void crash_map_pages(int enable) | |||
155 | } | 159 | } |
156 | 160 | ||
157 | /* | 161 | /* |
158 | * Map crashkernel memory | 162 | * Unmap crashkernel memory |
159 | */ | 163 | */ |
160 | void crash_map_reserved_pages(void) | 164 | void arch_kexec_protect_crashkres(void) |
161 | { | 165 | { |
162 | crash_map_pages(1); | 166 | if (crashk_res.end) |
167 | crash_map_pages(0); | ||
163 | } | 168 | } |
164 | 169 | ||
165 | /* | 170 | /* |
166 | * Unmap crashkernel memory | 171 | * Map crashkernel memory |
167 | */ | 172 | */ |
168 | void crash_unmap_reserved_pages(void) | 173 | void arch_kexec_unprotect_crashkres(void) |
169 | { | 174 | { |
170 | crash_map_pages(0); | 175 | if (crashk_res.end) |
176 | crash_map_pages(1); | ||
171 | } | 177 | } |
172 | 178 | ||
179 | #endif | ||
180 | |||
173 | /* | 181 | /* |
174 | * Give back memory to hypervisor before new kdump is loaded | 182 | * Give back memory to hypervisor before new kdump is loaded |
175 | */ | 183 | */ |
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c index ff639342a8be..c5b997757988 100644 --- a/arch/sh/kernel/traps_32.c +++ b/arch/sh/kernel/traps_32.c | |||
@@ -607,7 +607,8 @@ asmlinkage void do_divide_error(unsigned long r4) | |||
607 | break; | 607 | break; |
608 | } | 608 | } |
609 | 609 | ||
610 | force_sig_info(SIGFPE, &info, current); | 610 | info.si_signo = SIGFPE; |
611 | force_sig_info(info.si_signo, &info, current); | ||
611 | } | 612 | } |
612 | #endif | 613 | #endif |
613 | 614 | ||
diff --git a/arch/um/Makefile b/arch/um/Makefile index e3abe6f3156d..9ccf462131c4 100644 --- a/arch/um/Makefile +++ b/arch/um/Makefile | |||
@@ -117,7 +117,7 @@ archheaders: | |||
117 | archprepare: include/generated/user_constants.h | 117 | archprepare: include/generated/user_constants.h |
118 | 118 | ||
119 | LINK-$(CONFIG_LD_SCRIPT_STATIC) += -static | 119 | LINK-$(CONFIG_LD_SCRIPT_STATIC) += -static |
120 | LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib | 120 | LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib $(call cc-option, -no-pie) |
121 | 121 | ||
122 | CFLAGS_NO_HARDENING := $(call cc-option, -fno-PIC,) $(call cc-option, -fno-pic,) \ | 122 | CFLAGS_NO_HARDENING := $(call cc-option, -fno-PIC,) $(call cc-option, -fno-pic,) \ |
123 | $(call cc-option, -fno-stack-protector,) \ | 123 | $(call cc-option, -fno-stack-protector,) \ |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index d50d233a7b95..59a328bd7670 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -42,7 +42,7 @@ config X86 | |||
42 | select ARCH_USE_CMPXCHG_LOCKREF if X86_64 | 42 | select ARCH_USE_CMPXCHG_LOCKREF if X86_64 |
43 | select ARCH_USE_QUEUED_RWLOCKS | 43 | select ARCH_USE_QUEUED_RWLOCKS |
44 | select ARCH_USE_QUEUED_SPINLOCKS | 44 | select ARCH_USE_QUEUED_SPINLOCKS |
45 | select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if SMP | 45 | select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH |
46 | select ARCH_WANTS_DYNAMIC_TASK_STRUCT | 46 | select ARCH_WANTS_DYNAMIC_TASK_STRUCT |
47 | select ARCH_WANT_FRAME_POINTERS | 47 | select ARCH_WANT_FRAME_POINTERS |
48 | select ARCH_WANT_IPC_PARSE_VERSION if X86_32 | 48 | select ARCH_WANT_IPC_PARSE_VERSION if X86_32 |
@@ -64,6 +64,7 @@ config X86 | |||
64 | select GENERIC_CLOCKEVENTS_MIN_ADJUST | 64 | select GENERIC_CLOCKEVENTS_MIN_ADJUST |
65 | select GENERIC_CMOS_UPDATE | 65 | select GENERIC_CMOS_UPDATE |
66 | select GENERIC_CPU_AUTOPROBE | 66 | select GENERIC_CPU_AUTOPROBE |
67 | select GENERIC_CPU_VULNERABILITIES | ||
67 | select GENERIC_EARLY_IOREMAP | 68 | select GENERIC_EARLY_IOREMAP |
68 | select GENERIC_FIND_FIRST_BIT | 69 | select GENERIC_FIND_FIRST_BIT |
69 | select GENERIC_IOMAP | 70 | select GENERIC_IOMAP |
@@ -90,6 +91,8 @@ config X86 | |||
90 | select HAVE_ARCH_TRACEHOOK | 91 | select HAVE_ARCH_TRACEHOOK |
91 | select HAVE_ARCH_TRANSPARENT_HUGEPAGE | 92 | select HAVE_ARCH_TRANSPARENT_HUGEPAGE |
92 | select HAVE_ARCH_WITHIN_STACK_FRAMES | 93 | select HAVE_ARCH_WITHIN_STACK_FRAMES |
94 | select HAVE_BPF_JIT if X86_64 | ||
95 | select HAVE_EBPF_JIT if X86_64 | ||
93 | select HAVE_CC_STACKPROTECTOR | 96 | select HAVE_CC_STACKPROTECTOR |
94 | select HAVE_CMPXCHG_DOUBLE | 97 | select HAVE_CMPXCHG_DOUBLE |
95 | select HAVE_CMPXCHG_LOCAL | 98 | select HAVE_CMPXCHG_LOCAL |
@@ -398,6 +401,19 @@ config GOLDFISH | |||
398 | def_bool y | 401 | def_bool y |
399 | depends on X86_GOLDFISH | 402 | depends on X86_GOLDFISH |
400 | 403 | ||
404 | config RETPOLINE | ||
405 | bool "Avoid speculative indirect branches in kernel" | ||
406 | default y | ||
407 | ---help--- | ||
408 | Compile kernel with the retpoline compiler options to guard against | ||
409 | kernel-to-user data leaks by avoiding speculative indirect | ||
410 | branches. Requires a compiler with -mindirect-branch=thunk-extern | ||
411 | support for full protection. The kernel may run slower. | ||
412 | |||
413 | Without compiler support, at least indirect branches in assembler | ||
414 | code are eliminated. Since this includes the syscall entry path, | ||
415 | it is not entirely pointless. | ||
416 | |||
401 | if X86_32 | 417 | if X86_32 |
402 | config X86_EXTENDED_PLATFORM | 418 | config X86_EXTENDED_PLATFORM |
403 | bool "Support for extended (non-PC) x86 platforms" | 419 | bool "Support for extended (non-PC) x86 platforms" |
diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 53949c886341..e83a32ef62c6 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile | |||
@@ -191,6 +191,14 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables | |||
191 | KBUILD_CFLAGS += $(mflags-y) | 191 | KBUILD_CFLAGS += $(mflags-y) |
192 | KBUILD_AFLAGS += $(mflags-y) | 192 | KBUILD_AFLAGS += $(mflags-y) |
193 | 193 | ||
194 | # Avoid indirect branches in kernel to deal with Spectre | ||
195 | ifdef CONFIG_RETPOLINE | ||
196 | RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register) | ||
197 | ifneq ($(RETPOLINE_CFLAGS),) | ||
198 | KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE | ||
199 | endif | ||
200 | endif | ||
201 | |||
194 | archscripts: scripts_basic | 202 | archscripts: scripts_basic |
195 | $(Q)$(MAKE) $(build)=arch/x86/tools relocs | 203 | $(Q)$(MAKE) $(build)=arch/x86/tools relocs |
196 | 204 | ||
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h index 3783dc3e10b3..4abb284a5b9c 100644 --- a/arch/x86/boot/compressed/misc.h +++ b/arch/x86/boot/compressed/misc.h | |||
@@ -9,6 +9,7 @@ | |||
9 | */ | 9 | */ |
10 | #undef CONFIG_PARAVIRT | 10 | #undef CONFIG_PARAVIRT |
11 | #undef CONFIG_PARAVIRT_SPINLOCKS | 11 | #undef CONFIG_PARAVIRT_SPINLOCKS |
12 | #undef CONFIG_PAGE_TABLE_ISOLATION | ||
12 | #undef CONFIG_KASAN | 13 | #undef CONFIG_KASAN |
13 | 14 | ||
14 | #include <linux/linkage.h> | 15 | #include <linux/linkage.h> |
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S index 6bd2c6c95373..3f93dedb5a4d 100644 --- a/arch/x86/crypto/aesni-intel_asm.S +++ b/arch/x86/crypto/aesni-intel_asm.S | |||
@@ -31,6 +31,7 @@ | |||
31 | 31 | ||
32 | #include <linux/linkage.h> | 32 | #include <linux/linkage.h> |
33 | #include <asm/inst.h> | 33 | #include <asm/inst.h> |
34 | #include <asm/nospec-branch.h> | ||
34 | 35 | ||
35 | /* | 36 | /* |
36 | * The following macros are used to move an (un)aligned 16 byte value to/from | 37 | * The following macros are used to move an (un)aligned 16 byte value to/from |
@@ -2714,7 +2715,7 @@ ENTRY(aesni_xts_crypt8) | |||
2714 | pxor INC, STATE4 | 2715 | pxor INC, STATE4 |
2715 | movdqu IV, 0x30(OUTP) | 2716 | movdqu IV, 0x30(OUTP) |
2716 | 2717 | ||
2717 | call *%r11 | 2718 | CALL_NOSPEC %r11 |
2718 | 2719 | ||
2719 | movdqu 0x00(OUTP), INC | 2720 | movdqu 0x00(OUTP), INC |
2720 | pxor INC, STATE1 | 2721 | pxor INC, STATE1 |
@@ -2759,7 +2760,7 @@ ENTRY(aesni_xts_crypt8) | |||
2759 | _aesni_gf128mul_x_ble() | 2760 | _aesni_gf128mul_x_ble() |
2760 | movups IV, (IVP) | 2761 | movups IV, (IVP) |
2761 | 2762 | ||
2762 | call *%r11 | 2763 | CALL_NOSPEC %r11 |
2763 | 2764 | ||
2764 | movdqu 0x40(OUTP), INC | 2765 | movdqu 0x40(OUTP), INC |
2765 | pxor INC, STATE1 | 2766 | pxor INC, STATE1 |
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index 3633ad6145c5..c18806b5db2a 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c | |||
@@ -965,7 +965,7 @@ static int helper_rfc4106_encrypt(struct aead_request *req) | |||
965 | 965 | ||
966 | if (sg_is_last(req->src) && | 966 | if (sg_is_last(req->src) && |
967 | req->src->offset + req->src->length <= PAGE_SIZE && | 967 | req->src->offset + req->src->length <= PAGE_SIZE && |
968 | sg_is_last(req->dst) && | 968 | sg_is_last(req->dst) && req->dst->length && |
969 | req->dst->offset + req->dst->length <= PAGE_SIZE) { | 969 | req->dst->offset + req->dst->length <= PAGE_SIZE) { |
970 | one_entry_in_sg = 1; | 970 | one_entry_in_sg = 1; |
971 | scatterwalk_start(&src_sg_walk, req->src); | 971 | scatterwalk_start(&src_sg_walk, req->src); |
diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S index ce71f9212409..5881756f78a2 100644 --- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S +++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S | |||
@@ -16,6 +16,7 @@ | |||
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include <linux/linkage.h> | 18 | #include <linux/linkage.h> |
19 | #include <asm/nospec-branch.h> | ||
19 | 20 | ||
20 | #define CAMELLIA_TABLE_BYTE_LEN 272 | 21 | #define CAMELLIA_TABLE_BYTE_LEN 272 |
21 | 22 | ||
@@ -1210,7 +1211,7 @@ camellia_xts_crypt_16way: | |||
1210 | vpxor 14 * 16(%rax), %xmm15, %xmm14; | 1211 | vpxor 14 * 16(%rax), %xmm15, %xmm14; |
1211 | vpxor 15 * 16(%rax), %xmm15, %xmm15; | 1212 | vpxor 15 * 16(%rax), %xmm15, %xmm15; |
1212 | 1213 | ||
1213 | call *%r9; | 1214 | CALL_NOSPEC %r9; |
1214 | 1215 | ||
1215 | addq $(16 * 16), %rsp; | 1216 | addq $(16 * 16), %rsp; |
1216 | 1217 | ||
diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S index 0e0b8863a34b..0d45b04b490a 100644 --- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S +++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S | |||
@@ -11,6 +11,7 @@ | |||
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/linkage.h> | 13 | #include <linux/linkage.h> |
14 | #include <asm/nospec-branch.h> | ||
14 | 15 | ||
15 | #define CAMELLIA_TABLE_BYTE_LEN 272 | 16 | #define CAMELLIA_TABLE_BYTE_LEN 272 |
16 | 17 | ||
@@ -1323,7 +1324,7 @@ camellia_xts_crypt_32way: | |||
1323 | vpxor 14 * 32(%rax), %ymm15, %ymm14; | 1324 | vpxor 14 * 32(%rax), %ymm15, %ymm14; |
1324 | vpxor 15 * 32(%rax), %ymm15, %ymm15; | 1325 | vpxor 15 * 32(%rax), %ymm15, %ymm15; |
1325 | 1326 | ||
1326 | call *%r9; | 1327 | CALL_NOSPEC %r9; |
1327 | 1328 | ||
1328 | addq $(16 * 32), %rsp; | 1329 | addq $(16 * 32), %rsp; |
1329 | 1330 | ||
diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S index 4fe27e074194..48767520cbe0 100644 --- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S +++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S | |||
@@ -45,6 +45,7 @@ | |||
45 | 45 | ||
46 | #include <asm/inst.h> | 46 | #include <asm/inst.h> |
47 | #include <linux/linkage.h> | 47 | #include <linux/linkage.h> |
48 | #include <asm/nospec-branch.h> | ||
48 | 49 | ||
49 | ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction | 50 | ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction |
50 | 51 | ||
@@ -172,7 +173,7 @@ continue_block: | |||
172 | movzxw (bufp, %rax, 2), len | 173 | movzxw (bufp, %rax, 2), len |
173 | offset=crc_array-jump_table | 174 | offset=crc_array-jump_table |
174 | lea offset(bufp, len, 1), bufp | 175 | lea offset(bufp, len, 1), bufp |
175 | jmp *bufp | 176 | JMP_NOSPEC bufp |
176 | 177 | ||
177 | ################################################################ | 178 | ################################################################ |
178 | ## 2a) PROCESS FULL BLOCKS: | 179 | ## 2a) PROCESS FULL BLOCKS: |
diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c index 4264a3d59589..7c064887b783 100644 --- a/arch/x86/crypto/poly1305_glue.c +++ b/arch/x86/crypto/poly1305_glue.c | |||
@@ -164,7 +164,6 @@ static struct shash_alg alg = { | |||
164 | .init = poly1305_simd_init, | 164 | .init = poly1305_simd_init, |
165 | .update = poly1305_simd_update, | 165 | .update = poly1305_simd_update, |
166 | .final = crypto_poly1305_final, | 166 | .final = crypto_poly1305_final, |
167 | .setkey = crypto_poly1305_setkey, | ||
168 | .descsize = sizeof(struct poly1305_simd_desc_ctx), | 167 | .descsize = sizeof(struct poly1305_simd_desc_ctx), |
169 | .base = { | 168 | .base = { |
170 | .cra_name = "poly1305", | 169 | .cra_name = "poly1305", |
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index ae678ad128a9..d437f3871e53 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S | |||
@@ -44,6 +44,7 @@ | |||
44 | #include <asm/alternative-asm.h> | 44 | #include <asm/alternative-asm.h> |
45 | #include <asm/asm.h> | 45 | #include <asm/asm.h> |
46 | #include <asm/smap.h> | 46 | #include <asm/smap.h> |
47 | #include <asm/nospec-branch.h> | ||
47 | 48 | ||
48 | .section .entry.text, "ax" | 49 | .section .entry.text, "ax" |
49 | 50 | ||
@@ -226,7 +227,8 @@ ENTRY(ret_from_kernel_thread) | |||
226 | pushl $0x0202 # Reset kernel eflags | 227 | pushl $0x0202 # Reset kernel eflags |
227 | popfl | 228 | popfl |
228 | movl PT_EBP(%esp), %eax | 229 | movl PT_EBP(%esp), %eax |
229 | call *PT_EBX(%esp) | 230 | movl PT_EBX(%esp), %edx |
231 | CALL_NOSPEC %edx | ||
230 | movl $0, PT_EAX(%esp) | 232 | movl $0, PT_EAX(%esp) |
231 | 233 | ||
232 | /* | 234 | /* |
@@ -861,7 +863,8 @@ trace: | |||
861 | movl 0x4(%ebp), %edx | 863 | movl 0x4(%ebp), %edx |
862 | subl $MCOUNT_INSN_SIZE, %eax | 864 | subl $MCOUNT_INSN_SIZE, %eax |
863 | 865 | ||
864 | call *ftrace_trace_function | 866 | movl ftrace_trace_function, %ecx |
867 | CALL_NOSPEC %ecx | ||
865 | 868 | ||
866 | popl %edx | 869 | popl %edx |
867 | popl %ecx | 870 | popl %ecx |
@@ -896,7 +899,7 @@ return_to_handler: | |||
896 | movl %eax, %ecx | 899 | movl %eax, %ecx |
897 | popl %edx | 900 | popl %edx |
898 | popl %eax | 901 | popl %eax |
899 | jmp *%ecx | 902 | JMP_NOSPEC %ecx |
900 | #endif | 903 | #endif |
901 | 904 | ||
902 | #ifdef CONFIG_TRACING | 905 | #ifdef CONFIG_TRACING |
@@ -938,7 +941,7 @@ error_code: | |||
938 | movl %ecx, %es | 941 | movl %ecx, %es |
939 | TRACE_IRQS_OFF | 942 | TRACE_IRQS_OFF |
940 | movl %esp, %eax # pt_regs pointer | 943 | movl %esp, %eax # pt_regs pointer |
941 | call *%edi | 944 | CALL_NOSPEC %edi |
942 | jmp ret_from_exception | 945 | jmp ret_from_exception |
943 | END(page_fault) | 946 | END(page_fault) |
944 | 947 | ||
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index cc0f2f5da19b..a03b22c615d9 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S | |||
@@ -35,6 +35,8 @@ | |||
35 | #include <asm/asm.h> | 35 | #include <asm/asm.h> |
36 | #include <asm/smap.h> | 36 | #include <asm/smap.h> |
37 | #include <asm/pgtable_types.h> | 37 | #include <asm/pgtable_types.h> |
38 | #include <asm/kaiser.h> | ||
39 | #include <asm/nospec-branch.h> | ||
38 | #include <linux/err.h> | 40 | #include <linux/err.h> |
39 | 41 | ||
40 | /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */ | 42 | /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */ |
@@ -135,6 +137,7 @@ ENTRY(entry_SYSCALL_64) | |||
135 | * it is too small to ever cause noticeable irq latency. | 137 | * it is too small to ever cause noticeable irq latency. |
136 | */ | 138 | */ |
137 | SWAPGS_UNSAFE_STACK | 139 | SWAPGS_UNSAFE_STACK |
140 | SWITCH_KERNEL_CR3_NO_STACK | ||
138 | /* | 141 | /* |
139 | * A hypervisor implementation might want to use a label | 142 | * A hypervisor implementation might want to use a label |
140 | * after the swapgs, so that it can do the swapgs | 143 | * after the swapgs, so that it can do the swapgs |
@@ -182,7 +185,13 @@ entry_SYSCALL_64_fastpath: | |||
182 | #endif | 185 | #endif |
183 | ja 1f /* return -ENOSYS (already in pt_regs->ax) */ | 186 | ja 1f /* return -ENOSYS (already in pt_regs->ax) */ |
184 | movq %r10, %rcx | 187 | movq %r10, %rcx |
188 | #ifdef CONFIG_RETPOLINE | ||
189 | movq sys_call_table(, %rax, 8), %rax | ||
190 | call __x86_indirect_thunk_rax | ||
191 | #else | ||
185 | call *sys_call_table(, %rax, 8) | 192 | call *sys_call_table(, %rax, 8) |
193 | #endif | ||
194 | |||
186 | movq %rax, RAX(%rsp) | 195 | movq %rax, RAX(%rsp) |
187 | 1: | 196 | 1: |
188 | /* | 197 | /* |
@@ -207,9 +216,17 @@ entry_SYSCALL_64_fastpath: | |||
207 | testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) | 216 | testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) |
208 | jnz int_ret_from_sys_call_irqs_off /* Go to the slow path */ | 217 | jnz int_ret_from_sys_call_irqs_off /* Go to the slow path */ |
209 | 218 | ||
210 | RESTORE_C_REGS_EXCEPT_RCX_R11 | ||
211 | movq RIP(%rsp), %rcx | 219 | movq RIP(%rsp), %rcx |
212 | movq EFLAGS(%rsp), %r11 | 220 | movq EFLAGS(%rsp), %r11 |
221 | RESTORE_C_REGS_EXCEPT_RCX_R11 | ||
222 | /* | ||
223 | * This opens a window where we have a user CR3, but are | ||
224 | * running in the kernel. This makes using the CS | ||
225 | * register useless for telling whether or not we need to | ||
226 | * switch CR3 in NMIs. Normal interrupts are OK because | ||
227 | * they are off here. | ||
228 | */ | ||
229 | SWITCH_USER_CR3 | ||
213 | movq RSP(%rsp), %rsp | 230 | movq RSP(%rsp), %rsp |
214 | /* | 231 | /* |
215 | * 64-bit SYSRET restores rip from rcx, | 232 | * 64-bit SYSRET restores rip from rcx, |
@@ -266,7 +283,12 @@ tracesys_phase2: | |||
266 | #endif | 283 | #endif |
267 | ja 1f /* return -ENOSYS (already in pt_regs->ax) */ | 284 | ja 1f /* return -ENOSYS (already in pt_regs->ax) */ |
268 | movq %r10, %rcx /* fixup for C */ | 285 | movq %r10, %rcx /* fixup for C */ |
286 | #ifdef CONFIG_RETPOLINE | ||
287 | movq sys_call_table(, %rax, 8), %rax | ||
288 | call __x86_indirect_thunk_rax | ||
289 | #else | ||
269 | call *sys_call_table(, %rax, 8) | 290 | call *sys_call_table(, %rax, 8) |
291 | #endif | ||
270 | movq %rax, RAX(%rsp) | 292 | movq %rax, RAX(%rsp) |
271 | 1: | 293 | 1: |
272 | /* Use IRET because user could have changed pt_regs->foo */ | 294 | /* Use IRET because user could have changed pt_regs->foo */ |
@@ -347,10 +369,26 @@ GLOBAL(int_ret_from_sys_call) | |||
347 | syscall_return_via_sysret: | 369 | syscall_return_via_sysret: |
348 | /* rcx and r11 are already restored (see code above) */ | 370 | /* rcx and r11 are already restored (see code above) */ |
349 | RESTORE_C_REGS_EXCEPT_RCX_R11 | 371 | RESTORE_C_REGS_EXCEPT_RCX_R11 |
372 | /* | ||
373 | * This opens a window where we have a user CR3, but are | ||
374 | * running in the kernel. This makes using the CS | ||
375 | * register useless for telling whether or not we need to | ||
376 | * switch CR3 in NMIs. Normal interrupts are OK because | ||
377 | * they are off here. | ||
378 | */ | ||
379 | SWITCH_USER_CR3 | ||
350 | movq RSP(%rsp), %rsp | 380 | movq RSP(%rsp), %rsp |
351 | USERGS_SYSRET64 | 381 | USERGS_SYSRET64 |
352 | 382 | ||
353 | opportunistic_sysret_failed: | 383 | opportunistic_sysret_failed: |
384 | /* | ||
385 | * This opens a window where we have a user CR3, but are | ||
386 | * running in the kernel. This makes using the CS | ||
387 | * register useless for telling whether or not we need to | ||
388 | * switch CR3 in NMIs. Normal interrupts are OK because | ||
389 | * they are off here. | ||
390 | */ | ||
391 | SWITCH_USER_CR3 | ||
354 | SWAPGS | 392 | SWAPGS |
355 | jmp restore_c_regs_and_iret | 393 | jmp restore_c_regs_and_iret |
356 | END(entry_SYSCALL_64) | 394 | END(entry_SYSCALL_64) |
@@ -465,7 +503,7 @@ ENTRY(ret_from_fork) | |||
465 | * nb: we depend on RESTORE_EXTRA_REGS above | 503 | * nb: we depend on RESTORE_EXTRA_REGS above |
466 | */ | 504 | */ |
467 | movq %rbp, %rdi | 505 | movq %rbp, %rdi |
468 | call *%rbx | 506 | CALL_NOSPEC %rbx |
469 | movl $0, RAX(%rsp) | 507 | movl $0, RAX(%rsp) |
470 | RESTORE_EXTRA_REGS | 508 | RESTORE_EXTRA_REGS |
471 | jmp int_ret_from_sys_call | 509 | jmp int_ret_from_sys_call |
@@ -509,6 +547,7 @@ END(irq_entries_start) | |||
509 | * tracking that we're in kernel mode. | 547 | * tracking that we're in kernel mode. |
510 | */ | 548 | */ |
511 | SWAPGS | 549 | SWAPGS |
550 | SWITCH_KERNEL_CR3 | ||
512 | 551 | ||
513 | /* | 552 | /* |
514 | * We need to tell lockdep that IRQs are off. We can't do this until | 553 | * We need to tell lockdep that IRQs are off. We can't do this until |
@@ -568,6 +607,7 @@ GLOBAL(retint_user) | |||
568 | mov %rsp,%rdi | 607 | mov %rsp,%rdi |
569 | call prepare_exit_to_usermode | 608 | call prepare_exit_to_usermode |
570 | TRACE_IRQS_IRETQ | 609 | TRACE_IRQS_IRETQ |
610 | SWITCH_USER_CR3 | ||
571 | SWAPGS | 611 | SWAPGS |
572 | jmp restore_regs_and_iret | 612 | jmp restore_regs_and_iret |
573 | 613 | ||
@@ -625,6 +665,7 @@ native_irq_return_ldt: | |||
625 | pushq %rax | 665 | pushq %rax |
626 | pushq %rdi | 666 | pushq %rdi |
627 | SWAPGS | 667 | SWAPGS |
668 | SWITCH_KERNEL_CR3 | ||
628 | movq PER_CPU_VAR(espfix_waddr), %rdi | 669 | movq PER_CPU_VAR(espfix_waddr), %rdi |
629 | movq %rax, (0*8)(%rdi) /* RAX */ | 670 | movq %rax, (0*8)(%rdi) /* RAX */ |
630 | movq (2*8)(%rsp), %rax /* RIP */ | 671 | movq (2*8)(%rsp), %rax /* RIP */ |
@@ -640,6 +681,7 @@ native_irq_return_ldt: | |||
640 | andl $0xffff0000, %eax | 681 | andl $0xffff0000, %eax |
641 | popq %rdi | 682 | popq %rdi |
642 | orq PER_CPU_VAR(espfix_stack), %rax | 683 | orq PER_CPU_VAR(espfix_stack), %rax |
684 | SWITCH_USER_CR3 | ||
643 | SWAPGS | 685 | SWAPGS |
644 | movq %rax, %rsp | 686 | movq %rax, %rsp |
645 | popq %rax | 687 | popq %rax |
@@ -989,13 +1031,17 @@ idtentry async_page_fault do_async_page_fault has_error_code=1 | |||
989 | #endif | 1031 | #endif |
990 | 1032 | ||
991 | #ifdef CONFIG_X86_MCE | 1033 | #ifdef CONFIG_X86_MCE |
992 | idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vector(%rip) | 1034 | idtentry machine_check do_mce has_error_code=0 paranoid=1 |
993 | #endif | 1035 | #endif |
994 | 1036 | ||
995 | /* | 1037 | /* |
996 | * Save all registers in pt_regs, and switch gs if needed. | 1038 | * Save all registers in pt_regs, and switch gs if needed. |
997 | * Use slow, but surefire "are we in kernel?" check. | 1039 | * Use slow, but surefire "are we in kernel?" check. |
998 | * Return: ebx=0: need swapgs on exit, ebx=1: otherwise | 1040 | * |
1041 | * Return: ebx=0: needs swapgs but not SWITCH_USER_CR3 in paranoid_exit | ||
1042 | * ebx=1: needs neither swapgs nor SWITCH_USER_CR3 in paranoid_exit | ||
1043 | * ebx=2: needs both swapgs and SWITCH_USER_CR3 in paranoid_exit | ||
1044 | * ebx=3: needs SWITCH_USER_CR3 but not swapgs in paranoid_exit | ||
999 | */ | 1045 | */ |
1000 | ENTRY(paranoid_entry) | 1046 | ENTRY(paranoid_entry) |
1001 | cld | 1047 | cld |
@@ -1008,7 +1054,26 @@ ENTRY(paranoid_entry) | |||
1008 | js 1f /* negative -> in kernel */ | 1054 | js 1f /* negative -> in kernel */ |
1009 | SWAPGS | 1055 | SWAPGS |
1010 | xorl %ebx, %ebx | 1056 | xorl %ebx, %ebx |
1011 | 1: ret | 1057 | 1: |
1058 | #ifdef CONFIG_PAGE_TABLE_ISOLATION | ||
1059 | /* | ||
1060 | * We might have come in between a swapgs and a SWITCH_KERNEL_CR3 | ||
1061 | * on entry, or between a SWITCH_USER_CR3 and a swapgs on exit. | ||
1062 | * Do a conditional SWITCH_KERNEL_CR3: this could safely be done | ||
1063 | * unconditionally, but we need to find out whether the reverse | ||
1064 | * should be done on return (conveyed to paranoid_exit in %ebx). | ||
1065 | */ | ||
1066 | ALTERNATIVE "jmp 2f", "movq %cr3, %rax", X86_FEATURE_KAISER | ||
1067 | testl $KAISER_SHADOW_PGD_OFFSET, %eax | ||
1068 | jz 2f | ||
1069 | orl $2, %ebx | ||
1070 | andq $(~(X86_CR3_PCID_ASID_MASK | KAISER_SHADOW_PGD_OFFSET)), %rax | ||
1071 | /* If PCID enabled, set X86_CR3_PCID_NOFLUSH_BIT */ | ||
1072 | ALTERNATIVE "", "bts $63, %rax", X86_FEATURE_PCID | ||
1073 | movq %rax, %cr3 | ||
1074 | 2: | ||
1075 | #endif | ||
1076 | ret | ||
1012 | END(paranoid_entry) | 1077 | END(paranoid_entry) |
1013 | 1078 | ||
1014 | /* | 1079 | /* |
@@ -1021,19 +1086,26 @@ END(paranoid_entry) | |||
1021 | * be complicated. Fortunately, we there's no good reason | 1086 | * be complicated. Fortunately, we there's no good reason |
1022 | * to try to handle preemption here. | 1087 | * to try to handle preemption here. |
1023 | * | 1088 | * |
1024 | * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) | 1089 | * On entry: ebx=0: needs swapgs but not SWITCH_USER_CR3 |
1090 | * ebx=1: needs neither swapgs nor SWITCH_USER_CR3 | ||
1091 | * ebx=2: needs both swapgs and SWITCH_USER_CR3 | ||
1092 | * ebx=3: needs SWITCH_USER_CR3 but not swapgs | ||
1025 | */ | 1093 | */ |
1026 | ENTRY(paranoid_exit) | 1094 | ENTRY(paranoid_exit) |
1027 | DISABLE_INTERRUPTS(CLBR_NONE) | 1095 | DISABLE_INTERRUPTS(CLBR_NONE) |
1028 | TRACE_IRQS_OFF_DEBUG | 1096 | TRACE_IRQS_OFF_DEBUG |
1029 | testl %ebx, %ebx /* swapgs needed? */ | 1097 | TRACE_IRQS_IRETQ_DEBUG |
1098 | #ifdef CONFIG_PAGE_TABLE_ISOLATION | ||
1099 | /* No ALTERNATIVE for X86_FEATURE_KAISER: paranoid_entry sets %ebx */ | ||
1100 | testl $2, %ebx /* SWITCH_USER_CR3 needed? */ | ||
1101 | jz paranoid_exit_no_switch | ||
1102 | SWITCH_USER_CR3 | ||
1103 | paranoid_exit_no_switch: | ||
1104 | #endif | ||
1105 | testl $1, %ebx /* swapgs needed? */ | ||
1030 | jnz paranoid_exit_no_swapgs | 1106 | jnz paranoid_exit_no_swapgs |
1031 | TRACE_IRQS_IRETQ | ||
1032 | SWAPGS_UNSAFE_STACK | 1107 | SWAPGS_UNSAFE_STACK |
1033 | jmp paranoid_exit_restore | ||
1034 | paranoid_exit_no_swapgs: | 1108 | paranoid_exit_no_swapgs: |
1035 | TRACE_IRQS_IRETQ_DEBUG | ||
1036 | paranoid_exit_restore: | ||
1037 | RESTORE_EXTRA_REGS | 1109 | RESTORE_EXTRA_REGS |
1038 | RESTORE_C_REGS | 1110 | RESTORE_C_REGS |
1039 | REMOVE_PT_GPREGS_FROM_STACK 8 | 1111 | REMOVE_PT_GPREGS_FROM_STACK 8 |
@@ -1048,6 +1120,13 @@ ENTRY(error_entry) | |||
1048 | cld | 1120 | cld |
1049 | SAVE_C_REGS 8 | 1121 | SAVE_C_REGS 8 |
1050 | SAVE_EXTRA_REGS 8 | 1122 | SAVE_EXTRA_REGS 8 |
1123 | /* | ||
1124 | * error_entry() always returns with a kernel gsbase and | ||
1125 | * CR3. We must also have a kernel CR3/gsbase before | ||
1126 | * calling TRACE_IRQS_*. Just unconditionally switch to | ||
1127 | * the kernel CR3 here. | ||
1128 | */ | ||
1129 | SWITCH_KERNEL_CR3 | ||
1051 | xorl %ebx, %ebx | 1130 | xorl %ebx, %ebx |
1052 | testb $3, CS+8(%rsp) | 1131 | testb $3, CS+8(%rsp) |
1053 | jz .Lerror_kernelspace | 1132 | jz .Lerror_kernelspace |
@@ -1210,6 +1289,10 @@ ENTRY(nmi) | |||
1210 | */ | 1289 | */ |
1211 | 1290 | ||
1212 | SWAPGS_UNSAFE_STACK | 1291 | SWAPGS_UNSAFE_STACK |
1292 | /* | ||
1293 | * percpu variables are mapped with user CR3, so no need | ||
1294 | * to switch CR3 here. | ||
1295 | */ | ||
1213 | cld | 1296 | cld |
1214 | movq %rsp, %rdx | 1297 | movq %rsp, %rdx |
1215 | movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp | 1298 | movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp |
@@ -1243,12 +1326,34 @@ ENTRY(nmi) | |||
1243 | 1326 | ||
1244 | movq %rsp, %rdi | 1327 | movq %rsp, %rdi |
1245 | movq $-1, %rsi | 1328 | movq $-1, %rsi |
1329 | #ifdef CONFIG_PAGE_TABLE_ISOLATION | ||
1330 | /* Unconditionally use kernel CR3 for do_nmi() */ | ||
1331 | /* %rax is saved above, so OK to clobber here */ | ||
1332 | ALTERNATIVE "jmp 2f", "movq %cr3, %rax", X86_FEATURE_KAISER | ||
1333 | /* If PCID enabled, NOFLUSH now and NOFLUSH on return */ | ||
1334 | ALTERNATIVE "", "bts $63, %rax", X86_FEATURE_PCID | ||
1335 | pushq %rax | ||
1336 | /* mask off "user" bit of pgd address and 12 PCID bits: */ | ||
1337 | andq $(~(X86_CR3_PCID_ASID_MASK | KAISER_SHADOW_PGD_OFFSET)), %rax | ||
1338 | movq %rax, %cr3 | ||
1339 | 2: | ||
1340 | #endif | ||
1246 | call do_nmi | 1341 | call do_nmi |
1247 | 1342 | ||
1343 | #ifdef CONFIG_PAGE_TABLE_ISOLATION | ||
1344 | /* | ||
1345 | * Unconditionally restore CR3. I know we return to | ||
1346 | * kernel code that needs user CR3, but do we ever return | ||
1347 | * to "user mode" where we need the kernel CR3? | ||
1348 | */ | ||
1349 | ALTERNATIVE "", "popq %rax; movq %rax, %cr3", X86_FEATURE_KAISER | ||
1350 | #endif | ||
1351 | |||
1248 | /* | 1352 | /* |
1249 | * Return back to user mode. We must *not* do the normal exit | 1353 | * Return back to user mode. We must *not* do the normal exit |
1250 | * work, because we don't want to enable interrupts. Fortunately, | 1354 | * work, because we don't want to enable interrupts. Do not |
1251 | * do_nmi doesn't modify pt_regs. | 1355 | * switch to user CR3: we might be going back to kernel code |
1356 | * that had a user CR3 set. | ||
1252 | */ | 1357 | */ |
1253 | SWAPGS | 1358 | SWAPGS |
1254 | jmp restore_c_regs_and_iret | 1359 | jmp restore_c_regs_and_iret |
@@ -1445,22 +1550,55 @@ end_repeat_nmi: | |||
1445 | ALLOC_PT_GPREGS_ON_STACK | 1550 | ALLOC_PT_GPREGS_ON_STACK |
1446 | 1551 | ||
1447 | /* | 1552 | /* |
1448 | * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit | 1553 | * Use the same approach as paranoid_entry to handle SWAPGS, but |
1449 | * as we should not be calling schedule in NMI context. | 1554 | * without CR3 handling since we do that differently in NMIs. No |
1450 | * Even with normal interrupts enabled. An NMI should not be | 1555 | * need to use paranoid_exit as we should not be calling schedule |
1451 | * setting NEED_RESCHED or anything that normal interrupts and | 1556 | * in NMI context. Even with normal interrupts enabled. An NMI |
1452 | * exceptions might do. | 1557 | * should not be setting NEED_RESCHED or anything that normal |
1558 | * interrupts and exceptions might do. | ||
1453 | */ | 1559 | */ |
1454 | call paranoid_entry | 1560 | cld |
1455 | 1561 | SAVE_C_REGS | |
1456 | /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */ | 1562 | SAVE_EXTRA_REGS |
1563 | movl $1, %ebx | ||
1564 | movl $MSR_GS_BASE, %ecx | ||
1565 | rdmsr | ||
1566 | testl %edx, %edx | ||
1567 | js 1f /* negative -> in kernel */ | ||
1568 | SWAPGS | ||
1569 | xorl %ebx, %ebx | ||
1570 | 1: | ||
1457 | movq %rsp, %rdi | 1571 | movq %rsp, %rdi |
1458 | movq $-1, %rsi | 1572 | movq $-1, %rsi |
1573 | #ifdef CONFIG_PAGE_TABLE_ISOLATION | ||
1574 | /* Unconditionally use kernel CR3 for do_nmi() */ | ||
1575 | /* %rax is saved above, so OK to clobber here */ | ||
1576 | ALTERNATIVE "jmp 2f", "movq %cr3, %rax", X86_FEATURE_KAISER | ||
1577 | /* If PCID enabled, NOFLUSH now and NOFLUSH on return */ | ||
1578 | ALTERNATIVE "", "bts $63, %rax", X86_FEATURE_PCID | ||
1579 | pushq %rax | ||
1580 | /* mask off "user" bit of pgd address and 12 PCID bits: */ | ||
1581 | andq $(~(X86_CR3_PCID_ASID_MASK | KAISER_SHADOW_PGD_OFFSET)), %rax | ||
1582 | movq %rax, %cr3 | ||
1583 | 2: | ||
1584 | #endif | ||
1585 | |||
1586 | /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */ | ||
1459 | call do_nmi | 1587 | call do_nmi |
1460 | 1588 | ||
1589 | #ifdef CONFIG_PAGE_TABLE_ISOLATION | ||
1590 | /* | ||
1591 | * Unconditionally restore CR3. We might be returning to | ||
1592 | * kernel code that needs user CR3, like just just before | ||
1593 | * a sysret. | ||
1594 | */ | ||
1595 | ALTERNATIVE "", "popq %rax; movq %rax, %cr3", X86_FEATURE_KAISER | ||
1596 | #endif | ||
1597 | |||
1461 | testl %ebx, %ebx /* swapgs needed? */ | 1598 | testl %ebx, %ebx /* swapgs needed? */ |
1462 | jnz nmi_restore | 1599 | jnz nmi_restore |
1463 | nmi_swapgs: | 1600 | nmi_swapgs: |
1601 | /* We fixed up CR3 above, so no need to switch it here */ | ||
1464 | SWAPGS_UNSAFE_STACK | 1602 | SWAPGS_UNSAFE_STACK |
1465 | nmi_restore: | 1603 | nmi_restore: |
1466 | RESTORE_EXTRA_REGS | 1604 | RESTORE_EXTRA_REGS |
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S index 15cfebaa7688..d03bf0e28b8b 100644 --- a/arch/x86/entry/entry_64_compat.S +++ b/arch/x86/entry/entry_64_compat.S | |||
@@ -13,6 +13,8 @@ | |||
13 | #include <asm/irqflags.h> | 13 | #include <asm/irqflags.h> |
14 | #include <asm/asm.h> | 14 | #include <asm/asm.h> |
15 | #include <asm/smap.h> | 15 | #include <asm/smap.h> |
16 | #include <asm/pgtable_types.h> | ||
17 | #include <asm/kaiser.h> | ||
16 | #include <linux/linkage.h> | 18 | #include <linux/linkage.h> |
17 | #include <linux/err.h> | 19 | #include <linux/err.h> |
18 | 20 | ||
@@ -50,6 +52,7 @@ ENDPROC(native_usergs_sysret32) | |||
50 | ENTRY(entry_SYSENTER_compat) | 52 | ENTRY(entry_SYSENTER_compat) |
51 | /* Interrupts are off on entry. */ | 53 | /* Interrupts are off on entry. */ |
52 | SWAPGS_UNSAFE_STACK | 54 | SWAPGS_UNSAFE_STACK |
55 | SWITCH_KERNEL_CR3_NO_STACK | ||
53 | movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp | 56 | movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp |
54 | 57 | ||
55 | /* | 58 | /* |
@@ -161,6 +164,7 @@ ENDPROC(entry_SYSENTER_compat) | |||
161 | ENTRY(entry_SYSCALL_compat) | 164 | ENTRY(entry_SYSCALL_compat) |
162 | /* Interrupts are off on entry. */ | 165 | /* Interrupts are off on entry. */ |
163 | SWAPGS_UNSAFE_STACK | 166 | SWAPGS_UNSAFE_STACK |
167 | SWITCH_KERNEL_CR3_NO_STACK | ||
164 | 168 | ||
165 | /* Stash user ESP and switch to the kernel stack. */ | 169 | /* Stash user ESP and switch to the kernel stack. */ |
166 | movl %esp, %r8d | 170 | movl %esp, %r8d |
@@ -208,6 +212,7 @@ ENTRY(entry_SYSCALL_compat) | |||
208 | /* Opportunistic SYSRET */ | 212 | /* Opportunistic SYSRET */ |
209 | sysret32_from_system_call: | 213 | sysret32_from_system_call: |
210 | TRACE_IRQS_ON /* User mode traces as IRQs on. */ | 214 | TRACE_IRQS_ON /* User mode traces as IRQs on. */ |
215 | SWITCH_USER_CR3 | ||
211 | movq RBX(%rsp), %rbx /* pt_regs->rbx */ | 216 | movq RBX(%rsp), %rbx /* pt_regs->rbx */ |
212 | movq RBP(%rsp), %rbp /* pt_regs->rbp */ | 217 | movq RBP(%rsp), %rbp /* pt_regs->rbp */ |
213 | movq EFLAGS(%rsp), %r11 /* pt_regs->flags (in r11) */ | 218 | movq EFLAGS(%rsp), %r11 /* pt_regs->flags (in r11) */ |
@@ -269,6 +274,7 @@ ENTRY(entry_INT80_compat) | |||
269 | PARAVIRT_ADJUST_EXCEPTION_FRAME | 274 | PARAVIRT_ADJUST_EXCEPTION_FRAME |
270 | ASM_CLAC /* Do this early to minimize exposure */ | 275 | ASM_CLAC /* Do this early to minimize exposure */ |
271 | SWAPGS | 276 | SWAPGS |
277 | SWITCH_KERNEL_CR3_NO_STACK | ||
272 | 278 | ||
273 | /* | 279 | /* |
274 | * User tracing code (ptrace or signal handlers) might assume that | 280 | * User tracing code (ptrace or signal handlers) might assume that |
@@ -311,6 +317,7 @@ ENTRY(entry_INT80_compat) | |||
311 | 317 | ||
312 | /* Go back to user mode. */ | 318 | /* Go back to user mode. */ |
313 | TRACE_IRQS_ON | 319 | TRACE_IRQS_ON |
320 | SWITCH_USER_CR3 | ||
314 | SWAPGS | 321 | SWAPGS |
315 | jmp restore_regs_and_iret | 322 | jmp restore_regs_and_iret |
316 | END(entry_INT80_compat) | 323 | END(entry_INT80_compat) |
diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c index ca94fa649251..5dd363d54348 100644 --- a/arch/x86/entry/vdso/vclock_gettime.c +++ b/arch/x86/entry/vdso/vclock_gettime.c | |||
@@ -36,6 +36,11 @@ static notrace cycle_t vread_hpet(void) | |||
36 | } | 36 | } |
37 | #endif | 37 | #endif |
38 | 38 | ||
39 | #ifdef CONFIG_PARAVIRT_CLOCK | ||
40 | extern u8 pvclock_page | ||
41 | __attribute__((visibility("hidden"))); | ||
42 | #endif | ||
43 | |||
39 | #ifndef BUILD_VDSO32 | 44 | #ifndef BUILD_VDSO32 |
40 | 45 | ||
41 | #include <linux/kernel.h> | 46 | #include <linux/kernel.h> |
@@ -62,63 +67,65 @@ notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz) | |||
62 | 67 | ||
63 | #ifdef CONFIG_PARAVIRT_CLOCK | 68 | #ifdef CONFIG_PARAVIRT_CLOCK |
64 | 69 | ||
65 | static notrace const struct pvclock_vsyscall_time_info *get_pvti(int cpu) | 70 | static notrace const struct pvclock_vsyscall_time_info *get_pvti0(void) |
66 | { | 71 | { |
67 | const struct pvclock_vsyscall_time_info *pvti_base; | 72 | return (const struct pvclock_vsyscall_time_info *)&pvclock_page; |
68 | int idx = cpu / (PAGE_SIZE/PVTI_SIZE); | ||
69 | int offset = cpu % (PAGE_SIZE/PVTI_SIZE); | ||
70 | |||
71 | BUG_ON(PVCLOCK_FIXMAP_BEGIN + idx > PVCLOCK_FIXMAP_END); | ||
72 | |||
73 | pvti_base = (struct pvclock_vsyscall_time_info *) | ||
74 | __fix_to_virt(PVCLOCK_FIXMAP_BEGIN+idx); | ||
75 | |||
76 | return &pvti_base[offset]; | ||
77 | } | 73 | } |
78 | 74 | ||
79 | static notrace cycle_t vread_pvclock(int *mode) | 75 | static notrace cycle_t vread_pvclock(int *mode) |
80 | { | 76 | { |
81 | const struct pvclock_vsyscall_time_info *pvti; | 77 | const struct pvclock_vcpu_time_info *pvti = &get_pvti0()->pvti; |
82 | cycle_t ret; | 78 | cycle_t ret; |
83 | u64 last; | 79 | u64 tsc, pvti_tsc; |
84 | u32 version; | 80 | u64 last, delta, pvti_system_time; |
85 | u8 flags; | 81 | u32 version, pvti_tsc_to_system_mul, pvti_tsc_shift; |
86 | unsigned cpu, cpu1; | ||
87 | |||
88 | 82 | ||
89 | /* | 83 | /* |
90 | * Note: hypervisor must guarantee that: | 84 | * Note: The kernel and hypervisor must guarantee that cpu ID |
91 | * 1. cpu ID number maps 1:1 to per-CPU pvclock time info. | 85 | * number maps 1:1 to per-CPU pvclock time info. |
92 | * 2. that per-CPU pvclock time info is updated if the | 86 | * |
93 | * underlying CPU changes. | 87 | * Because the hypervisor is entirely unaware of guest userspace |
94 | * 3. that version is increased whenever underlying CPU | 88 | * preemption, it cannot guarantee that per-CPU pvclock time |
95 | * changes. | 89 | * info is updated if the underlying CPU changes or that that |
90 | * version is increased whenever underlying CPU changes. | ||
96 | * | 91 | * |
92 | * On KVM, we are guaranteed that pvti updates for any vCPU are | ||
93 | * atomic as seen by *all* vCPUs. This is an even stronger | ||
94 | * guarantee than we get with a normal seqlock. | ||
95 | * | ||
96 | * On Xen, we don't appear to have that guarantee, but Xen still | ||
97 | * supplies a valid seqlock using the version field. | ||
98 | |||
99 | * We only do pvclock vdso timing at all if | ||
100 | * PVCLOCK_TSC_STABLE_BIT is set, and we interpret that bit to | ||
101 | * mean that all vCPUs have matching pvti and that the TSC is | ||
102 | * synced, so we can just look at vCPU 0's pvti. | ||
97 | */ | 103 | */ |
98 | do { | 104 | |
99 | cpu = __getcpu() & VGETCPU_CPU_MASK; | 105 | if (unlikely(!(pvti->flags & PVCLOCK_TSC_STABLE_BIT))) { |
100 | /* TODO: We can put vcpu id into higher bits of pvti.version. | ||
101 | * This will save a couple of cycles by getting rid of | ||
102 | * __getcpu() calls (Gleb). | ||
103 | */ | ||
104 | |||
105 | pvti = get_pvti(cpu); | ||
106 | |||
107 | version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags); | ||
108 | |||
109 | /* | ||
110 | * Test we're still on the cpu as well as the version. | ||
111 | * We could have been migrated just after the first | ||
112 | * vgetcpu but before fetching the version, so we | ||
113 | * wouldn't notice a version change. | ||
114 | */ | ||
115 | cpu1 = __getcpu() & VGETCPU_CPU_MASK; | ||
116 | } while (unlikely(cpu != cpu1 || | ||
117 | (pvti->pvti.version & 1) || | ||
118 | pvti->pvti.version != version)); | ||
119 | |||
120 | if (unlikely(!(flags & PVCLOCK_TSC_STABLE_BIT))) | ||
121 | *mode = VCLOCK_NONE; | 106 | *mode = VCLOCK_NONE; |
107 | return 0; | ||
108 | } | ||
109 | |||
110 | do { | ||
111 | version = pvti->version; | ||
112 | |||
113 | /* This is also a read barrier, so we'll read version first. */ | ||
114 | tsc = rdtsc_ordered(); | ||
115 | |||
116 | pvti_tsc_to_system_mul = pvti->tsc_to_system_mul; | ||
117 | pvti_tsc_shift = pvti->tsc_shift; | ||
118 | pvti_system_time = pvti->system_time; | ||
119 | pvti_tsc = pvti->tsc_timestamp; | ||
120 | |||
121 | /* Make sure that the version double-check is last. */ | ||
122 | smp_rmb(); | ||
123 | } while (unlikely((version & 1) || version != pvti->version)); | ||
124 | |||
125 | delta = tsc - pvti_tsc; | ||
126 | ret = pvti_system_time + | ||
127 | pvclock_scale_delta(delta, pvti_tsc_to_system_mul, | ||
128 | pvti_tsc_shift); | ||
122 | 129 | ||
123 | /* refer to tsc.c read_tsc() comment for rationale */ | 130 | /* refer to tsc.c read_tsc() comment for rationale */ |
124 | last = gtod->cycle_last; | 131 | last = gtod->cycle_last; |
diff --git a/arch/x86/entry/vdso/vdso-layout.lds.S b/arch/x86/entry/vdso/vdso-layout.lds.S index de2c921025f5..4158acc17df0 100644 --- a/arch/x86/entry/vdso/vdso-layout.lds.S +++ b/arch/x86/entry/vdso/vdso-layout.lds.S | |||
@@ -25,7 +25,7 @@ SECTIONS | |||
25 | * segment. | 25 | * segment. |
26 | */ | 26 | */ |
27 | 27 | ||
28 | vvar_start = . - 2 * PAGE_SIZE; | 28 | vvar_start = . - 3 * PAGE_SIZE; |
29 | vvar_page = vvar_start; | 29 | vvar_page = vvar_start; |
30 | 30 | ||
31 | /* Place all vvars at the offsets in asm/vvar.h. */ | 31 | /* Place all vvars at the offsets in asm/vvar.h. */ |
@@ -36,6 +36,7 @@ SECTIONS | |||
36 | #undef EMIT_VVAR | 36 | #undef EMIT_VVAR |
37 | 37 | ||
38 | hpet_page = vvar_start + PAGE_SIZE; | 38 | hpet_page = vvar_start + PAGE_SIZE; |
39 | pvclock_page = vvar_start + 2 * PAGE_SIZE; | ||
39 | 40 | ||
40 | . = SIZEOF_HEADERS; | 41 | . = SIZEOF_HEADERS; |
41 | 42 | ||
diff --git a/arch/x86/entry/vdso/vdso2c.c b/arch/x86/entry/vdso/vdso2c.c index 785d9922b106..491020b2826d 100644 --- a/arch/x86/entry/vdso/vdso2c.c +++ b/arch/x86/entry/vdso/vdso2c.c | |||
@@ -73,6 +73,7 @@ enum { | |||
73 | sym_vvar_start, | 73 | sym_vvar_start, |
74 | sym_vvar_page, | 74 | sym_vvar_page, |
75 | sym_hpet_page, | 75 | sym_hpet_page, |
76 | sym_pvclock_page, | ||
76 | sym_VDSO_FAKE_SECTION_TABLE_START, | 77 | sym_VDSO_FAKE_SECTION_TABLE_START, |
77 | sym_VDSO_FAKE_SECTION_TABLE_END, | 78 | sym_VDSO_FAKE_SECTION_TABLE_END, |
78 | }; | 79 | }; |
@@ -80,6 +81,7 @@ enum { | |||
80 | const int special_pages[] = { | 81 | const int special_pages[] = { |
81 | sym_vvar_page, | 82 | sym_vvar_page, |
82 | sym_hpet_page, | 83 | sym_hpet_page, |
84 | sym_pvclock_page, | ||
83 | }; | 85 | }; |
84 | 86 | ||
85 | struct vdso_sym { | 87 | struct vdso_sym { |
@@ -91,6 +93,7 @@ struct vdso_sym required_syms[] = { | |||
91 | [sym_vvar_start] = {"vvar_start", true}, | 93 | [sym_vvar_start] = {"vvar_start", true}, |
92 | [sym_vvar_page] = {"vvar_page", true}, | 94 | [sym_vvar_page] = {"vvar_page", true}, |
93 | [sym_hpet_page] = {"hpet_page", true}, | 95 | [sym_hpet_page] = {"hpet_page", true}, |
96 | [sym_pvclock_page] = {"pvclock_page", true}, | ||
94 | [sym_VDSO_FAKE_SECTION_TABLE_START] = { | 97 | [sym_VDSO_FAKE_SECTION_TABLE_START] = { |
95 | "VDSO_FAKE_SECTION_TABLE_START", false | 98 | "VDSO_FAKE_SECTION_TABLE_START", false |
96 | }, | 99 | }, |
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c index 64df47148160..b8f69e264ac4 100644 --- a/arch/x86/entry/vdso/vma.c +++ b/arch/x86/entry/vdso/vma.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/random.h> | 12 | #include <linux/random.h> |
13 | #include <linux/elf.h> | 13 | #include <linux/elf.h> |
14 | #include <linux/cpu.h> | 14 | #include <linux/cpu.h> |
15 | #include <asm/pvclock.h> | ||
15 | #include <asm/vgtod.h> | 16 | #include <asm/vgtod.h> |
16 | #include <asm/proto.h> | 17 | #include <asm/proto.h> |
17 | #include <asm/vdso.h> | 18 | #include <asm/vdso.h> |
@@ -100,6 +101,7 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr) | |||
100 | .name = "[vvar]", | 101 | .name = "[vvar]", |
101 | .pages = no_pages, | 102 | .pages = no_pages, |
102 | }; | 103 | }; |
104 | struct pvclock_vsyscall_time_info *pvti; | ||
103 | 105 | ||
104 | if (calculate_addr) { | 106 | if (calculate_addr) { |
105 | addr = vdso_addr(current->mm->start_stack, | 107 | addr = vdso_addr(current->mm->start_stack, |
@@ -169,6 +171,18 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr) | |||
169 | } | 171 | } |
170 | #endif | 172 | #endif |
171 | 173 | ||
174 | pvti = pvclock_pvti_cpu0_va(); | ||
175 | if (pvti && image->sym_pvclock_page) { | ||
176 | ret = remap_pfn_range(vma, | ||
177 | text_start + image->sym_pvclock_page, | ||
178 | __pa(pvti) >> PAGE_SHIFT, | ||
179 | PAGE_SIZE, | ||
180 | PAGE_READONLY); | ||
181 | |||
182 | if (ret) | ||
183 | goto up_fail; | ||
184 | } | ||
185 | |||
172 | up_fail: | 186 | up_fail: |
173 | if (ret) | 187 | if (ret) |
174 | current->mm->context.vdso = NULL; | 188 | current->mm->context.vdso = NULL; |
diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c index 174c2549939d..2d359991a273 100644 --- a/arch/x86/entry/vsyscall/vsyscall_64.c +++ b/arch/x86/entry/vsyscall/vsyscall_64.c | |||
@@ -46,6 +46,7 @@ static enum { EMULATE, NATIVE, NONE } vsyscall_mode = | |||
46 | #else | 46 | #else |
47 | EMULATE; | 47 | EMULATE; |
48 | #endif | 48 | #endif |
49 | unsigned long vsyscall_pgprot = __PAGE_KERNEL_VSYSCALL; | ||
49 | 50 | ||
50 | static int __init vsyscall_setup(char *str) | 51 | static int __init vsyscall_setup(char *str) |
51 | { | 52 | { |
@@ -66,6 +67,11 @@ static int __init vsyscall_setup(char *str) | |||
66 | } | 67 | } |
67 | early_param("vsyscall", vsyscall_setup); | 68 | early_param("vsyscall", vsyscall_setup); |
68 | 69 | ||
70 | bool vsyscall_enabled(void) | ||
71 | { | ||
72 | return vsyscall_mode != NONE; | ||
73 | } | ||
74 | |||
69 | static void warn_bad_vsyscall(const char *level, struct pt_regs *regs, | 75 | static void warn_bad_vsyscall(const char *level, struct pt_regs *regs, |
70 | const char *message) | 76 | const char *message) |
71 | { | 77 | { |
@@ -331,11 +337,11 @@ void __init map_vsyscall(void) | |||
331 | extern char __vsyscall_page; | 337 | extern char __vsyscall_page; |
332 | unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page); | 338 | unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page); |
333 | 339 | ||
340 | if (vsyscall_mode != NATIVE) | ||
341 | vsyscall_pgprot = __PAGE_KERNEL_VVAR; | ||
334 | if (vsyscall_mode != NONE) | 342 | if (vsyscall_mode != NONE) |
335 | __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, | 343 | __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, |
336 | vsyscall_mode == NATIVE | 344 | __pgprot(vsyscall_pgprot)); |
337 | ? PAGE_KERNEL_VSYSCALL | ||
338 | : PAGE_KERNEL_VVAR); | ||
339 | 345 | ||
340 | BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) != | 346 | BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) != |
341 | (unsigned long)VSYSCALL_ADDR); | 347 | (unsigned long)VSYSCALL_ADDR); |
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index 09936e9c8154..215ea9214215 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h | |||
@@ -1,6 +1,8 @@ | |||
1 | #ifndef _ASM_X86_ALTERNATIVE_H | 1 | #ifndef _ASM_X86_ALTERNATIVE_H |
2 | #define _ASM_X86_ALTERNATIVE_H | 2 | #define _ASM_X86_ALTERNATIVE_H |
3 | 3 | ||
4 | #ifndef __ASSEMBLY__ | ||
5 | |||
4 | #include <linux/types.h> | 6 | #include <linux/types.h> |
5 | #include <linux/stddef.h> | 7 | #include <linux/stddef.h> |
6 | #include <linux/stringify.h> | 8 | #include <linux/stringify.h> |
@@ -138,7 +140,7 @@ static inline int alternatives_text_reserved(void *start, void *end) | |||
138 | ".popsection\n" \ | 140 | ".popsection\n" \ |
139 | ".pushsection .altinstr_replacement, \"ax\"\n" \ | 141 | ".pushsection .altinstr_replacement, \"ax\"\n" \ |
140 | ALTINSTR_REPLACEMENT(newinstr, feature, 1) \ | 142 | ALTINSTR_REPLACEMENT(newinstr, feature, 1) \ |
141 | ".popsection" | 143 | ".popsection\n" |
142 | 144 | ||
143 | #define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\ | 145 | #define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\ |
144 | OLDINSTR_2(oldinstr, 1, 2) \ | 146 | OLDINSTR_2(oldinstr, 1, 2) \ |
@@ -149,7 +151,7 @@ static inline int alternatives_text_reserved(void *start, void *end) | |||
149 | ".pushsection .altinstr_replacement, \"ax\"\n" \ | 151 | ".pushsection .altinstr_replacement, \"ax\"\n" \ |
150 | ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \ | 152 | ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \ |
151 | ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \ | 153 | ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \ |
152 | ".popsection" | 154 | ".popsection\n" |
153 | 155 | ||
154 | /* | 156 | /* |
155 | * This must be included *after* the definition of ALTERNATIVE due to | 157 | * This must be included *after* the definition of ALTERNATIVE due to |
@@ -271,4 +273,6 @@ extern void *text_poke(void *addr, const void *opcode, size_t len); | |||
271 | extern int poke_int3_handler(struct pt_regs *regs); | 273 | extern int poke_int3_handler(struct pt_regs *regs); |
272 | extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler); | 274 | extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler); |
273 | 275 | ||
276 | #endif /* __ASSEMBLY__ */ | ||
277 | |||
274 | #endif /* _ASM_X86_ALTERNATIVE_H */ | 278 | #endif /* _ASM_X86_ALTERNATIVE_H */ |
diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h new file mode 100644 index 000000000000..b15aa4083dfd --- /dev/null +++ b/arch/x86/include/asm/asm-prototypes.h | |||
@@ -0,0 +1,41 @@ | |||
1 | #include <asm/ftrace.h> | ||
2 | #include <asm/uaccess.h> | ||
3 | #include <asm/string.h> | ||
4 | #include <asm/page.h> | ||
5 | #include <asm/checksum.h> | ||
6 | |||
7 | #include <asm-generic/asm-prototypes.h> | ||
8 | |||
9 | #include <asm/page.h> | ||
10 | #include <asm/pgtable.h> | ||
11 | #include <asm/special_insns.h> | ||
12 | #include <asm/preempt.h> | ||
13 | #include <asm/asm.h> | ||
14 | |||
15 | #ifndef CONFIG_X86_CMPXCHG64 | ||
16 | extern void cmpxchg8b_emu(void); | ||
17 | #endif | ||
18 | |||
19 | #ifdef CONFIG_RETPOLINE | ||
20 | #ifdef CONFIG_X86_32 | ||
21 | #define INDIRECT_THUNK(reg) extern asmlinkage void __x86_indirect_thunk_e ## reg(void); | ||
22 | #else | ||
23 | #define INDIRECT_THUNK(reg) extern asmlinkage void __x86_indirect_thunk_r ## reg(void); | ||
24 | INDIRECT_THUNK(8) | ||
25 | INDIRECT_THUNK(9) | ||
26 | INDIRECT_THUNK(10) | ||
27 | INDIRECT_THUNK(11) | ||
28 | INDIRECT_THUNK(12) | ||
29 | INDIRECT_THUNK(13) | ||
30 | INDIRECT_THUNK(14) | ||
31 | INDIRECT_THUNK(15) | ||
32 | #endif | ||
33 | INDIRECT_THUNK(ax) | ||
34 | INDIRECT_THUNK(bx) | ||
35 | INDIRECT_THUNK(cx) | ||
36 | INDIRECT_THUNK(dx) | ||
37 | INDIRECT_THUNK(si) | ||
38 | INDIRECT_THUNK(di) | ||
39 | INDIRECT_THUNK(bp) | ||
40 | INDIRECT_THUNK(sp) | ||
41 | #endif /* CONFIG_RETPOLINE */ | ||
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h index 189679aba703..1c79c8add0eb 100644 --- a/arch/x86/include/asm/asm.h +++ b/arch/x86/include/asm/asm.h | |||
@@ -11,10 +11,12 @@ | |||
11 | # define __ASM_FORM_COMMA(x) " " #x "," | 11 | # define __ASM_FORM_COMMA(x) " " #x "," |
12 | #endif | 12 | #endif |
13 | 13 | ||
14 | #ifdef CONFIG_X86_32 | 14 | #ifndef __x86_64__ |
15 | /* 32 bit */ | ||
15 | # define __ASM_SEL(a,b) __ASM_FORM(a) | 16 | # define __ASM_SEL(a,b) __ASM_FORM(a) |
16 | # define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(a) | 17 | # define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(a) |
17 | #else | 18 | #else |
19 | /* 64 bit */ | ||
18 | # define __ASM_SEL(a,b) __ASM_FORM(b) | 20 | # define __ASM_SEL(a,b) __ASM_FORM(b) |
19 | # define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(b) | 21 | # define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(b) |
20 | #endif | 22 | #endif |
@@ -105,4 +107,15 @@ | |||
105 | /* For C file, we already have NOKPROBE_SYMBOL macro */ | 107 | /* For C file, we already have NOKPROBE_SYMBOL macro */ |
106 | #endif | 108 | #endif |
107 | 109 | ||
110 | #ifndef __ASSEMBLY__ | ||
111 | /* | ||
112 | * This output constraint should be used for any inline asm which has a "call" | ||
113 | * instruction. Otherwise the asm may be inserted before the frame pointer | ||
114 | * gets set up by the containing function. If you forget to do this, objtool | ||
115 | * may print a "call without frame pointer save/setup" warning. | ||
116 | */ | ||
117 | register unsigned long current_stack_pointer asm(_ASM_SP); | ||
118 | #define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer) | ||
119 | #endif | ||
120 | |||
108 | #endif /* _ASM_X86_ASM_H */ | 121 | #endif /* _ASM_X86_ASM_H */ |
diff --git a/arch/x86/include/asm/cmdline.h b/arch/x86/include/asm/cmdline.h index e01f7f7ccb0c..84ae170bc3d0 100644 --- a/arch/x86/include/asm/cmdline.h +++ b/arch/x86/include/asm/cmdline.h | |||
@@ -2,5 +2,7 @@ | |||
2 | #define _ASM_X86_CMDLINE_H | 2 | #define _ASM_X86_CMDLINE_H |
3 | 3 | ||
4 | int cmdline_find_option_bool(const char *cmdline_ptr, const char *option); | 4 | int cmdline_find_option_bool(const char *cmdline_ptr, const char *option); |
5 | int cmdline_find_option(const char *cmdline_ptr, const char *option, | ||
6 | char *buffer, int bufsize); | ||
5 | 7 | ||
6 | #endif /* _ASM_X86_CMDLINE_H */ | 8 | #endif /* _ASM_X86_CMDLINE_H */ |
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index f7ba9fbf12ee..641f0f2c2982 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h | |||
@@ -187,6 +187,7 @@ | |||
187 | #define X86_FEATURE_ARAT ( 7*32+ 1) /* Always Running APIC Timer */ | 187 | #define X86_FEATURE_ARAT ( 7*32+ 1) /* Always Running APIC Timer */ |
188 | #define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */ | 188 | #define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */ |
189 | #define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */ | 189 | #define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */ |
190 | #define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 4) /* Effectively INVPCID && CR4.PCIDE=1 */ | ||
190 | #define X86_FEATURE_PLN ( 7*32+ 5) /* Intel Power Limit Notification */ | 191 | #define X86_FEATURE_PLN ( 7*32+ 5) /* Intel Power Limit Notification */ |
191 | #define X86_FEATURE_PTS ( 7*32+ 6) /* Intel Package Thermal Status */ | 192 | #define X86_FEATURE_PTS ( 7*32+ 6) /* Intel Package Thermal Status */ |
192 | #define X86_FEATURE_DTHERM ( 7*32+ 7) /* Digital Thermal Sensor */ | 193 | #define X86_FEATURE_DTHERM ( 7*32+ 7) /* Digital Thermal Sensor */ |
@@ -198,6 +199,12 @@ | |||
198 | #define X86_FEATURE_HWP_EPP ( 7*32+13) /* Intel HWP_EPP */ | 199 | #define X86_FEATURE_HWP_EPP ( 7*32+13) /* Intel HWP_EPP */ |
199 | #define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */ | 200 | #define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */ |
200 | #define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */ | 201 | #define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */ |
202 | #define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* Fill RSB on context switches */ | ||
203 | |||
204 | #define X86_FEATURE_RETPOLINE ( 7*32+29) /* Generic Retpoline mitigation for Spectre variant 2 */ | ||
205 | #define X86_FEATURE_RETPOLINE_AMD ( 7*32+30) /* AMD Retpoline mitigation for Spectre variant 2 */ | ||
206 | /* Because the ALTERNATIVE scheme is for members of the X86_FEATURE club... */ | ||
207 | #define X86_FEATURE_KAISER ( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */ | ||
201 | 208 | ||
202 | /* Virtualization flags: Linux defined, word 8 */ | 209 | /* Virtualization flags: Linux defined, word 8 */ |
203 | #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ | 210 | #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ |
@@ -273,6 +280,9 @@ | |||
273 | #define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */ | 280 | #define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */ |
274 | #define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */ | 281 | #define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */ |
275 | #define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */ | 282 | #define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */ |
283 | #define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */ | ||
284 | #define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */ | ||
285 | #define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */ | ||
276 | 286 | ||
277 | #if defined(__KERNEL__) && !defined(__ASSEMBLY__) | 287 | #if defined(__KERNEL__) && !defined(__ASSEMBLY__) |
278 | 288 | ||
@@ -355,6 +365,8 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; | |||
355 | set_bit(bit, (unsigned long *)cpu_caps_set); \ | 365 | set_bit(bit, (unsigned long *)cpu_caps_set); \ |
356 | } while (0) | 366 | } while (0) |
357 | 367 | ||
368 | #define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit) | ||
369 | |||
358 | #define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU) | 370 | #define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU) |
359 | #define cpu_has_de boot_cpu_has(X86_FEATURE_DE) | 371 | #define cpu_has_de boot_cpu_has(X86_FEATURE_DE) |
360 | #define cpu_has_pse boot_cpu_has(X86_FEATURE_PSE) | 372 | #define cpu_has_pse boot_cpu_has(X86_FEATURE_PSE) |
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h index 4e10d73cf018..880db91d9457 100644 --- a/arch/x86/include/asm/desc.h +++ b/arch/x86/include/asm/desc.h | |||
@@ -43,7 +43,7 @@ struct gdt_page { | |||
43 | struct desc_struct gdt[GDT_ENTRIES]; | 43 | struct desc_struct gdt[GDT_ENTRIES]; |
44 | } __attribute__((aligned(PAGE_SIZE))); | 44 | } __attribute__((aligned(PAGE_SIZE))); |
45 | 45 | ||
46 | DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page); | 46 | DECLARE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(struct gdt_page, gdt_page); |
47 | 47 | ||
48 | static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu) | 48 | static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu) |
49 | { | 49 | { |
diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h index f226df064660..8b17c2ad1048 100644 --- a/arch/x86/include/asm/disabled-features.h +++ b/arch/x86/include/asm/disabled-features.h | |||
@@ -21,11 +21,13 @@ | |||
21 | # define DISABLE_K6_MTRR (1<<(X86_FEATURE_K6_MTRR & 31)) | 21 | # define DISABLE_K6_MTRR (1<<(X86_FEATURE_K6_MTRR & 31)) |
22 | # define DISABLE_CYRIX_ARR (1<<(X86_FEATURE_CYRIX_ARR & 31)) | 22 | # define DISABLE_CYRIX_ARR (1<<(X86_FEATURE_CYRIX_ARR & 31)) |
23 | # define DISABLE_CENTAUR_MCR (1<<(X86_FEATURE_CENTAUR_MCR & 31)) | 23 | # define DISABLE_CENTAUR_MCR (1<<(X86_FEATURE_CENTAUR_MCR & 31)) |
24 | # define DISABLE_PCID 0 | ||
24 | #else | 25 | #else |
25 | # define DISABLE_VME 0 | 26 | # define DISABLE_VME 0 |
26 | # define DISABLE_K6_MTRR 0 | 27 | # define DISABLE_K6_MTRR 0 |
27 | # define DISABLE_CYRIX_ARR 0 | 28 | # define DISABLE_CYRIX_ARR 0 |
28 | # define DISABLE_CENTAUR_MCR 0 | 29 | # define DISABLE_CENTAUR_MCR 0 |
30 | # define DISABLE_PCID (1<<(X86_FEATURE_PCID & 31)) | ||
29 | #endif /* CONFIG_X86_64 */ | 31 | #endif /* CONFIG_X86_64 */ |
30 | 32 | ||
31 | /* | 33 | /* |
@@ -35,7 +37,7 @@ | |||
35 | #define DISABLED_MASK1 0 | 37 | #define DISABLED_MASK1 0 |
36 | #define DISABLED_MASK2 0 | 38 | #define DISABLED_MASK2 0 |
37 | #define DISABLED_MASK3 (DISABLE_CYRIX_ARR|DISABLE_CENTAUR_MCR|DISABLE_K6_MTRR) | 39 | #define DISABLED_MASK3 (DISABLE_CYRIX_ARR|DISABLE_CENTAUR_MCR|DISABLE_K6_MTRR) |
38 | #define DISABLED_MASK4 0 | 40 | #define DISABLED_MASK4 (DISABLE_PCID) |
39 | #define DISABLED_MASK5 0 | 41 | #define DISABLED_MASK5 0 |
40 | #define DISABLED_MASK6 0 | 42 | #define DISABLED_MASK6 0 |
41 | #define DISABLED_MASK7 0 | 43 | #define DISABLED_MASK7 0 |
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h index 7178043b0e1d..9b76cd331990 100644 --- a/arch/x86/include/asm/hardirq.h +++ b/arch/x86/include/asm/hardirq.h | |||
@@ -22,12 +22,8 @@ typedef struct { | |||
22 | #ifdef CONFIG_SMP | 22 | #ifdef CONFIG_SMP |
23 | unsigned int irq_resched_count; | 23 | unsigned int irq_resched_count; |
24 | unsigned int irq_call_count; | 24 | unsigned int irq_call_count; |
25 | /* | ||
26 | * irq_tlb_count is double-counted in irq_call_count, so it must be | ||
27 | * subtracted from irq_call_count when displaying irq_call_count | ||
28 | */ | ||
29 | unsigned int irq_tlb_count; | ||
30 | #endif | 25 | #endif |
26 | unsigned int irq_tlb_count; | ||
31 | #ifdef CONFIG_X86_THERMAL_VECTOR | 27 | #ifdef CONFIG_X86_THERMAL_VECTOR |
32 | unsigned int irq_thermal_count; | 28 | unsigned int irq_thermal_count; |
33 | #endif | 29 | #endif |
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index 59caa55fb9b5..ee52ff858699 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h | |||
@@ -187,7 +187,7 @@ extern char irq_entries_start[]; | |||
187 | #define VECTOR_RETRIGGERED ((void *)~0UL) | 187 | #define VECTOR_RETRIGGERED ((void *)~0UL) |
188 | 188 | ||
189 | typedef struct irq_desc* vector_irq_t[NR_VECTORS]; | 189 | typedef struct irq_desc* vector_irq_t[NR_VECTORS]; |
190 | DECLARE_PER_CPU(vector_irq_t, vector_irq); | 190 | DECLARE_PER_CPU_USER_MAPPED(vector_irq_t, vector_irq); |
191 | 191 | ||
192 | #endif /* !ASSEMBLY_ */ | 192 | #endif /* !ASSEMBLY_ */ |
193 | 193 | ||
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h new file mode 100644 index 000000000000..6999f7d01a0d --- /dev/null +++ b/arch/x86/include/asm/intel-family.h | |||
@@ -0,0 +1,68 @@ | |||
1 | #ifndef _ASM_X86_INTEL_FAMILY_H | ||
2 | #define _ASM_X86_INTEL_FAMILY_H | ||
3 | |||
4 | /* | ||
5 | * "Big Core" Processors (Branded as Core, Xeon, etc...) | ||
6 | * | ||
7 | * The "_X" parts are generally the EP and EX Xeons, or the | ||
8 | * "Extreme" ones, like Broadwell-E. | ||
9 | * | ||
10 | * Things ending in "2" are usually because we have no better | ||
11 | * name for them. There's no processor called "WESTMERE2". | ||
12 | */ | ||
13 | |||
14 | #define INTEL_FAM6_CORE_YONAH 0x0E | ||
15 | #define INTEL_FAM6_CORE2_MEROM 0x0F | ||
16 | #define INTEL_FAM6_CORE2_MEROM_L 0x16 | ||
17 | #define INTEL_FAM6_CORE2_PENRYN 0x17 | ||
18 | #define INTEL_FAM6_CORE2_DUNNINGTON 0x1D | ||
19 | |||
20 | #define INTEL_FAM6_NEHALEM 0x1E | ||
21 | #define INTEL_FAM6_NEHALEM_EP 0x1A | ||
22 | #define INTEL_FAM6_NEHALEM_EX 0x2E | ||
23 | #define INTEL_FAM6_WESTMERE 0x25 | ||
24 | #define INTEL_FAM6_WESTMERE2 0x1F | ||
25 | #define INTEL_FAM6_WESTMERE_EP 0x2C | ||
26 | #define INTEL_FAM6_WESTMERE_EX 0x2F | ||
27 | |||
28 | #define INTEL_FAM6_SANDYBRIDGE 0x2A | ||
29 | #define INTEL_FAM6_SANDYBRIDGE_X 0x2D | ||
30 | #define INTEL_FAM6_IVYBRIDGE 0x3A | ||
31 | #define INTEL_FAM6_IVYBRIDGE_X 0x3E | ||
32 | |||
33 | #define INTEL_FAM6_HASWELL_CORE 0x3C | ||
34 | #define INTEL_FAM6_HASWELL_X 0x3F | ||
35 | #define INTEL_FAM6_HASWELL_ULT 0x45 | ||
36 | #define INTEL_FAM6_HASWELL_GT3E 0x46 | ||
37 | |||
38 | #define INTEL_FAM6_BROADWELL_CORE 0x3D | ||
39 | #define INTEL_FAM6_BROADWELL_XEON_D 0x56 | ||
40 | #define INTEL_FAM6_BROADWELL_GT3E 0x47 | ||
41 | #define INTEL_FAM6_BROADWELL_X 0x4F | ||
42 | |||
43 | #define INTEL_FAM6_SKYLAKE_MOBILE 0x4E | ||
44 | #define INTEL_FAM6_SKYLAKE_DESKTOP 0x5E | ||
45 | #define INTEL_FAM6_SKYLAKE_X 0x55 | ||
46 | #define INTEL_FAM6_KABYLAKE_MOBILE 0x8E | ||
47 | #define INTEL_FAM6_KABYLAKE_DESKTOP 0x9E | ||
48 | |||
49 | /* "Small Core" Processors (Atom) */ | ||
50 | |||
51 | #define INTEL_FAM6_ATOM_PINEVIEW 0x1C | ||
52 | #define INTEL_FAM6_ATOM_LINCROFT 0x26 | ||
53 | #define INTEL_FAM6_ATOM_PENWELL 0x27 | ||
54 | #define INTEL_FAM6_ATOM_CLOVERVIEW 0x35 | ||
55 | #define INTEL_FAM6_ATOM_CEDARVIEW 0x36 | ||
56 | #define INTEL_FAM6_ATOM_SILVERMONT1 0x37 /* BayTrail/BYT / Valleyview */ | ||
57 | #define INTEL_FAM6_ATOM_SILVERMONT2 0x4D /* Avaton/Rangely */ | ||
58 | #define INTEL_FAM6_ATOM_AIRMONT 0x4C /* CherryTrail / Braswell */ | ||
59 | #define INTEL_FAM6_ATOM_MERRIFIELD1 0x4A /* Tangier */ | ||
60 | #define INTEL_FAM6_ATOM_MERRIFIELD2 0x5A /* Annidale */ | ||
61 | #define INTEL_FAM6_ATOM_GOLDMONT 0x5C | ||
62 | #define INTEL_FAM6_ATOM_DENVERTON 0x5F /* Goldmont Microserver */ | ||
63 | |||
64 | /* Xeon Phi */ | ||
65 | |||
66 | #define INTEL_FAM6_XEON_PHI_KNL 0x57 /* Knights Landing */ | ||
67 | |||
68 | #endif /* _ASM_X86_INTEL_FAMILY_H */ | ||
diff --git a/arch/x86/include/asm/kaiser.h b/arch/x86/include/asm/kaiser.h new file mode 100644 index 000000000000..48c791a411ab --- /dev/null +++ b/arch/x86/include/asm/kaiser.h | |||
@@ -0,0 +1,151 @@ | |||
1 | #ifndef _ASM_X86_KAISER_H | ||
2 | #define _ASM_X86_KAISER_H | ||
3 | |||
4 | #include <uapi/asm/processor-flags.h> /* For PCID constants */ | ||
5 | |||
6 | /* | ||
7 | * This file includes the definitions for the KAISER feature. | ||
8 | * KAISER is a counter measure against x86_64 side channel attacks on | ||
9 | * the kernel virtual memory. It has a shadow pgd for every process: the | ||
10 | * shadow pgd has a minimalistic kernel-set mapped, but includes the whole | ||
11 | * user memory. Within a kernel context switch, or when an interrupt is handled, | ||
12 | * the pgd is switched to the normal one. When the system switches to user mode, | ||
13 | * the shadow pgd is enabled. By this, the virtual memory caches are freed, | ||
14 | * and the user may not attack the whole kernel memory. | ||
15 | * | ||
16 | * A minimalistic kernel mapping holds the parts needed to be mapped in user | ||
17 | * mode, such as the entry/exit functions of the user space, or the stacks. | ||
18 | */ | ||
19 | |||
20 | #define KAISER_SHADOW_PGD_OFFSET 0x1000 | ||
21 | |||
22 | #ifdef CONFIG_PAGE_TABLE_ISOLATION | ||
23 | /* | ||
24 | * A page table address must have this alignment to stay the same when | ||
25 | * KAISER_SHADOW_PGD_OFFSET mask is applied | ||
26 | */ | ||
27 | #define KAISER_KERNEL_PGD_ALIGNMENT (KAISER_SHADOW_PGD_OFFSET << 1) | ||
28 | #else | ||
29 | #define KAISER_KERNEL_PGD_ALIGNMENT PAGE_SIZE | ||
30 | #endif | ||
31 | |||
32 | #ifdef __ASSEMBLY__ | ||
33 | #ifdef CONFIG_PAGE_TABLE_ISOLATION | ||
34 | |||
35 | .macro _SWITCH_TO_KERNEL_CR3 reg | ||
36 | movq %cr3, \reg | ||
37 | andq $(~(X86_CR3_PCID_ASID_MASK | KAISER_SHADOW_PGD_OFFSET)), \reg | ||
38 | /* If PCID enabled, set X86_CR3_PCID_NOFLUSH_BIT */ | ||
39 | ALTERNATIVE "", "bts $63, \reg", X86_FEATURE_PCID | ||
40 | movq \reg, %cr3 | ||
41 | .endm | ||
42 | |||
43 | .macro _SWITCH_TO_USER_CR3 reg regb | ||
44 | /* | ||
45 | * regb must be the low byte portion of reg: because we have arranged | ||
46 | * for the low byte of the user PCID to serve as the high byte of NOFLUSH | ||
47 | * (0x80 for each when PCID is enabled, or 0x00 when PCID and NOFLUSH are | ||
48 | * not enabled): so that the one register can update both memory and cr3. | ||
49 | */ | ||
50 | movq %cr3, \reg | ||
51 | orq PER_CPU_VAR(x86_cr3_pcid_user), \reg | ||
52 | js 9f | ||
53 | /* If PCID enabled, FLUSH this time, reset to NOFLUSH for next time */ | ||
54 | movb \regb, PER_CPU_VAR(x86_cr3_pcid_user+7) | ||
55 | 9: | ||
56 | movq \reg, %cr3 | ||
57 | .endm | ||
58 | |||
59 | .macro SWITCH_KERNEL_CR3 | ||
60 | ALTERNATIVE "jmp 8f", "pushq %rax", X86_FEATURE_KAISER | ||
61 | _SWITCH_TO_KERNEL_CR3 %rax | ||
62 | popq %rax | ||
63 | 8: | ||
64 | .endm | ||
65 | |||
66 | .macro SWITCH_USER_CR3 | ||
67 | ALTERNATIVE "jmp 8f", "pushq %rax", X86_FEATURE_KAISER | ||
68 | _SWITCH_TO_USER_CR3 %rax %al | ||
69 | popq %rax | ||
70 | 8: | ||
71 | .endm | ||
72 | |||
73 | .macro SWITCH_KERNEL_CR3_NO_STACK | ||
74 | ALTERNATIVE "jmp 8f", \ | ||
75 | __stringify(movq %rax, PER_CPU_VAR(unsafe_stack_register_backup)), \ | ||
76 | X86_FEATURE_KAISER | ||
77 | _SWITCH_TO_KERNEL_CR3 %rax | ||
78 | movq PER_CPU_VAR(unsafe_stack_register_backup), %rax | ||
79 | 8: | ||
80 | .endm | ||
81 | |||
82 | #else /* CONFIG_PAGE_TABLE_ISOLATION */ | ||
83 | |||
84 | .macro SWITCH_KERNEL_CR3 | ||
85 | .endm | ||
86 | .macro SWITCH_USER_CR3 | ||
87 | .endm | ||
88 | .macro SWITCH_KERNEL_CR3_NO_STACK | ||
89 | .endm | ||
90 | |||
91 | #endif /* CONFIG_PAGE_TABLE_ISOLATION */ | ||
92 | |||
93 | #else /* __ASSEMBLY__ */ | ||
94 | |||
95 | #ifdef CONFIG_PAGE_TABLE_ISOLATION | ||
96 | /* | ||
97 | * Upon kernel/user mode switch, it may happen that the address | ||
98 | * space has to be switched before the registers have been | ||
99 | * stored. To change the address space, another register is | ||
100 | * needed. A register therefore has to be stored/restored. | ||
101 | */ | ||
102 | DECLARE_PER_CPU_USER_MAPPED(unsigned long, unsafe_stack_register_backup); | ||
103 | |||
104 | DECLARE_PER_CPU(unsigned long, x86_cr3_pcid_user); | ||
105 | |||
106 | extern char __per_cpu_user_mapped_start[], __per_cpu_user_mapped_end[]; | ||
107 | |||
108 | extern int kaiser_enabled; | ||
109 | extern void __init kaiser_check_boottime_disable(void); | ||
110 | #else | ||
111 | #define kaiser_enabled 0 | ||
112 | static inline void __init kaiser_check_boottime_disable(void) {} | ||
113 | #endif /* CONFIG_PAGE_TABLE_ISOLATION */ | ||
114 | |||
115 | /* | ||
116 | * Kaiser function prototypes are needed even when CONFIG_PAGE_TABLE_ISOLATION is not set, | ||
117 | * so as to build with tests on kaiser_enabled instead of #ifdefs. | ||
118 | */ | ||
119 | |||
120 | /** | ||
121 | * kaiser_add_mapping - map a virtual memory part to the shadow (user) mapping | ||
122 | * @addr: the start address of the range | ||
123 | * @size: the size of the range | ||
124 | * @flags: The mapping flags of the pages | ||
125 | * | ||
126 | * The mapping is done on a global scope, so no bigger | ||
127 | * synchronization has to be done. the pages have to be | ||
128 | * manually unmapped again when they are not needed any longer. | ||
129 | */ | ||
130 | extern int kaiser_add_mapping(unsigned long addr, unsigned long size, unsigned long flags); | ||
131 | |||
132 | /** | ||
133 | * kaiser_remove_mapping - unmap a virtual memory part of the shadow mapping | ||
134 | * @addr: the start address of the range | ||
135 | * @size: the size of the range | ||
136 | */ | ||
137 | extern void kaiser_remove_mapping(unsigned long start, unsigned long size); | ||
138 | |||
139 | /** | ||
140 | * kaiser_init - Initialize the shadow mapping | ||
141 | * | ||
142 | * Most parts of the shadow mapping can be mapped upon boot | ||
143 | * time. Only per-process things like the thread stacks | ||
144 | * or a new LDT have to be mapped at runtime. These boot- | ||
145 | * time mappings are permanent and never unmapped. | ||
146 | */ | ||
147 | extern void kaiser_init(void); | ||
148 | |||
149 | #endif /* __ASSEMBLY */ | ||
150 | |||
151 | #endif /* _ASM_X86_KAISER_H */ | ||
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 9d2abb2a41d2..74fda1a453bd 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
@@ -998,7 +998,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2, | |||
998 | static inline int emulate_instruction(struct kvm_vcpu *vcpu, | 998 | static inline int emulate_instruction(struct kvm_vcpu *vcpu, |
999 | int emulation_type) | 999 | int emulation_type) |
1000 | { | 1000 | { |
1001 | return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0); | 1001 | return x86_emulate_instruction(vcpu, 0, |
1002 | emulation_type | EMULTYPE_NO_REEXECUTE, NULL, 0); | ||
1002 | } | 1003 | } |
1003 | 1004 | ||
1004 | void kvm_enable_efer_bits(u64); | 1005 | void kvm_enable_efer_bits(u64); |
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h index 55234d5e7160..7680b76adafc 100644 --- a/arch/x86/include/asm/mmu.h +++ b/arch/x86/include/asm/mmu.h | |||
@@ -24,12 +24,6 @@ typedef struct { | |||
24 | atomic_t perf_rdpmc_allowed; /* nonzero if rdpmc is allowed */ | 24 | atomic_t perf_rdpmc_allowed; /* nonzero if rdpmc is allowed */ |
25 | } mm_context_t; | 25 | } mm_context_t; |
26 | 26 | ||
27 | #ifdef CONFIG_SMP | ||
28 | void leave_mm(int cpu); | 27 | void leave_mm(int cpu); |
29 | #else | ||
30 | static inline void leave_mm(int cpu) | ||
31 | { | ||
32 | } | ||
33 | #endif | ||
34 | 28 | ||
35 | #endif /* _ASM_X86_MMU_H */ | 29 | #endif /* _ASM_X86_MMU_H */ |
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index bfd9b2a35a0b..9bfc5fd77015 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h | |||
@@ -98,109 +98,16 @@ static inline void load_mm_ldt(struct mm_struct *mm) | |||
98 | 98 | ||
99 | static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) | 99 | static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) |
100 | { | 100 | { |
101 | #ifdef CONFIG_SMP | ||
102 | if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) | 101 | if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) |
103 | this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY); | 102 | this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY); |
104 | #endif | ||
105 | } | 103 | } |
106 | 104 | ||
107 | static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | 105 | extern void switch_mm(struct mm_struct *prev, struct mm_struct *next, |
108 | struct task_struct *tsk) | 106 | struct task_struct *tsk); |
109 | { | ||
110 | unsigned cpu = smp_processor_id(); | ||
111 | |||
112 | if (likely(prev != next)) { | ||
113 | #ifdef CONFIG_SMP | ||
114 | this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK); | ||
115 | this_cpu_write(cpu_tlbstate.active_mm, next); | ||
116 | #endif | ||
117 | cpumask_set_cpu(cpu, mm_cpumask(next)); | ||
118 | |||
119 | /* | ||
120 | * Re-load page tables. | ||
121 | * | ||
122 | * This logic has an ordering constraint: | ||
123 | * | ||
124 | * CPU 0: Write to a PTE for 'next' | ||
125 | * CPU 0: load bit 1 in mm_cpumask. if nonzero, send IPI. | ||
126 | * CPU 1: set bit 1 in next's mm_cpumask | ||
127 | * CPU 1: load from the PTE that CPU 0 writes (implicit) | ||
128 | * | ||
129 | * We need to prevent an outcome in which CPU 1 observes | ||
130 | * the new PTE value and CPU 0 observes bit 1 clear in | ||
131 | * mm_cpumask. (If that occurs, then the IPI will never | ||
132 | * be sent, and CPU 0's TLB will contain a stale entry.) | ||
133 | * | ||
134 | * The bad outcome can occur if either CPU's load is | ||
135 | * reordered before that CPU's store, so both CPUs must | ||
136 | * execute full barriers to prevent this from happening. | ||
137 | * | ||
138 | * Thus, switch_mm needs a full barrier between the | ||
139 | * store to mm_cpumask and any operation that could load | ||
140 | * from next->pgd. TLB fills are special and can happen | ||
141 | * due to instruction fetches or for no reason at all, | ||
142 | * and neither LOCK nor MFENCE orders them. | ||
143 | * Fortunately, load_cr3() is serializing and gives the | ||
144 | * ordering guarantee we need. | ||
145 | * | ||
146 | */ | ||
147 | load_cr3(next->pgd); | ||
148 | |||
149 | trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); | ||
150 | |||
151 | /* Stop flush ipis for the previous mm */ | ||
152 | cpumask_clear_cpu(cpu, mm_cpumask(prev)); | ||
153 | 107 | ||
154 | /* Load per-mm CR4 state */ | 108 | extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, |
155 | load_mm_cr4(next); | 109 | struct task_struct *tsk); |
156 | 110 | #define switch_mm_irqs_off switch_mm_irqs_off | |
157 | #ifdef CONFIG_MODIFY_LDT_SYSCALL | ||
158 | /* | ||
159 | * Load the LDT, if the LDT is different. | ||
160 | * | ||
161 | * It's possible that prev->context.ldt doesn't match | ||
162 | * the LDT register. This can happen if leave_mm(prev) | ||
163 | * was called and then modify_ldt changed | ||
164 | * prev->context.ldt but suppressed an IPI to this CPU. | ||
165 | * In this case, prev->context.ldt != NULL, because we | ||
166 | * never set context.ldt to NULL while the mm still | ||
167 | * exists. That means that next->context.ldt != | ||
168 | * prev->context.ldt, because mms never share an LDT. | ||
169 | */ | ||
170 | if (unlikely(prev->context.ldt != next->context.ldt)) | ||
171 | load_mm_ldt(next); | ||
172 | #endif | ||
173 | } | ||
174 | #ifdef CONFIG_SMP | ||
175 | else { | ||
176 | this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK); | ||
177 | BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next); | ||
178 | |||
179 | if (!cpumask_test_cpu(cpu, mm_cpumask(next))) { | ||
180 | /* | ||
181 | * On established mms, the mm_cpumask is only changed | ||
182 | * from irq context, from ptep_clear_flush() while in | ||
183 | * lazy tlb mode, and here. Irqs are blocked during | ||
184 | * schedule, protecting us from simultaneous changes. | ||
185 | */ | ||
186 | cpumask_set_cpu(cpu, mm_cpumask(next)); | ||
187 | |||
188 | /* | ||
189 | * We were in lazy tlb mode and leave_mm disabled | ||
190 | * tlb flush IPI delivery. We must reload CR3 | ||
191 | * to make sure to use no freed page tables. | ||
192 | * | ||
193 | * As above, load_cr3() is serializing and orders TLB | ||
194 | * fills with respect to the mm_cpumask write. | ||
195 | */ | ||
196 | load_cr3(next->pgd); | ||
197 | trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); | ||
198 | load_mm_cr4(next); | ||
199 | load_mm_ldt(next); | ||
200 | } | ||
201 | } | ||
202 | #endif | ||
203 | } | ||
204 | 111 | ||
205 | #define activate_mm(prev, next) \ | 112 | #define activate_mm(prev, next) \ |
206 | do { \ | 113 | do { \ |
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 37db36fddc88..b8911aecf035 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h | |||
@@ -330,6 +330,9 @@ | |||
330 | #define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL | 330 | #define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL |
331 | #define FAM10H_MMIO_CONF_BASE_SHIFT 20 | 331 | #define FAM10H_MMIO_CONF_BASE_SHIFT 20 |
332 | #define MSR_FAM10H_NODE_ID 0xc001100c | 332 | #define MSR_FAM10H_NODE_ID 0xc001100c |
333 | #define MSR_F10H_DECFG 0xc0011029 | ||
334 | #define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT 1 | ||
335 | #define MSR_F10H_DECFG_LFENCE_SERIALIZE BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT) | ||
333 | 336 | ||
334 | /* K8 MSRs */ | 337 | /* K8 MSRs */ |
335 | #define MSR_K8_TOP_MEM1 0xc001001a | 338 | #define MSR_K8_TOP_MEM1 0xc001001a |
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h new file mode 100644 index 000000000000..492370b9b35b --- /dev/null +++ b/arch/x86/include/asm/nospec-branch.h | |||
@@ -0,0 +1,198 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | |||
3 | #ifndef __NOSPEC_BRANCH_H__ | ||
4 | #define __NOSPEC_BRANCH_H__ | ||
5 | |||
6 | #include <asm/alternative.h> | ||
7 | #include <asm/alternative-asm.h> | ||
8 | #include <asm/cpufeature.h> | ||
9 | |||
10 | /* | ||
11 | * Fill the CPU return stack buffer. | ||
12 | * | ||
13 | * Each entry in the RSB, if used for a speculative 'ret', contains an | ||
14 | * infinite 'pause; lfence; jmp' loop to capture speculative execution. | ||
15 | * | ||
16 | * This is required in various cases for retpoline and IBRS-based | ||
17 | * mitigations for the Spectre variant 2 vulnerability. Sometimes to | ||
18 | * eliminate potentially bogus entries from the RSB, and sometimes | ||
19 | * purely to ensure that it doesn't get empty, which on some CPUs would | ||
20 | * allow predictions from other (unwanted!) sources to be used. | ||
21 | * | ||
22 | * We define a CPP macro such that it can be used from both .S files and | ||
23 | * inline assembly. It's possible to do a .macro and then include that | ||
24 | * from C via asm(".include <asm/nospec-branch.h>") but let's not go there. | ||
25 | */ | ||
26 | |||
27 | #define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */ | ||
28 | #define RSB_FILL_LOOPS 16 /* To avoid underflow */ | ||
29 | |||
30 | /* | ||
31 | * Google experimented with loop-unrolling and this turned out to be | ||
32 | * the optimal version — two calls, each with their own speculation | ||
33 | * trap should their return address end up getting used, in a loop. | ||
34 | */ | ||
35 | #define __FILL_RETURN_BUFFER(reg, nr, sp) \ | ||
36 | mov $(nr/2), reg; \ | ||
37 | 771: \ | ||
38 | call 772f; \ | ||
39 | 773: /* speculation trap */ \ | ||
40 | pause; \ | ||
41 | lfence; \ | ||
42 | jmp 773b; \ | ||
43 | 772: \ | ||
44 | call 774f; \ | ||
45 | 775: /* speculation trap */ \ | ||
46 | pause; \ | ||
47 | lfence; \ | ||
48 | jmp 775b; \ | ||
49 | 774: \ | ||
50 | dec reg; \ | ||
51 | jnz 771b; \ | ||
52 | add $(BITS_PER_LONG/8) * nr, sp; | ||
53 | |||
54 | #ifdef __ASSEMBLY__ | ||
55 | |||
56 | /* | ||
57 | * These are the bare retpoline primitives for indirect jmp and call. | ||
58 | * Do not use these directly; they only exist to make the ALTERNATIVE | ||
59 | * invocation below less ugly. | ||
60 | */ | ||
61 | .macro RETPOLINE_JMP reg:req | ||
62 | call .Ldo_rop_\@ | ||
63 | .Lspec_trap_\@: | ||
64 | pause | ||
65 | lfence | ||
66 | jmp .Lspec_trap_\@ | ||
67 | .Ldo_rop_\@: | ||
68 | mov \reg, (%_ASM_SP) | ||
69 | ret | ||
70 | .endm | ||
71 | |||
72 | /* | ||
73 | * This is a wrapper around RETPOLINE_JMP so the called function in reg | ||
74 | * returns to the instruction after the macro. | ||
75 | */ | ||
76 | .macro RETPOLINE_CALL reg:req | ||
77 | jmp .Ldo_call_\@ | ||
78 | .Ldo_retpoline_jmp_\@: | ||
79 | RETPOLINE_JMP \reg | ||
80 | .Ldo_call_\@: | ||
81 | call .Ldo_retpoline_jmp_\@ | ||
82 | .endm | ||
83 | |||
84 | /* | ||
85 | * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple | ||
86 | * indirect jmp/call which may be susceptible to the Spectre variant 2 | ||
87 | * attack. | ||
88 | */ | ||
89 | .macro JMP_NOSPEC reg:req | ||
90 | #ifdef CONFIG_RETPOLINE | ||
91 | ALTERNATIVE_2 __stringify(jmp *\reg), \ | ||
92 | __stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE, \ | ||
93 | __stringify(lfence; jmp *\reg), X86_FEATURE_RETPOLINE_AMD | ||
94 | #else | ||
95 | jmp *\reg | ||
96 | #endif | ||
97 | .endm | ||
98 | |||
99 | .macro CALL_NOSPEC reg:req | ||
100 | #ifdef CONFIG_RETPOLINE | ||
101 | ALTERNATIVE_2 __stringify(call *\reg), \ | ||
102 | __stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\ | ||
103 | __stringify(lfence; call *\reg), X86_FEATURE_RETPOLINE_AMD | ||
104 | #else | ||
105 | call *\reg | ||
106 | #endif | ||
107 | .endm | ||
108 | |||
109 | /* | ||
110 | * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP | ||
111 | * monstrosity above, manually. | ||
112 | */ | ||
113 | .macro FILL_RETURN_BUFFER reg:req nr:req ftr:req | ||
114 | #ifdef CONFIG_RETPOLINE | ||
115 | ALTERNATIVE "jmp .Lskip_rsb_\@", \ | ||
116 | __stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)) \ | ||
117 | \ftr | ||
118 | .Lskip_rsb_\@: | ||
119 | #endif | ||
120 | .endm | ||
121 | |||
122 | #else /* __ASSEMBLY__ */ | ||
123 | |||
124 | #if defined(CONFIG_X86_64) && defined(RETPOLINE) | ||
125 | |||
126 | /* | ||
127 | * Since the inline asm uses the %V modifier which is only in newer GCC, | ||
128 | * the 64-bit one is dependent on RETPOLINE not CONFIG_RETPOLINE. | ||
129 | */ | ||
130 | # define CALL_NOSPEC \ | ||
131 | ALTERNATIVE( \ | ||
132 | "call *%[thunk_target]\n", \ | ||
133 | "call __x86_indirect_thunk_%V[thunk_target]\n", \ | ||
134 | X86_FEATURE_RETPOLINE) | ||
135 | # define THUNK_TARGET(addr) [thunk_target] "r" (addr) | ||
136 | |||
137 | #elif defined(CONFIG_X86_32) && defined(CONFIG_RETPOLINE) | ||
138 | /* | ||
139 | * For i386 we use the original ret-equivalent retpoline, because | ||
140 | * otherwise we'll run out of registers. We don't care about CET | ||
141 | * here, anyway. | ||
142 | */ | ||
143 | # define CALL_NOSPEC ALTERNATIVE("call *%[thunk_target]\n", \ | ||
144 | " jmp 904f;\n" \ | ||
145 | " .align 16\n" \ | ||
146 | "901: call 903f;\n" \ | ||
147 | "902: pause;\n" \ | ||
148 | " lfence;\n" \ | ||
149 | " jmp 902b;\n" \ | ||
150 | " .align 16\n" \ | ||
151 | "903: addl $4, %%esp;\n" \ | ||
152 | " pushl %[thunk_target];\n" \ | ||
153 | " ret;\n" \ | ||
154 | " .align 16\n" \ | ||
155 | "904: call 901b;\n", \ | ||
156 | X86_FEATURE_RETPOLINE) | ||
157 | |||
158 | # define THUNK_TARGET(addr) [thunk_target] "rm" (addr) | ||
159 | #else /* No retpoline for C / inline asm */ | ||
160 | # define CALL_NOSPEC "call *%[thunk_target]\n" | ||
161 | # define THUNK_TARGET(addr) [thunk_target] "rm" (addr) | ||
162 | #endif | ||
163 | |||
164 | /* The Spectre V2 mitigation variants */ | ||
165 | enum spectre_v2_mitigation { | ||
166 | SPECTRE_V2_NONE, | ||
167 | SPECTRE_V2_RETPOLINE_MINIMAL, | ||
168 | SPECTRE_V2_RETPOLINE_MINIMAL_AMD, | ||
169 | SPECTRE_V2_RETPOLINE_GENERIC, | ||
170 | SPECTRE_V2_RETPOLINE_AMD, | ||
171 | SPECTRE_V2_IBRS, | ||
172 | }; | ||
173 | |||
174 | extern char __indirect_thunk_start[]; | ||
175 | extern char __indirect_thunk_end[]; | ||
176 | |||
177 | /* | ||
178 | * On VMEXIT we must ensure that no RSB predictions learned in the guest | ||
179 | * can be followed in the host, by overwriting the RSB completely. Both | ||
180 | * retpoline and IBRS mitigations for Spectre v2 need this; only on future | ||
181 | * CPUs with IBRS_ATT *might* it be avoided. | ||
182 | */ | ||
183 | static inline void vmexit_fill_RSB(void) | ||
184 | { | ||
185 | #ifdef CONFIG_RETPOLINE | ||
186 | unsigned long loops; | ||
187 | |||
188 | asm volatile (ALTERNATIVE("jmp 910f", | ||
189 | __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)), | ||
190 | X86_FEATURE_RETPOLINE) | ||
191 | "910:" | ||
192 | : "=r" (loops), ASM_CALL_CONSTRAINT | ||
193 | : : "memory" ); | ||
194 | #endif | ||
195 | } | ||
196 | |||
197 | #endif /* __ASSEMBLY__ */ | ||
198 | #endif /* __NOSPEC_BRANCH_H__ */ | ||
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 6ec0c8b2e9df..84c62d950023 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h | |||
@@ -18,6 +18,12 @@ | |||
18 | #ifndef __ASSEMBLY__ | 18 | #ifndef __ASSEMBLY__ |
19 | #include <asm/x86_init.h> | 19 | #include <asm/x86_init.h> |
20 | 20 | ||
21 | #ifdef CONFIG_PAGE_TABLE_ISOLATION | ||
22 | extern int kaiser_enabled; | ||
23 | #else | ||
24 | #define kaiser_enabled 0 | ||
25 | #endif | ||
26 | |||
21 | void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd); | 27 | void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd); |
22 | void ptdump_walk_pgd_level_checkwx(void); | 28 | void ptdump_walk_pgd_level_checkwx(void); |
23 | 29 | ||
@@ -653,7 +659,17 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address) | |||
653 | 659 | ||
654 | static inline int pgd_bad(pgd_t pgd) | 660 | static inline int pgd_bad(pgd_t pgd) |
655 | { | 661 | { |
656 | return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE; | 662 | pgdval_t ignore_flags = _PAGE_USER; |
663 | /* | ||
664 | * We set NX on KAISER pgds that map userspace memory so | ||
665 | * that userspace can not meaningfully use the kernel | ||
666 | * page table by accident; it will fault on the first | ||
667 | * instruction it tries to run. See native_set_pgd(). | ||
668 | */ | ||
669 | if (kaiser_enabled) | ||
670 | ignore_flags |= _PAGE_NX; | ||
671 | |||
672 | return (pgd_flags(pgd) & ~ignore_flags) != _KERNPG_TABLE; | ||
657 | } | 673 | } |
658 | 674 | ||
659 | static inline int pgd_none(pgd_t pgd) | 675 | static inline int pgd_none(pgd_t pgd) |
@@ -855,7 +871,15 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, | |||
855 | */ | 871 | */ |
856 | static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) | 872 | static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) |
857 | { | 873 | { |
858 | memcpy(dst, src, count * sizeof(pgd_t)); | 874 | memcpy(dst, src, count * sizeof(pgd_t)); |
875 | #ifdef CONFIG_PAGE_TABLE_ISOLATION | ||
876 | if (kaiser_enabled) { | ||
877 | /* Clone the shadow pgd part as well */ | ||
878 | memcpy(native_get_shadow_pgd(dst), | ||
879 | native_get_shadow_pgd(src), | ||
880 | count * sizeof(pgd_t)); | ||
881 | } | ||
882 | #endif | ||
859 | } | 883 | } |
860 | 884 | ||
861 | #define PTE_SHIFT ilog2(PTRS_PER_PTE) | 885 | #define PTE_SHIFT ilog2(PTRS_PER_PTE) |
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 2ee781114d34..c810226e741a 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h | |||
@@ -106,9 +106,32 @@ static inline void native_pud_clear(pud_t *pud) | |||
106 | native_set_pud(pud, native_make_pud(0)); | 106 | native_set_pud(pud, native_make_pud(0)); |
107 | } | 107 | } |
108 | 108 | ||
109 | #ifdef CONFIG_PAGE_TABLE_ISOLATION | ||
110 | extern pgd_t kaiser_set_shadow_pgd(pgd_t *pgdp, pgd_t pgd); | ||
111 | |||
112 | static inline pgd_t *native_get_shadow_pgd(pgd_t *pgdp) | ||
113 | { | ||
114 | #ifdef CONFIG_DEBUG_VM | ||
115 | /* linux/mmdebug.h may not have been included at this point */ | ||
116 | BUG_ON(!kaiser_enabled); | ||
117 | #endif | ||
118 | return (pgd_t *)((unsigned long)pgdp | (unsigned long)PAGE_SIZE); | ||
119 | } | ||
120 | #else | ||
121 | static inline pgd_t kaiser_set_shadow_pgd(pgd_t *pgdp, pgd_t pgd) | ||
122 | { | ||
123 | return pgd; | ||
124 | } | ||
125 | static inline pgd_t *native_get_shadow_pgd(pgd_t *pgdp) | ||
126 | { | ||
127 | BUILD_BUG_ON(1); | ||
128 | return NULL; | ||
129 | } | ||
130 | #endif /* CONFIG_PAGE_TABLE_ISOLATION */ | ||
131 | |||
109 | static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd) | 132 | static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd) |
110 | { | 133 | { |
111 | *pgdp = pgd; | 134 | *pgdp = kaiser_set_shadow_pgd(pgdp, pgd); |
112 | } | 135 | } |
113 | 136 | ||
114 | static inline void native_pgd_clear(pgd_t *pgd) | 137 | static inline void native_pgd_clear(pgd_t *pgd) |
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index 79c91853e50e..8dba273da25a 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h | |||
@@ -89,7 +89,7 @@ | |||
89 | #define _PAGE_NX (_AT(pteval_t, 0)) | 89 | #define _PAGE_NX (_AT(pteval_t, 0)) |
90 | #endif | 90 | #endif |
91 | 91 | ||
92 | #define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE) | 92 | #define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE) |
93 | 93 | ||
94 | #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \ | 94 | #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \ |
95 | _PAGE_ACCESSED | _PAGE_DIRTY) | 95 | _PAGE_ACCESSED | _PAGE_DIRTY) |
@@ -102,6 +102,33 @@ | |||
102 | _PAGE_SOFT_DIRTY) | 102 | _PAGE_SOFT_DIRTY) |
103 | #define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE) | 103 | #define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE) |
104 | 104 | ||
105 | /* The ASID is the lower 12 bits of CR3 */ | ||
106 | #define X86_CR3_PCID_ASID_MASK (_AC((1<<12)-1,UL)) | ||
107 | |||
108 | /* Mask for all the PCID-related bits in CR3: */ | ||
109 | #define X86_CR3_PCID_MASK (X86_CR3_PCID_NOFLUSH | X86_CR3_PCID_ASID_MASK) | ||
110 | #define X86_CR3_PCID_ASID_KERN (_AC(0x0,UL)) | ||
111 | |||
112 | #if defined(CONFIG_PAGE_TABLE_ISOLATION) && defined(CONFIG_X86_64) | ||
113 | /* Let X86_CR3_PCID_ASID_USER be usable for the X86_CR3_PCID_NOFLUSH bit */ | ||
114 | #define X86_CR3_PCID_ASID_USER (_AC(0x80,UL)) | ||
115 | |||
116 | #define X86_CR3_PCID_KERN_FLUSH (X86_CR3_PCID_ASID_KERN) | ||
117 | #define X86_CR3_PCID_USER_FLUSH (X86_CR3_PCID_ASID_USER) | ||
118 | #define X86_CR3_PCID_KERN_NOFLUSH (X86_CR3_PCID_NOFLUSH | X86_CR3_PCID_ASID_KERN) | ||
119 | #define X86_CR3_PCID_USER_NOFLUSH (X86_CR3_PCID_NOFLUSH | X86_CR3_PCID_ASID_USER) | ||
120 | #else | ||
121 | #define X86_CR3_PCID_ASID_USER (_AC(0x0,UL)) | ||
122 | /* | ||
123 | * PCIDs are unsupported on 32-bit and none of these bits can be | ||
124 | * set in CR3: | ||
125 | */ | ||
126 | #define X86_CR3_PCID_KERN_FLUSH (0) | ||
127 | #define X86_CR3_PCID_USER_FLUSH (0) | ||
128 | #define X86_CR3_PCID_KERN_NOFLUSH (0) | ||
129 | #define X86_CR3_PCID_USER_NOFLUSH (0) | ||
130 | #endif | ||
131 | |||
105 | /* | 132 | /* |
106 | * The cache modes defined here are used to translate between pure SW usage | 133 | * The cache modes defined here are used to translate between pure SW usage |
107 | * and the HW defined cache mode bits and/or PAT entries. | 134 | * and the HW defined cache mode bits and/or PAT entries. |
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 2d5a50cb61a2..9e77cea2a8ef 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h | |||
@@ -113,7 +113,7 @@ struct cpuinfo_x86 { | |||
113 | char x86_vendor_id[16]; | 113 | char x86_vendor_id[16]; |
114 | char x86_model_id[64]; | 114 | char x86_model_id[64]; |
115 | /* in KB - valid for CPUS which support this call: */ | 115 | /* in KB - valid for CPUS which support this call: */ |
116 | int x86_cache_size; | 116 | unsigned int x86_cache_size; |
117 | int x86_cache_alignment; /* In bytes */ | 117 | int x86_cache_alignment; /* In bytes */ |
118 | /* Cache QoS architectural values: */ | 118 | /* Cache QoS architectural values: */ |
119 | int x86_cache_max_rmid; /* max index */ | 119 | int x86_cache_max_rmid; /* max index */ |
@@ -156,8 +156,8 @@ extern struct cpuinfo_x86 boot_cpu_data; | |||
156 | extern struct cpuinfo_x86 new_cpu_data; | 156 | extern struct cpuinfo_x86 new_cpu_data; |
157 | 157 | ||
158 | extern struct tss_struct doublefault_tss; | 158 | extern struct tss_struct doublefault_tss; |
159 | extern __u32 cpu_caps_cleared[NCAPINTS]; | 159 | extern __u32 cpu_caps_cleared[NCAPINTS + NBUGINTS]; |
160 | extern __u32 cpu_caps_set[NCAPINTS]; | 160 | extern __u32 cpu_caps_set[NCAPINTS + NBUGINTS]; |
161 | 161 | ||
162 | #ifdef CONFIG_SMP | 162 | #ifdef CONFIG_SMP |
163 | DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info); | 163 | DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info); |
@@ -305,7 +305,7 @@ struct tss_struct { | |||
305 | 305 | ||
306 | } ____cacheline_aligned; | 306 | } ____cacheline_aligned; |
307 | 307 | ||
308 | DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss); | 308 | DECLARE_PER_CPU_SHARED_ALIGNED_USER_MAPPED(struct tss_struct, cpu_tss); |
309 | 309 | ||
310 | #ifdef CONFIG_X86_32 | 310 | #ifdef CONFIG_X86_32 |
311 | DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack); | 311 | DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack); |
@@ -574,7 +574,7 @@ static inline void sync_core(void) | |||
574 | { | 574 | { |
575 | int tmp; | 575 | int tmp; |
576 | 576 | ||
577 | #ifdef CONFIG_M486 | 577 | #ifdef CONFIG_X86_32 |
578 | /* | 578 | /* |
579 | * Do a CPUID if available, otherwise do a jump. The jump | 579 | * Do a CPUID if available, otherwise do a jump. The jump |
580 | * can conveniently enough be the jump around CPUID. | 580 | * can conveniently enough be the jump around CPUID. |
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h index baad72e4c100..c926255745e1 100644 --- a/arch/x86/include/asm/pvclock.h +++ b/arch/x86/include/asm/pvclock.h | |||
@@ -4,6 +4,15 @@ | |||
4 | #include <linux/clocksource.h> | 4 | #include <linux/clocksource.h> |
5 | #include <asm/pvclock-abi.h> | 5 | #include <asm/pvclock-abi.h> |
6 | 6 | ||
7 | #ifdef CONFIG_KVM_GUEST | ||
8 | extern struct pvclock_vsyscall_time_info *pvclock_pvti_cpu0_va(void); | ||
9 | #else | ||
10 | static inline struct pvclock_vsyscall_time_info *pvclock_pvti_cpu0_va(void) | ||
11 | { | ||
12 | return NULL; | ||
13 | } | ||
14 | #endif | ||
15 | |||
7 | /* some helper functions for xen and kvm pv clock sources */ | 16 | /* some helper functions for xen and kvm pv clock sources */ |
8 | cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src); | 17 | cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src); |
9 | u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src); | 18 | u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src); |
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h index 751bf4b7bf11..025ecfaba9c9 100644 --- a/arch/x86/include/asm/switch_to.h +++ b/arch/x86/include/asm/switch_to.h | |||
@@ -1,6 +1,8 @@ | |||
1 | #ifndef _ASM_X86_SWITCH_TO_H | 1 | #ifndef _ASM_X86_SWITCH_TO_H |
2 | #define _ASM_X86_SWITCH_TO_H | 2 | #define _ASM_X86_SWITCH_TO_H |
3 | 3 | ||
4 | #include <asm/nospec-branch.h> | ||
5 | |||
4 | struct task_struct; /* one of the stranger aspects of C forward declarations */ | 6 | struct task_struct; /* one of the stranger aspects of C forward declarations */ |
5 | __visible struct task_struct *__switch_to(struct task_struct *prev, | 7 | __visible struct task_struct *__switch_to(struct task_struct *prev, |
6 | struct task_struct *next); | 8 | struct task_struct *next); |
@@ -24,6 +26,23 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, | |||
24 | #define __switch_canary_iparam | 26 | #define __switch_canary_iparam |
25 | #endif /* CC_STACKPROTECTOR */ | 27 | #endif /* CC_STACKPROTECTOR */ |
26 | 28 | ||
29 | #ifdef CONFIG_RETPOLINE | ||
30 | /* | ||
31 | * When switching from a shallower to a deeper call stack | ||
32 | * the RSB may either underflow or use entries populated | ||
33 | * with userspace addresses. On CPUs where those concerns | ||
34 | * exist, overwrite the RSB with entries which capture | ||
35 | * speculative execution to prevent attack. | ||
36 | */ | ||
37 | #define __retpoline_fill_return_buffer \ | ||
38 | ALTERNATIVE("jmp 910f", \ | ||
39 | __stringify(__FILL_RETURN_BUFFER(%%ebx, RSB_CLEAR_LOOPS, %%esp)),\ | ||
40 | X86_FEATURE_RSB_CTXSW) \ | ||
41 | "910:\n\t" | ||
42 | #else | ||
43 | #define __retpoline_fill_return_buffer | ||
44 | #endif | ||
45 | |||
27 | /* | 46 | /* |
28 | * Saving eflags is important. It switches not only IOPL between tasks, | 47 | * Saving eflags is important. It switches not only IOPL between tasks, |
29 | * it also protects other tasks from NT leaking through sysenter etc. | 48 | * it also protects other tasks from NT leaking through sysenter etc. |
@@ -46,6 +65,7 @@ do { \ | |||
46 | "movl $1f,%[prev_ip]\n\t" /* save EIP */ \ | 65 | "movl $1f,%[prev_ip]\n\t" /* save EIP */ \ |
47 | "pushl %[next_ip]\n\t" /* restore EIP */ \ | 66 | "pushl %[next_ip]\n\t" /* restore EIP */ \ |
48 | __switch_canary \ | 67 | __switch_canary \ |
68 | __retpoline_fill_return_buffer \ | ||
49 | "jmp __switch_to\n" /* regparm call */ \ | 69 | "jmp __switch_to\n" /* regparm call */ \ |
50 | "1:\t" \ | 70 | "1:\t" \ |
51 | "popl %%ebp\n\t" /* restore EBP */ \ | 71 | "popl %%ebp\n\t" /* restore EBP */ \ |
@@ -100,6 +120,23 @@ do { \ | |||
100 | #define __switch_canary_iparam | 120 | #define __switch_canary_iparam |
101 | #endif /* CC_STACKPROTECTOR */ | 121 | #endif /* CC_STACKPROTECTOR */ |
102 | 122 | ||
123 | #ifdef CONFIG_RETPOLINE | ||
124 | /* | ||
125 | * When switching from a shallower to a deeper call stack | ||
126 | * the RSB may either underflow or use entries populated | ||
127 | * with userspace addresses. On CPUs where those concerns | ||
128 | * exist, overwrite the RSB with entries which capture | ||
129 | * speculative execution to prevent attack. | ||
130 | */ | ||
131 | #define __retpoline_fill_return_buffer \ | ||
132 | ALTERNATIVE("jmp 910f", \ | ||
133 | __stringify(__FILL_RETURN_BUFFER(%%r12, RSB_CLEAR_LOOPS, %%rsp)),\ | ||
134 | X86_FEATURE_RSB_CTXSW) \ | ||
135 | "910:\n\t" | ||
136 | #else | ||
137 | #define __retpoline_fill_return_buffer | ||
138 | #endif | ||
139 | |||
103 | /* | 140 | /* |
104 | * There is no need to save or restore flags, because flags are always | 141 | * There is no need to save or restore flags, because flags are always |
105 | * clean in kernel mode, with the possible exception of IOPL. Kernel IOPL | 142 | * clean in kernel mode, with the possible exception of IOPL. Kernel IOPL |
@@ -112,6 +149,7 @@ do { \ | |||
112 | "call __switch_to\n\t" \ | 149 | "call __switch_to\n\t" \ |
113 | "movq "__percpu_arg([current_task])",%%rsi\n\t" \ | 150 | "movq "__percpu_arg([current_task])",%%rsi\n\t" \ |
114 | __switch_canary \ | 151 | __switch_canary \ |
152 | __retpoline_fill_return_buffer \ | ||
115 | "movq %P[thread_info](%%rsi),%%r8\n\t" \ | 153 | "movq %P[thread_info](%%rsi),%%r8\n\t" \ |
116 | "movq %%rax,%%rdi\n\t" \ | 154 | "movq %%rax,%%rdi\n\t" \ |
117 | "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \ | 155 | "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \ |
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index 0c977fc124a7..c706b7796870 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h | |||
@@ -166,17 +166,6 @@ static inline struct thread_info *current_thread_info(void) | |||
166 | return (struct thread_info *)(current_top_of_stack() - THREAD_SIZE); | 166 | return (struct thread_info *)(current_top_of_stack() - THREAD_SIZE); |
167 | } | 167 | } |
168 | 168 | ||
169 | static inline unsigned long current_stack_pointer(void) | ||
170 | { | ||
171 | unsigned long sp; | ||
172 | #ifdef CONFIG_X86_64 | ||
173 | asm("mov %%rsp,%0" : "=g" (sp)); | ||
174 | #else | ||
175 | asm("mov %%esp,%0" : "=g" (sp)); | ||
176 | #endif | ||
177 | return sp; | ||
178 | } | ||
179 | |||
180 | /* | 169 | /* |
181 | * Walks up the stack frames to make sure that the specified object is | 170 | * Walks up the stack frames to make sure that the specified object is |
182 | * entirely contained by a single stack frame. | 171 | * entirely contained by a single stack frame. |
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 6433e28dc9c8..a691b66cc40a 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h | |||
@@ -6,6 +6,55 @@ | |||
6 | 6 | ||
7 | #include <asm/processor.h> | 7 | #include <asm/processor.h> |
8 | #include <asm/special_insns.h> | 8 | #include <asm/special_insns.h> |
9 | #include <asm/smp.h> | ||
10 | |||
11 | static inline void __invpcid(unsigned long pcid, unsigned long addr, | ||
12 | unsigned long type) | ||
13 | { | ||
14 | struct { u64 d[2]; } desc = { { pcid, addr } }; | ||
15 | |||
16 | /* | ||
17 | * The memory clobber is because the whole point is to invalidate | ||
18 | * stale TLB entries and, especially if we're flushing global | ||
19 | * mappings, we don't want the compiler to reorder any subsequent | ||
20 | * memory accesses before the TLB flush. | ||
21 | * | ||
22 | * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and | ||
23 | * invpcid (%rcx), %rax in long mode. | ||
24 | */ | ||
25 | asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01" | ||
26 | : : "m" (desc), "a" (type), "c" (&desc) : "memory"); | ||
27 | } | ||
28 | |||
29 | #define INVPCID_TYPE_INDIV_ADDR 0 | ||
30 | #define INVPCID_TYPE_SINGLE_CTXT 1 | ||
31 | #define INVPCID_TYPE_ALL_INCL_GLOBAL 2 | ||
32 | #define INVPCID_TYPE_ALL_NON_GLOBAL 3 | ||
33 | |||
34 | /* Flush all mappings for a given pcid and addr, not including globals. */ | ||
35 | static inline void invpcid_flush_one(unsigned long pcid, | ||
36 | unsigned long addr) | ||
37 | { | ||
38 | __invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR); | ||
39 | } | ||
40 | |||
41 | /* Flush all mappings for a given PCID, not including globals. */ | ||
42 | static inline void invpcid_flush_single_context(unsigned long pcid) | ||
43 | { | ||
44 | __invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT); | ||
45 | } | ||
46 | |||
47 | /* Flush all mappings, including globals, for all PCIDs. */ | ||
48 | static inline void invpcid_flush_all(void) | ||
49 | { | ||
50 | __invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL); | ||
51 | } | ||
52 | |||
53 | /* Flush all mappings for all PCIDs except globals. */ | ||
54 | static inline void invpcid_flush_all_nonglobals(void) | ||
55 | { | ||
56 | __invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL); | ||
57 | } | ||
9 | 58 | ||
10 | #ifdef CONFIG_PARAVIRT | 59 | #ifdef CONFIG_PARAVIRT |
11 | #include <asm/paravirt.h> | 60 | #include <asm/paravirt.h> |
@@ -16,10 +65,8 @@ | |||
16 | #endif | 65 | #endif |
17 | 66 | ||
18 | struct tlb_state { | 67 | struct tlb_state { |
19 | #ifdef CONFIG_SMP | ||
20 | struct mm_struct *active_mm; | 68 | struct mm_struct *active_mm; |
21 | int state; | 69 | int state; |
22 | #endif | ||
23 | 70 | ||
24 | /* | 71 | /* |
25 | * Access to this CR4 shadow and to H/W CR4 is protected by | 72 | * Access to this CR4 shadow and to H/W CR4 is protected by |
@@ -84,6 +131,24 @@ static inline void cr4_set_bits_and_update_boot(unsigned long mask) | |||
84 | cr4_set_bits(mask); | 131 | cr4_set_bits(mask); |
85 | } | 132 | } |
86 | 133 | ||
134 | /* | ||
135 | * Declare a couple of kaiser interfaces here for convenience, | ||
136 | * to avoid the need for asm/kaiser.h in unexpected places. | ||
137 | */ | ||
138 | #ifdef CONFIG_PAGE_TABLE_ISOLATION | ||
139 | extern int kaiser_enabled; | ||
140 | extern void kaiser_setup_pcid(void); | ||
141 | extern void kaiser_flush_tlb_on_return_to_user(void); | ||
142 | #else | ||
143 | #define kaiser_enabled 0 | ||
144 | static inline void kaiser_setup_pcid(void) | ||
145 | { | ||
146 | } | ||
147 | static inline void kaiser_flush_tlb_on_return_to_user(void) | ||
148 | { | ||
149 | } | ||
150 | #endif | ||
151 | |||
87 | static inline void __native_flush_tlb(void) | 152 | static inline void __native_flush_tlb(void) |
88 | { | 153 | { |
89 | /* | 154 | /* |
@@ -92,6 +157,8 @@ static inline void __native_flush_tlb(void) | |||
92 | * back: | 157 | * back: |
93 | */ | 158 | */ |
94 | preempt_disable(); | 159 | preempt_disable(); |
160 | if (kaiser_enabled) | ||
161 | kaiser_flush_tlb_on_return_to_user(); | ||
95 | native_write_cr3(native_read_cr3()); | 162 | native_write_cr3(native_read_cr3()); |
96 | preempt_enable(); | 163 | preempt_enable(); |
97 | } | 164 | } |
@@ -101,39 +168,84 @@ static inline void __native_flush_tlb_global_irq_disabled(void) | |||
101 | unsigned long cr4; | 168 | unsigned long cr4; |
102 | 169 | ||
103 | cr4 = this_cpu_read(cpu_tlbstate.cr4); | 170 | cr4 = this_cpu_read(cpu_tlbstate.cr4); |
104 | /* clear PGE */ | 171 | if (cr4 & X86_CR4_PGE) { |
105 | native_write_cr4(cr4 & ~X86_CR4_PGE); | 172 | /* clear PGE and flush TLB of all entries */ |
106 | /* write old PGE again and flush TLBs */ | 173 | native_write_cr4(cr4 & ~X86_CR4_PGE); |
107 | native_write_cr4(cr4); | 174 | /* restore PGE as it was before */ |
175 | native_write_cr4(cr4); | ||
176 | } else { | ||
177 | /* do it with cr3, letting kaiser flush user PCID */ | ||
178 | __native_flush_tlb(); | ||
179 | } | ||
108 | } | 180 | } |
109 | 181 | ||
110 | static inline void __native_flush_tlb_global(void) | 182 | static inline void __native_flush_tlb_global(void) |
111 | { | 183 | { |
112 | unsigned long flags; | 184 | unsigned long flags; |
113 | 185 | ||
186 | if (this_cpu_has(X86_FEATURE_INVPCID)) { | ||
187 | /* | ||
188 | * Using INVPCID is considerably faster than a pair of writes | ||
189 | * to CR4 sandwiched inside an IRQ flag save/restore. | ||
190 | * | ||
191 | * Note, this works with CR4.PCIDE=0 or 1. | ||
192 | */ | ||
193 | invpcid_flush_all(); | ||
194 | return; | ||
195 | } | ||
196 | |||
114 | /* | 197 | /* |
115 | * Read-modify-write to CR4 - protect it from preemption and | 198 | * Read-modify-write to CR4 - protect it from preemption and |
116 | * from interrupts. (Use the raw variant because this code can | 199 | * from interrupts. (Use the raw variant because this code can |
117 | * be called from deep inside debugging code.) | 200 | * be called from deep inside debugging code.) |
118 | */ | 201 | */ |
119 | raw_local_irq_save(flags); | 202 | raw_local_irq_save(flags); |
120 | |||
121 | __native_flush_tlb_global_irq_disabled(); | 203 | __native_flush_tlb_global_irq_disabled(); |
122 | |||
123 | raw_local_irq_restore(flags); | 204 | raw_local_irq_restore(flags); |
124 | } | 205 | } |
125 | 206 | ||
126 | static inline void __native_flush_tlb_single(unsigned long addr) | 207 | static inline void __native_flush_tlb_single(unsigned long addr) |
127 | { | 208 | { |
128 | asm volatile("invlpg (%0)" ::"r" (addr) : "memory"); | 209 | /* |
210 | * SIMICS #GP's if you run INVPCID with type 2/3 | ||
211 | * and X86_CR4_PCIDE clear. Shame! | ||
212 | * | ||
213 | * The ASIDs used below are hard-coded. But, we must not | ||
214 | * call invpcid(type=1/2) before CR4.PCIDE=1. Just call | ||
215 | * invlpg in the case we are called early. | ||
216 | */ | ||
217 | |||
218 | if (!this_cpu_has(X86_FEATURE_INVPCID_SINGLE)) { | ||
219 | if (kaiser_enabled) | ||
220 | kaiser_flush_tlb_on_return_to_user(); | ||
221 | asm volatile("invlpg (%0)" ::"r" (addr) : "memory"); | ||
222 | return; | ||
223 | } | ||
224 | /* Flush the address out of both PCIDs. */ | ||
225 | /* | ||
226 | * An optimization here might be to determine addresses | ||
227 | * that are only kernel-mapped and only flush the kernel | ||
228 | * ASID. But, userspace flushes are probably much more | ||
229 | * important performance-wise. | ||
230 | * | ||
231 | * Make sure to do only a single invpcid when KAISER is | ||
232 | * disabled and we have only a single ASID. | ||
233 | */ | ||
234 | if (kaiser_enabled) | ||
235 | invpcid_flush_one(X86_CR3_PCID_ASID_USER, addr); | ||
236 | invpcid_flush_one(X86_CR3_PCID_ASID_KERN, addr); | ||
129 | } | 237 | } |
130 | 238 | ||
131 | static inline void __flush_tlb_all(void) | 239 | static inline void __flush_tlb_all(void) |
132 | { | 240 | { |
133 | if (cpu_has_pge) | 241 | __flush_tlb_global(); |
134 | __flush_tlb_global(); | 242 | /* |
135 | else | 243 | * Note: if we somehow had PCID but not PGE, then this wouldn't work -- |
136 | __flush_tlb(); | 244 | * we'd end up flushing kernel translations for the current ASID but |
245 | * we might fail to flush kernel translations for other cached ASIDs. | ||
246 | * | ||
247 | * To avoid this issue, we force PCID off if PGE is off. | ||
248 | */ | ||
137 | } | 249 | } |
138 | 250 | ||
139 | static inline void __flush_tlb_one(unsigned long addr) | 251 | static inline void __flush_tlb_one(unsigned long addr) |
@@ -147,7 +259,6 @@ static inline void __flush_tlb_one(unsigned long addr) | |||
147 | /* | 259 | /* |
148 | * TLB flushing: | 260 | * TLB flushing: |
149 | * | 261 | * |
150 | * - flush_tlb() flushes the current mm struct TLBs | ||
151 | * - flush_tlb_all() flushes all processes TLBs | 262 | * - flush_tlb_all() flushes all processes TLBs |
152 | * - flush_tlb_mm(mm) flushes the specified mm context TLB's | 263 | * - flush_tlb_mm(mm) flushes the specified mm context TLB's |
153 | * - flush_tlb_page(vma, vmaddr) flushes one page | 264 | * - flush_tlb_page(vma, vmaddr) flushes one page |
@@ -159,84 +270,6 @@ static inline void __flush_tlb_one(unsigned long addr) | |||
159 | * and page-granular flushes are available only on i486 and up. | 270 | * and page-granular flushes are available only on i486 and up. |
160 | */ | 271 | */ |
161 | 272 | ||
162 | #ifndef CONFIG_SMP | ||
163 | |||
164 | /* "_up" is for UniProcessor. | ||
165 | * | ||
166 | * This is a helper for other header functions. *Not* intended to be called | ||
167 | * directly. All global TLB flushes need to either call this, or to bump the | ||
168 | * vm statistics themselves. | ||
169 | */ | ||
170 | static inline void __flush_tlb_up(void) | ||
171 | { | ||
172 | count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); | ||
173 | __flush_tlb(); | ||
174 | } | ||
175 | |||
176 | static inline void flush_tlb_all(void) | ||
177 | { | ||
178 | count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); | ||
179 | __flush_tlb_all(); | ||
180 | } | ||
181 | |||
182 | static inline void flush_tlb(void) | ||
183 | { | ||
184 | __flush_tlb_up(); | ||
185 | } | ||
186 | |||
187 | static inline void local_flush_tlb(void) | ||
188 | { | ||
189 | __flush_tlb_up(); | ||
190 | } | ||
191 | |||
192 | static inline void flush_tlb_mm(struct mm_struct *mm) | ||
193 | { | ||
194 | if (mm == current->active_mm) | ||
195 | __flush_tlb_up(); | ||
196 | } | ||
197 | |||
198 | static inline void flush_tlb_page(struct vm_area_struct *vma, | ||
199 | unsigned long addr) | ||
200 | { | ||
201 | if (vma->vm_mm == current->active_mm) | ||
202 | __flush_tlb_one(addr); | ||
203 | } | ||
204 | |||
205 | static inline void flush_tlb_range(struct vm_area_struct *vma, | ||
206 | unsigned long start, unsigned long end) | ||
207 | { | ||
208 | if (vma->vm_mm == current->active_mm) | ||
209 | __flush_tlb_up(); | ||
210 | } | ||
211 | |||
212 | static inline void flush_tlb_mm_range(struct mm_struct *mm, | ||
213 | unsigned long start, unsigned long end, unsigned long vmflag) | ||
214 | { | ||
215 | if (mm == current->active_mm) | ||
216 | __flush_tlb_up(); | ||
217 | } | ||
218 | |||
219 | static inline void native_flush_tlb_others(const struct cpumask *cpumask, | ||
220 | struct mm_struct *mm, | ||
221 | unsigned long start, | ||
222 | unsigned long end) | ||
223 | { | ||
224 | } | ||
225 | |||
226 | static inline void reset_lazy_tlbstate(void) | ||
227 | { | ||
228 | } | ||
229 | |||
230 | static inline void flush_tlb_kernel_range(unsigned long start, | ||
231 | unsigned long end) | ||
232 | { | ||
233 | flush_tlb_all(); | ||
234 | } | ||
235 | |||
236 | #else /* SMP */ | ||
237 | |||
238 | #include <asm/smp.h> | ||
239 | |||
240 | #define local_flush_tlb() __flush_tlb() | 273 | #define local_flush_tlb() __flush_tlb() |
241 | 274 | ||
242 | #define flush_tlb_mm(mm) flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL) | 275 | #define flush_tlb_mm(mm) flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL) |
@@ -245,13 +278,14 @@ static inline void flush_tlb_kernel_range(unsigned long start, | |||
245 | flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags) | 278 | flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags) |
246 | 279 | ||
247 | extern void flush_tlb_all(void); | 280 | extern void flush_tlb_all(void); |
248 | extern void flush_tlb_current_task(void); | ||
249 | extern void flush_tlb_page(struct vm_area_struct *, unsigned long); | ||
250 | extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, | 281 | extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, |
251 | unsigned long end, unsigned long vmflag); | 282 | unsigned long end, unsigned long vmflag); |
252 | extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); | 283 | extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); |
253 | 284 | ||
254 | #define flush_tlb() flush_tlb_current_task() | 285 | static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a) |
286 | { | ||
287 | flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, VM_NONE); | ||
288 | } | ||
255 | 289 | ||
256 | void native_flush_tlb_others(const struct cpumask *cpumask, | 290 | void native_flush_tlb_others(const struct cpumask *cpumask, |
257 | struct mm_struct *mm, | 291 | struct mm_struct *mm, |
@@ -266,14 +300,6 @@ static inline void reset_lazy_tlbstate(void) | |||
266 | this_cpu_write(cpu_tlbstate.active_mm, &init_mm); | 300 | this_cpu_write(cpu_tlbstate.active_mm, &init_mm); |
267 | } | 301 | } |
268 | 302 | ||
269 | #endif /* SMP */ | ||
270 | |||
271 | /* Not inlined due to inc_irq_stat not being defined yet */ | ||
272 | #define flush_tlb_local() { \ | ||
273 | inc_irq_stat(irq_tlb_count); \ | ||
274 | local_flush_tlb(); \ | ||
275 | } | ||
276 | |||
277 | #ifndef CONFIG_PARAVIRT | 303 | #ifndef CONFIG_PARAVIRT |
278 | #define flush_tlb_others(mask, mm, start, end) \ | 304 | #define flush_tlb_others(mask, mm, start, end) \ |
279 | native_flush_tlb_others(mask, mm, start, end) | 305 | native_flush_tlb_others(mask, mm, start, end) |
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h index c3496619740a..156959ca49ce 100644 --- a/arch/x86/include/asm/traps.h +++ b/arch/x86/include/asm/traps.h | |||
@@ -92,6 +92,7 @@ dotraplinkage void do_simd_coprocessor_error(struct pt_regs *, long); | |||
92 | #ifdef CONFIG_X86_32 | 92 | #ifdef CONFIG_X86_32 |
93 | dotraplinkage void do_iret_error(struct pt_regs *, long); | 93 | dotraplinkage void do_iret_error(struct pt_regs *, long); |
94 | #endif | 94 | #endif |
95 | dotraplinkage void do_mce(struct pt_regs *, long); | ||
95 | 96 | ||
96 | static inline int get_si_code(unsigned long condition) | 97 | static inline int get_si_code(unsigned long condition) |
97 | { | 98 | { |
diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h index 756de9190aec..deabaf9759b6 100644 --- a/arch/x86/include/asm/vdso.h +++ b/arch/x86/include/asm/vdso.h | |||
@@ -22,6 +22,7 @@ struct vdso_image { | |||
22 | 22 | ||
23 | long sym_vvar_page; | 23 | long sym_vvar_page; |
24 | long sym_hpet_page; | 24 | long sym_hpet_page; |
25 | long sym_pvclock_page; | ||
25 | long sym_VDSO32_NOTE_MASK; | 26 | long sym_VDSO32_NOTE_MASK; |
26 | long sym___kernel_sigreturn; | 27 | long sym___kernel_sigreturn; |
27 | long sym___kernel_rt_sigreturn; | 28 | long sym___kernel_rt_sigreturn; |
diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h index 6ba66ee79710..62210da19a92 100644 --- a/arch/x86/include/asm/vsyscall.h +++ b/arch/x86/include/asm/vsyscall.h | |||
@@ -12,12 +12,15 @@ extern void map_vsyscall(void); | |||
12 | * Returns true if handled. | 12 | * Returns true if handled. |
13 | */ | 13 | */ |
14 | extern bool emulate_vsyscall(struct pt_regs *regs, unsigned long address); | 14 | extern bool emulate_vsyscall(struct pt_regs *regs, unsigned long address); |
15 | extern bool vsyscall_enabled(void); | ||
15 | #else | 16 | #else |
16 | static inline void map_vsyscall(void) {} | 17 | static inline void map_vsyscall(void) {} |
17 | static inline bool emulate_vsyscall(struct pt_regs *regs, unsigned long address) | 18 | static inline bool emulate_vsyscall(struct pt_regs *regs, unsigned long address) |
18 | { | 19 | { |
19 | return false; | 20 | return false; |
20 | } | 21 | } |
22 | static inline bool vsyscall_enabled(void) { return false; } | ||
21 | #endif | 23 | #endif |
24 | extern unsigned long vsyscall_pgprot; | ||
22 | 25 | ||
23 | #endif /* _ASM_X86_VSYSCALL_H */ | 26 | #endif /* _ASM_X86_VSYSCALL_H */ |
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h index 85133b2b8e99..0977e7607046 100644 --- a/arch/x86/include/asm/xen/hypercall.h +++ b/arch/x86/include/asm/xen/hypercall.h | |||
@@ -44,6 +44,7 @@ | |||
44 | #include <asm/page.h> | 44 | #include <asm/page.h> |
45 | #include <asm/pgtable.h> | 45 | #include <asm/pgtable.h> |
46 | #include <asm/smap.h> | 46 | #include <asm/smap.h> |
47 | #include <asm/nospec-branch.h> | ||
47 | 48 | ||
48 | #include <xen/interface/xen.h> | 49 | #include <xen/interface/xen.h> |
49 | #include <xen/interface/sched.h> | 50 | #include <xen/interface/sched.h> |
@@ -215,9 +216,9 @@ privcmd_call(unsigned call, | |||
215 | __HYPERCALL_5ARG(a1, a2, a3, a4, a5); | 216 | __HYPERCALL_5ARG(a1, a2, a3, a4, a5); |
216 | 217 | ||
217 | stac(); | 218 | stac(); |
218 | asm volatile("call *%[call]" | 219 | asm volatile(CALL_NOSPEC |
219 | : __HYPERCALL_5PARAM | 220 | : __HYPERCALL_5PARAM |
220 | : [call] "a" (&hypercall_page[call]) | 221 | : [thunk_target] "a" (&hypercall_page[call]) |
221 | : __HYPERCALL_CLOBBER5); | 222 | : __HYPERCALL_CLOBBER5); |
222 | clac(); | 223 | clac(); |
223 | 224 | ||
diff --git a/arch/x86/include/uapi/asm/processor-flags.h b/arch/x86/include/uapi/asm/processor-flags.h index 79887abcb5e1..1361779f44fe 100644 --- a/arch/x86/include/uapi/asm/processor-flags.h +++ b/arch/x86/include/uapi/asm/processor-flags.h | |||
@@ -77,7 +77,8 @@ | |||
77 | #define X86_CR3_PWT _BITUL(X86_CR3_PWT_BIT) | 77 | #define X86_CR3_PWT _BITUL(X86_CR3_PWT_BIT) |
78 | #define X86_CR3_PCD_BIT 4 /* Page Cache Disable */ | 78 | #define X86_CR3_PCD_BIT 4 /* Page Cache Disable */ |
79 | #define X86_CR3_PCD _BITUL(X86_CR3_PCD_BIT) | 79 | #define X86_CR3_PCD _BITUL(X86_CR3_PCD_BIT) |
80 | #define X86_CR3_PCID_MASK _AC(0x00000fff,UL) /* PCID Mask */ | 80 | #define X86_CR3_PCID_NOFLUSH_BIT 63 /* Preserve old PCID */ |
81 | #define X86_CR3_PCID_NOFLUSH _BITULL(X86_CR3_PCID_NOFLUSH_BIT) | ||
81 | 82 | ||
82 | /* | 83 | /* |
83 | * Intel CPU features in CR4 | 84 | * Intel CPU features in CR4 |
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 1e5eb9f2ff5f..a1e4a6c3f394 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c | |||
@@ -321,13 +321,12 @@ acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long e | |||
321 | #ifdef CONFIG_X86_IO_APIC | 321 | #ifdef CONFIG_X86_IO_APIC |
322 | #define MP_ISA_BUS 0 | 322 | #define MP_ISA_BUS 0 |
323 | 323 | ||
324 | static int __init mp_register_ioapic_irq(u8 bus_irq, u8 polarity, | ||
325 | u8 trigger, u32 gsi); | ||
326 | |||
324 | static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, | 327 | static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, |
325 | u32 gsi) | 328 | u32 gsi) |
326 | { | 329 | { |
327 | int ioapic; | ||
328 | int pin; | ||
329 | struct mpc_intsrc mp_irq; | ||
330 | |||
331 | /* | 330 | /* |
332 | * Check bus_irq boundary. | 331 | * Check bus_irq boundary. |
333 | */ | 332 | */ |
@@ -337,14 +336,6 @@ static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, | |||
337 | } | 336 | } |
338 | 337 | ||
339 | /* | 338 | /* |
340 | * Convert 'gsi' to 'ioapic.pin'. | ||
341 | */ | ||
342 | ioapic = mp_find_ioapic(gsi); | ||
343 | if (ioapic < 0) | ||
344 | return; | ||
345 | pin = mp_find_ioapic_pin(ioapic, gsi); | ||
346 | |||
347 | /* | ||
348 | * TBD: This check is for faulty timer entries, where the override | 339 | * TBD: This check is for faulty timer entries, where the override |
349 | * erroneously sets the trigger to level, resulting in a HUGE | 340 | * erroneously sets the trigger to level, resulting in a HUGE |
350 | * increase of timer interrupts! | 341 | * increase of timer interrupts! |
@@ -352,16 +343,8 @@ static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, | |||
352 | if ((bus_irq == 0) && (trigger == 3)) | 343 | if ((bus_irq == 0) && (trigger == 3)) |
353 | trigger = 1; | 344 | trigger = 1; |
354 | 345 | ||
355 | mp_irq.type = MP_INTSRC; | 346 | if (mp_register_ioapic_irq(bus_irq, polarity, trigger, gsi) < 0) |
356 | mp_irq.irqtype = mp_INT; | 347 | return; |
357 | mp_irq.irqflag = (trigger << 2) | polarity; | ||
358 | mp_irq.srcbus = MP_ISA_BUS; | ||
359 | mp_irq.srcbusirq = bus_irq; /* IRQ */ | ||
360 | mp_irq.dstapic = mpc_ioapic_id(ioapic); /* APIC ID */ | ||
361 | mp_irq.dstirq = pin; /* INTIN# */ | ||
362 | |||
363 | mp_save_irq(&mp_irq); | ||
364 | |||
365 | /* | 348 | /* |
366 | * Reset default identity mapping if gsi is also an legacy IRQ, | 349 | * Reset default identity mapping if gsi is also an legacy IRQ, |
367 | * otherwise there will be more than one entry with the same GSI | 350 | * otherwise there will be more than one entry with the same GSI |
@@ -408,6 +391,34 @@ static int mp_config_acpi_gsi(struct device *dev, u32 gsi, int trigger, | |||
408 | return 0; | 391 | return 0; |
409 | } | 392 | } |
410 | 393 | ||
394 | static int __init mp_register_ioapic_irq(u8 bus_irq, u8 polarity, | ||
395 | u8 trigger, u32 gsi) | ||
396 | { | ||
397 | struct mpc_intsrc mp_irq; | ||
398 | int ioapic, pin; | ||
399 | |||
400 | /* Convert 'gsi' to 'ioapic.pin'(INTIN#) */ | ||
401 | ioapic = mp_find_ioapic(gsi); | ||
402 | if (ioapic < 0) { | ||
403 | pr_warn("Failed to find ioapic for gsi : %u\n", gsi); | ||
404 | return ioapic; | ||
405 | } | ||
406 | |||
407 | pin = mp_find_ioapic_pin(ioapic, gsi); | ||
408 | |||
409 | mp_irq.type = MP_INTSRC; | ||
410 | mp_irq.irqtype = mp_INT; | ||
411 | mp_irq.irqflag = (trigger << 2) | polarity; | ||
412 | mp_irq.srcbus = MP_ISA_BUS; | ||
413 | mp_irq.srcbusirq = bus_irq; | ||
414 | mp_irq.dstapic = mpc_ioapic_id(ioapic); | ||
415 | mp_irq.dstirq = pin; | ||
416 | |||
417 | mp_save_irq(&mp_irq); | ||
418 | |||
419 | return 0; | ||
420 | } | ||
421 | |||
411 | static int __init | 422 | static int __init |
412 | acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end) | 423 | acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end) |
413 | { | 424 | { |
@@ -452,7 +463,11 @@ static void __init acpi_sci_ioapic_setup(u8 bus_irq, u16 polarity, u16 trigger, | |||
452 | if (acpi_sci_flags & ACPI_MADT_POLARITY_MASK) | 463 | if (acpi_sci_flags & ACPI_MADT_POLARITY_MASK) |
453 | polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK; | 464 | polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK; |
454 | 465 | ||
455 | mp_override_legacy_irq(bus_irq, polarity, trigger, gsi); | 466 | if (bus_irq < NR_IRQS_LEGACY) |
467 | mp_override_legacy_irq(bus_irq, polarity, trigger, gsi); | ||
468 | else | ||
469 | mp_register_ioapic_irq(bus_irq, polarity, trigger, gsi); | ||
470 | |||
456 | acpi_penalize_sci_irq(bus_irq, trigger, polarity); | 471 | acpi_penalize_sci_irq(bus_irq, trigger, polarity); |
457 | 472 | ||
458 | /* | 473 | /* |
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 25f909362b7a..d6f375f1b928 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c | |||
@@ -339,9 +339,12 @@ done: | |||
339 | static void __init_or_module optimize_nops(struct alt_instr *a, u8 *instr) | 339 | static void __init_or_module optimize_nops(struct alt_instr *a, u8 *instr) |
340 | { | 340 | { |
341 | unsigned long flags; | 341 | unsigned long flags; |
342 | int i; | ||
342 | 343 | ||
343 | if (instr[0] != 0x90) | 344 | for (i = 0; i < a->padlen; i++) { |
344 | return; | 345 | if (instr[i] != 0x90) |
346 | return; | ||
347 | } | ||
345 | 348 | ||
346 | local_irq_save(flags); | 349 | local_irq_save(flags); |
347 | add_nops(instr + (a->instrlen - a->padlen), a->padlen); | 350 | add_nops(instr + (a->instrlen - a->padlen), a->padlen); |
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index fc91c98bee01..fd945099fc95 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
@@ -2592,8 +2592,8 @@ static struct resource * __init ioapic_setup_resources(void) | |||
2592 | res[num].flags = IORESOURCE_MEM | IORESOURCE_BUSY; | 2592 | res[num].flags = IORESOURCE_MEM | IORESOURCE_BUSY; |
2593 | snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i); | 2593 | snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i); |
2594 | mem += IOAPIC_RESOURCE_NAME_SIZE; | 2594 | mem += IOAPIC_RESOURCE_NAME_SIZE; |
2595 | ioapics[i].iomem_res = &res[num]; | ||
2595 | num++; | 2596 | num++; |
2596 | ioapics[i].iomem_res = res; | ||
2597 | } | 2597 | } |
2598 | 2598 | ||
2599 | ioapic_resources = res; | 2599 | ioapic_resources = res; |
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index 0988e204f1e3..a41e523536a2 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c | |||
@@ -359,14 +359,17 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq, | |||
359 | irq_data->chip_data = data; | 359 | irq_data->chip_data = data; |
360 | irq_data->hwirq = virq + i; | 360 | irq_data->hwirq = virq + i; |
361 | err = assign_irq_vector_policy(virq + i, node, data, info); | 361 | err = assign_irq_vector_policy(virq + i, node, data, info); |
362 | if (err) | 362 | if (err) { |
363 | irq_data->chip_data = NULL; | ||
364 | free_apic_chip_data(data); | ||
363 | goto error; | 365 | goto error; |
366 | } | ||
364 | } | 367 | } |
365 | 368 | ||
366 | return 0; | 369 | return 0; |
367 | 370 | ||
368 | error: | 371 | error: |
369 | x86_vector_free_irqs(domain, virq, i + 1); | 372 | x86_vector_free_irqs(domain, virq, i); |
370 | return err; | 373 | return err; |
371 | } | 374 | } |
372 | 375 | ||
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index 58031303e304..8f184615053b 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile | |||
@@ -16,13 +16,11 @@ obj-y := intel_cacheinfo.o scattered.o topology.o | |||
16 | obj-y += common.o | 16 | obj-y += common.o |
17 | obj-y += rdrand.o | 17 | obj-y += rdrand.o |
18 | obj-y += match.o | 18 | obj-y += match.o |
19 | obj-y += bugs.o | ||
19 | 20 | ||
20 | obj-$(CONFIG_PROC_FS) += proc.o | 21 | obj-$(CONFIG_PROC_FS) += proc.o |
21 | obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o | 22 | obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o |
22 | 23 | ||
23 | obj-$(CONFIG_X86_32) += bugs.o | ||
24 | obj-$(CONFIG_X86_64) += bugs_64.o | ||
25 | |||
26 | obj-$(CONFIG_CPU_SUP_INTEL) += intel.o | 24 | obj-$(CONFIG_CPU_SUP_INTEL) += intel.o |
27 | obj-$(CONFIG_CPU_SUP_AMD) += amd.o | 25 | obj-$(CONFIG_CPU_SUP_AMD) += amd.o |
28 | obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o | 26 | obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o |
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index e2defc7593a4..4bf9e77f3e05 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
@@ -746,8 +746,32 @@ static void init_amd(struct cpuinfo_x86 *c) | |||
746 | set_cpu_cap(c, X86_FEATURE_K8); | 746 | set_cpu_cap(c, X86_FEATURE_K8); |
747 | 747 | ||
748 | if (cpu_has_xmm2) { | 748 | if (cpu_has_xmm2) { |
749 | /* MFENCE stops RDTSC speculation */ | 749 | unsigned long long val; |
750 | set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC); | 750 | int ret; |
751 | |||
752 | /* | ||
753 | * A serializing LFENCE has less overhead than MFENCE, so | ||
754 | * use it for execution serialization. On families which | ||
755 | * don't have that MSR, LFENCE is already serializing. | ||
756 | * msr_set_bit() uses the safe accessors, too, even if the MSR | ||
757 | * is not present. | ||
758 | */ | ||
759 | msr_set_bit(MSR_F10H_DECFG, | ||
760 | MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT); | ||
761 | |||
762 | /* | ||
763 | * Verify that the MSR write was successful (could be running | ||
764 | * under a hypervisor) and only then assume that LFENCE is | ||
765 | * serializing. | ||
766 | */ | ||
767 | ret = rdmsrl_safe(MSR_F10H_DECFG, &val); | ||
768 | if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) { | ||
769 | /* A serializing LFENCE stops RDTSC speculation */ | ||
770 | set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); | ||
771 | } else { | ||
772 | /* MFENCE stops RDTSC speculation */ | ||
773 | set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC); | ||
774 | } | ||
751 | } | 775 | } |
752 | 776 | ||
753 | /* | 777 | /* |
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index bd17db15a2c1..8cacf62ec458 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c | |||
@@ -9,6 +9,10 @@ | |||
9 | */ | 9 | */ |
10 | #include <linux/init.h> | 10 | #include <linux/init.h> |
11 | #include <linux/utsname.h> | 11 | #include <linux/utsname.h> |
12 | #include <linux/cpu.h> | ||
13 | |||
14 | #include <asm/nospec-branch.h> | ||
15 | #include <asm/cmdline.h> | ||
12 | #include <asm/bugs.h> | 16 | #include <asm/bugs.h> |
13 | #include <asm/processor.h> | 17 | #include <asm/processor.h> |
14 | #include <asm/processor-flags.h> | 18 | #include <asm/processor-flags.h> |
@@ -16,15 +20,25 @@ | |||
16 | #include <asm/msr.h> | 20 | #include <asm/msr.h> |
17 | #include <asm/paravirt.h> | 21 | #include <asm/paravirt.h> |
18 | #include <asm/alternative.h> | 22 | #include <asm/alternative.h> |
23 | #include <asm/pgtable.h> | ||
24 | #include <asm/cacheflush.h> | ||
25 | #include <asm/intel-family.h> | ||
26 | |||
27 | static void __init spectre_v2_select_mitigation(void); | ||
19 | 28 | ||
20 | void __init check_bugs(void) | 29 | void __init check_bugs(void) |
21 | { | 30 | { |
22 | identify_boot_cpu(); | 31 | identify_boot_cpu(); |
23 | #ifndef CONFIG_SMP | ||
24 | pr_info("CPU: "); | ||
25 | print_cpu_info(&boot_cpu_data); | ||
26 | #endif | ||
27 | 32 | ||
33 | if (!IS_ENABLED(CONFIG_SMP)) { | ||
34 | pr_info("CPU: "); | ||
35 | print_cpu_info(&boot_cpu_data); | ||
36 | } | ||
37 | |||
38 | /* Select the proper spectre mitigation before patching alternatives */ | ||
39 | spectre_v2_select_mitigation(); | ||
40 | |||
41 | #ifdef CONFIG_X86_32 | ||
28 | /* | 42 | /* |
29 | * Check whether we are able to run this kernel safely on SMP. | 43 | * Check whether we are able to run this kernel safely on SMP. |
30 | * | 44 | * |
@@ -40,4 +54,229 @@ void __init check_bugs(void) | |||
40 | alternative_instructions(); | 54 | alternative_instructions(); |
41 | 55 | ||
42 | fpu__init_check_bugs(); | 56 | fpu__init_check_bugs(); |
57 | #else /* CONFIG_X86_64 */ | ||
58 | alternative_instructions(); | ||
59 | |||
60 | /* | ||
61 | * Make sure the first 2MB area is not mapped by huge pages | ||
62 | * There are typically fixed size MTRRs in there and overlapping | ||
63 | * MTRRs into large pages causes slow downs. | ||
64 | * | ||
65 | * Right now we don't do that with gbpages because there seems | ||
66 | * very little benefit for that case. | ||
67 | */ | ||
68 | if (!direct_gbpages) | ||
69 | set_memory_4k((unsigned long)__va(0), 1); | ||
70 | #endif | ||
71 | } | ||
72 | |||
73 | /* The kernel command line selection */ | ||
74 | enum spectre_v2_mitigation_cmd { | ||
75 | SPECTRE_V2_CMD_NONE, | ||
76 | SPECTRE_V2_CMD_AUTO, | ||
77 | SPECTRE_V2_CMD_FORCE, | ||
78 | SPECTRE_V2_CMD_RETPOLINE, | ||
79 | SPECTRE_V2_CMD_RETPOLINE_GENERIC, | ||
80 | SPECTRE_V2_CMD_RETPOLINE_AMD, | ||
81 | }; | ||
82 | |||
83 | static const char *spectre_v2_strings[] = { | ||
84 | [SPECTRE_V2_NONE] = "Vulnerable", | ||
85 | [SPECTRE_V2_RETPOLINE_MINIMAL] = "Vulnerable: Minimal generic ASM retpoline", | ||
86 | [SPECTRE_V2_RETPOLINE_MINIMAL_AMD] = "Vulnerable: Minimal AMD ASM retpoline", | ||
87 | [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline", | ||
88 | [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline", | ||
89 | }; | ||
90 | |||
91 | #undef pr_fmt | ||
92 | #define pr_fmt(fmt) "Spectre V2 mitigation: " fmt | ||
93 | |||
94 | static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE; | ||
95 | |||
96 | static void __init spec2_print_if_insecure(const char *reason) | ||
97 | { | ||
98 | if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) | ||
99 | pr_info("%s\n", reason); | ||
100 | } | ||
101 | |||
102 | static void __init spec2_print_if_secure(const char *reason) | ||
103 | { | ||
104 | if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) | ||
105 | pr_info("%s\n", reason); | ||
106 | } | ||
107 | |||
108 | static inline bool retp_compiler(void) | ||
109 | { | ||
110 | return __is_defined(RETPOLINE); | ||
43 | } | 111 | } |
112 | |||
113 | static inline bool match_option(const char *arg, int arglen, const char *opt) | ||
114 | { | ||
115 | int len = strlen(opt); | ||
116 | |||
117 | return len == arglen && !strncmp(arg, opt, len); | ||
118 | } | ||
119 | |||
120 | static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) | ||
121 | { | ||
122 | char arg[20]; | ||
123 | int ret; | ||
124 | |||
125 | ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, | ||
126 | sizeof(arg)); | ||
127 | if (ret > 0) { | ||
128 | if (match_option(arg, ret, "off")) { | ||
129 | goto disable; | ||
130 | } else if (match_option(arg, ret, "on")) { | ||
131 | spec2_print_if_secure("force enabled on command line."); | ||
132 | return SPECTRE_V2_CMD_FORCE; | ||
133 | } else if (match_option(arg, ret, "retpoline")) { | ||
134 | spec2_print_if_insecure("retpoline selected on command line."); | ||
135 | return SPECTRE_V2_CMD_RETPOLINE; | ||
136 | } else if (match_option(arg, ret, "retpoline,amd")) { | ||
137 | if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) { | ||
138 | pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n"); | ||
139 | return SPECTRE_V2_CMD_AUTO; | ||
140 | } | ||
141 | spec2_print_if_insecure("AMD retpoline selected on command line."); | ||
142 | return SPECTRE_V2_CMD_RETPOLINE_AMD; | ||
143 | } else if (match_option(arg, ret, "retpoline,generic")) { | ||
144 | spec2_print_if_insecure("generic retpoline selected on command line."); | ||
145 | return SPECTRE_V2_CMD_RETPOLINE_GENERIC; | ||
146 | } else if (match_option(arg, ret, "auto")) { | ||
147 | return SPECTRE_V2_CMD_AUTO; | ||
148 | } | ||
149 | } | ||
150 | |||
151 | if (!cmdline_find_option_bool(boot_command_line, "nospectre_v2")) | ||
152 | return SPECTRE_V2_CMD_AUTO; | ||
153 | disable: | ||
154 | spec2_print_if_insecure("disabled on command line."); | ||
155 | return SPECTRE_V2_CMD_NONE; | ||
156 | } | ||
157 | |||
158 | /* Check for Skylake-like CPUs (for RSB handling) */ | ||
159 | static bool __init is_skylake_era(void) | ||
160 | { | ||
161 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && | ||
162 | boot_cpu_data.x86 == 6) { | ||
163 | switch (boot_cpu_data.x86_model) { | ||
164 | case INTEL_FAM6_SKYLAKE_MOBILE: | ||
165 | case INTEL_FAM6_SKYLAKE_DESKTOP: | ||
166 | case INTEL_FAM6_SKYLAKE_X: | ||
167 | case INTEL_FAM6_KABYLAKE_MOBILE: | ||
168 | case INTEL_FAM6_KABYLAKE_DESKTOP: | ||
169 | return true; | ||
170 | } | ||
171 | } | ||
172 | return false; | ||
173 | } | ||
174 | |||
175 | static void __init spectre_v2_select_mitigation(void) | ||
176 | { | ||
177 | enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline(); | ||
178 | enum spectre_v2_mitigation mode = SPECTRE_V2_NONE; | ||
179 | |||
180 | /* | ||
181 | * If the CPU is not affected and the command line mode is NONE or AUTO | ||
182 | * then nothing to do. | ||
183 | */ | ||
184 | if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) && | ||
185 | (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO)) | ||
186 | return; | ||
187 | |||
188 | switch (cmd) { | ||
189 | case SPECTRE_V2_CMD_NONE: | ||
190 | return; | ||
191 | |||
192 | case SPECTRE_V2_CMD_FORCE: | ||
193 | /* FALLTRHU */ | ||
194 | case SPECTRE_V2_CMD_AUTO: | ||
195 | goto retpoline_auto; | ||
196 | |||
197 | case SPECTRE_V2_CMD_RETPOLINE_AMD: | ||
198 | if (IS_ENABLED(CONFIG_RETPOLINE)) | ||
199 | goto retpoline_amd; | ||
200 | break; | ||
201 | case SPECTRE_V2_CMD_RETPOLINE_GENERIC: | ||
202 | if (IS_ENABLED(CONFIG_RETPOLINE)) | ||
203 | goto retpoline_generic; | ||
204 | break; | ||
205 | case SPECTRE_V2_CMD_RETPOLINE: | ||
206 | if (IS_ENABLED(CONFIG_RETPOLINE)) | ||
207 | goto retpoline_auto; | ||
208 | break; | ||
209 | } | ||
210 | pr_err("kernel not compiled with retpoline; no mitigation available!"); | ||
211 | return; | ||
212 | |||
213 | retpoline_auto: | ||
214 | if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) { | ||
215 | retpoline_amd: | ||
216 | if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) { | ||
217 | pr_err("LFENCE not serializing. Switching to generic retpoline\n"); | ||
218 | goto retpoline_generic; | ||
219 | } | ||
220 | mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD : | ||
221 | SPECTRE_V2_RETPOLINE_MINIMAL_AMD; | ||
222 | setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD); | ||
223 | setup_force_cpu_cap(X86_FEATURE_RETPOLINE); | ||
224 | } else { | ||
225 | retpoline_generic: | ||
226 | mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC : | ||
227 | SPECTRE_V2_RETPOLINE_MINIMAL; | ||
228 | setup_force_cpu_cap(X86_FEATURE_RETPOLINE); | ||
229 | } | ||
230 | |||
231 | spectre_v2_enabled = mode; | ||
232 | pr_info("%s\n", spectre_v2_strings[mode]); | ||
233 | |||
234 | /* | ||
235 | * If neither SMEP or KPTI are available, there is a risk of | ||
236 | * hitting userspace addresses in the RSB after a context switch | ||
237 | * from a shallow call stack to a deeper one. To prevent this fill | ||
238 | * the entire RSB, even when using IBRS. | ||
239 | * | ||
240 | * Skylake era CPUs have a separate issue with *underflow* of the | ||
241 | * RSB, when they will predict 'ret' targets from the generic BTB. | ||
242 | * The proper mitigation for this is IBRS. If IBRS is not supported | ||
243 | * or deactivated in favour of retpolines the RSB fill on context | ||
244 | * switch is required. | ||
245 | */ | ||
246 | if ((!boot_cpu_has(X86_FEATURE_KAISER) && | ||
247 | !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) { | ||
248 | setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); | ||
249 | pr_info("Filling RSB on context switch\n"); | ||
250 | } | ||
251 | } | ||
252 | |||
253 | #undef pr_fmt | ||
254 | |||
255 | #ifdef CONFIG_SYSFS | ||
256 | ssize_t cpu_show_meltdown(struct device *dev, | ||
257 | struct device_attribute *attr, char *buf) | ||
258 | { | ||
259 | if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN)) | ||
260 | return sprintf(buf, "Not affected\n"); | ||
261 | if (boot_cpu_has(X86_FEATURE_KAISER)) | ||
262 | return sprintf(buf, "Mitigation: PTI\n"); | ||
263 | return sprintf(buf, "Vulnerable\n"); | ||
264 | } | ||
265 | |||
266 | ssize_t cpu_show_spectre_v1(struct device *dev, | ||
267 | struct device_attribute *attr, char *buf) | ||
268 | { | ||
269 | if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1)) | ||
270 | return sprintf(buf, "Not affected\n"); | ||
271 | return sprintf(buf, "Vulnerable\n"); | ||
272 | } | ||
273 | |||
274 | ssize_t cpu_show_spectre_v2(struct device *dev, | ||
275 | struct device_attribute *attr, char *buf) | ||
276 | { | ||
277 | if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) | ||
278 | return sprintf(buf, "Not affected\n"); | ||
279 | |||
280 | return sprintf(buf, "%s\n", spectre_v2_strings[spectre_v2_enabled]); | ||
281 | } | ||
282 | #endif | ||
diff --git a/arch/x86/kernel/cpu/bugs_64.c b/arch/x86/kernel/cpu/bugs_64.c deleted file mode 100644 index 04f0fe5af83e..000000000000 --- a/arch/x86/kernel/cpu/bugs_64.c +++ /dev/null | |||
@@ -1,33 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 1994 Linus Torvalds | ||
3 | * Copyright (C) 2000 SuSE | ||
4 | */ | ||
5 | |||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/init.h> | ||
8 | #include <asm/alternative.h> | ||
9 | #include <asm/bugs.h> | ||
10 | #include <asm/processor.h> | ||
11 | #include <asm/mtrr.h> | ||
12 | #include <asm/cacheflush.h> | ||
13 | |||
14 | void __init check_bugs(void) | ||
15 | { | ||
16 | identify_boot_cpu(); | ||
17 | #if !defined(CONFIG_SMP) | ||
18 | printk(KERN_INFO "CPU: "); | ||
19 | print_cpu_info(&boot_cpu_data); | ||
20 | #endif | ||
21 | alternative_instructions(); | ||
22 | |||
23 | /* | ||
24 | * Make sure the first 2MB area is not mapped by huge pages | ||
25 | * There are typically fixed size MTRRs in there and overlapping | ||
26 | * MTRRs into large pages causes slow downs. | ||
27 | * | ||
28 | * Right now we don't do that with gbpages because there seems | ||
29 | * very little benefit for that case. | ||
30 | */ | ||
31 | if (!direct_gbpages) | ||
32 | set_memory_4k((unsigned long)__va(0), 1); | ||
33 | } | ||
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 637ca414d431..8eabbafff213 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -92,7 +92,7 @@ static const struct cpu_dev default_cpu = { | |||
92 | 92 | ||
93 | static const struct cpu_dev *this_cpu = &default_cpu; | 93 | static const struct cpu_dev *this_cpu = &default_cpu; |
94 | 94 | ||
95 | DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { | 95 | DEFINE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(struct gdt_page, gdt_page) = { .gdt = { |
96 | #ifdef CONFIG_X86_64 | 96 | #ifdef CONFIG_X86_64 |
97 | /* | 97 | /* |
98 | * We need valid kernel segments for data and code in long mode too | 98 | * We need valid kernel segments for data and code in long mode too |
@@ -162,6 +162,40 @@ static int __init x86_mpx_setup(char *s) | |||
162 | } | 162 | } |
163 | __setup("nompx", x86_mpx_setup); | 163 | __setup("nompx", x86_mpx_setup); |
164 | 164 | ||
165 | #ifdef CONFIG_X86_64 | ||
166 | static int __init x86_pcid_setup(char *s) | ||
167 | { | ||
168 | /* require an exact match without trailing characters */ | ||
169 | if (strlen(s)) | ||
170 | return 0; | ||
171 | |||
172 | /* do not emit a message if the feature is not present */ | ||
173 | if (!boot_cpu_has(X86_FEATURE_PCID)) | ||
174 | return 1; | ||
175 | |||
176 | setup_clear_cpu_cap(X86_FEATURE_PCID); | ||
177 | pr_info("nopcid: PCID feature disabled\n"); | ||
178 | return 1; | ||
179 | } | ||
180 | __setup("nopcid", x86_pcid_setup); | ||
181 | #endif | ||
182 | |||
183 | static int __init x86_noinvpcid_setup(char *s) | ||
184 | { | ||
185 | /* noinvpcid doesn't accept parameters */ | ||
186 | if (s) | ||
187 | return -EINVAL; | ||
188 | |||
189 | /* do not emit a message if the feature is not present */ | ||
190 | if (!boot_cpu_has(X86_FEATURE_INVPCID)) | ||
191 | return 0; | ||
192 | |||
193 | setup_clear_cpu_cap(X86_FEATURE_INVPCID); | ||
194 | pr_info("noinvpcid: INVPCID feature disabled\n"); | ||
195 | return 0; | ||
196 | } | ||
197 | early_param("noinvpcid", x86_noinvpcid_setup); | ||
198 | |||
165 | #ifdef CONFIG_X86_32 | 199 | #ifdef CONFIG_X86_32 |
166 | static int cachesize_override = -1; | 200 | static int cachesize_override = -1; |
167 | static int disable_x86_serial_nr = 1; | 201 | static int disable_x86_serial_nr = 1; |
@@ -287,6 +321,39 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c) | |||
287 | } | 321 | } |
288 | } | 322 | } |
289 | 323 | ||
324 | static void setup_pcid(struct cpuinfo_x86 *c) | ||
325 | { | ||
326 | if (cpu_has(c, X86_FEATURE_PCID)) { | ||
327 | if (cpu_has(c, X86_FEATURE_PGE) || kaiser_enabled) { | ||
328 | cr4_set_bits(X86_CR4_PCIDE); | ||
329 | /* | ||
330 | * INVPCID has two "groups" of types: | ||
331 | * 1/2: Invalidate an individual address | ||
332 | * 3/4: Invalidate all contexts | ||
333 | * | ||
334 | * 1/2 take a PCID, but 3/4 do not. So, 3/4 | ||
335 | * ignore the PCID argument in the descriptor. | ||
336 | * But, we have to be careful not to call 1/2 | ||
337 | * with an actual non-zero PCID in them before | ||
338 | * we do the above cr4_set_bits(). | ||
339 | */ | ||
340 | if (cpu_has(c, X86_FEATURE_INVPCID)) | ||
341 | set_cpu_cap(c, X86_FEATURE_INVPCID_SINGLE); | ||
342 | } else { | ||
343 | /* | ||
344 | * flush_tlb_all(), as currently implemented, won't | ||
345 | * work if PCID is on but PGE is not. Since that | ||
346 | * combination doesn't exist on real hardware, there's | ||
347 | * no reason to try to fully support it, but it's | ||
348 | * polite to avoid corrupting data if we're on | ||
349 | * an improperly configured VM. | ||
350 | */ | ||
351 | clear_cpu_cap(c, X86_FEATURE_PCID); | ||
352 | } | ||
353 | } | ||
354 | kaiser_setup_pcid(); | ||
355 | } | ||
356 | |||
290 | /* | 357 | /* |
291 | * Some CPU features depend on higher CPUID levels, which may not always | 358 | * Some CPU features depend on higher CPUID levels, which may not always |
292 | * be available due to CPUID level capping or broken virtualization | 359 | * be available due to CPUID level capping or broken virtualization |
@@ -365,8 +432,8 @@ static const char *table_lookup_model(struct cpuinfo_x86 *c) | |||
365 | return NULL; /* Not found */ | 432 | return NULL; /* Not found */ |
366 | } | 433 | } |
367 | 434 | ||
368 | __u32 cpu_caps_cleared[NCAPINTS]; | 435 | __u32 cpu_caps_cleared[NCAPINTS + NBUGINTS]; |
369 | __u32 cpu_caps_set[NCAPINTS]; | 436 | __u32 cpu_caps_set[NCAPINTS + NBUGINTS]; |
370 | 437 | ||
371 | void load_percpu_segment(int cpu) | 438 | void load_percpu_segment(int cpu) |
372 | { | 439 | { |
@@ -597,6 +664,16 @@ void cpu_detect(struct cpuinfo_x86 *c) | |||
597 | } | 664 | } |
598 | } | 665 | } |
599 | 666 | ||
667 | static void apply_forced_caps(struct cpuinfo_x86 *c) | ||
668 | { | ||
669 | int i; | ||
670 | |||
671 | for (i = 0; i < NCAPINTS + NBUGINTS; i++) { | ||
672 | c->x86_capability[i] &= ~cpu_caps_cleared[i]; | ||
673 | c->x86_capability[i] |= cpu_caps_set[i]; | ||
674 | } | ||
675 | } | ||
676 | |||
600 | void get_cpu_cap(struct cpuinfo_x86 *c) | 677 | void get_cpu_cap(struct cpuinfo_x86 *c) |
601 | { | 678 | { |
602 | u32 tfms, xlvl; | 679 | u32 tfms, xlvl; |
@@ -753,7 +830,22 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) | |||
753 | } | 830 | } |
754 | 831 | ||
755 | setup_force_cpu_cap(X86_FEATURE_ALWAYS); | 832 | setup_force_cpu_cap(X86_FEATURE_ALWAYS); |
833 | |||
834 | if (c->x86_vendor != X86_VENDOR_AMD) | ||
835 | setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN); | ||
836 | |||
837 | setup_force_cpu_bug(X86_BUG_SPECTRE_V1); | ||
838 | setup_force_cpu_bug(X86_BUG_SPECTRE_V2); | ||
839 | |||
756 | fpu__init_system(c); | 840 | fpu__init_system(c); |
841 | |||
842 | #ifdef CONFIG_X86_32 | ||
843 | /* | ||
844 | * Regardless of whether PCID is enumerated, the SDM says | ||
845 | * that it can't be enabled in 32-bit mode. | ||
846 | */ | ||
847 | setup_clear_cpu_cap(X86_FEATURE_PCID); | ||
848 | #endif | ||
757 | } | 849 | } |
758 | 850 | ||
759 | void __init early_cpu_init(void) | 851 | void __init early_cpu_init(void) |
@@ -863,7 +955,7 @@ static void identify_cpu(struct cpuinfo_x86 *c) | |||
863 | int i; | 955 | int i; |
864 | 956 | ||
865 | c->loops_per_jiffy = loops_per_jiffy; | 957 | c->loops_per_jiffy = loops_per_jiffy; |
866 | c->x86_cache_size = -1; | 958 | c->x86_cache_size = 0; |
867 | c->x86_vendor = X86_VENDOR_UNKNOWN; | 959 | c->x86_vendor = X86_VENDOR_UNKNOWN; |
868 | c->x86_model = c->x86_mask = 0; /* So far unknown... */ | 960 | c->x86_model = c->x86_mask = 0; /* So far unknown... */ |
869 | c->x86_vendor_id[0] = '\0'; /* Unset */ | 961 | c->x86_vendor_id[0] = '\0'; /* Unset */ |
@@ -888,11 +980,8 @@ static void identify_cpu(struct cpuinfo_x86 *c) | |||
888 | if (this_cpu->c_identify) | 980 | if (this_cpu->c_identify) |
889 | this_cpu->c_identify(c); | 981 | this_cpu->c_identify(c); |
890 | 982 | ||
891 | /* Clear/Set all flags overriden by options, after probe */ | 983 | /* Clear/Set all flags overridden by options, after probe */ |
892 | for (i = 0; i < NCAPINTS; i++) { | 984 | apply_forced_caps(c); |
893 | c->x86_capability[i] &= ~cpu_caps_cleared[i]; | ||
894 | c->x86_capability[i] |= cpu_caps_set[i]; | ||
895 | } | ||
896 | 985 | ||
897 | #ifdef CONFIG_X86_64 | 986 | #ifdef CONFIG_X86_64 |
898 | c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); | 987 | c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); |
@@ -918,6 +1007,9 @@ static void identify_cpu(struct cpuinfo_x86 *c) | |||
918 | setup_smep(c); | 1007 | setup_smep(c); |
919 | setup_smap(c); | 1008 | setup_smap(c); |
920 | 1009 | ||
1010 | /* Set up PCID */ | ||
1011 | setup_pcid(c); | ||
1012 | |||
921 | /* | 1013 | /* |
922 | * The vendor-specific functions might have changed features. | 1014 | * The vendor-specific functions might have changed features. |
923 | * Now we do "generic changes." | 1015 | * Now we do "generic changes." |
@@ -950,10 +1042,7 @@ static void identify_cpu(struct cpuinfo_x86 *c) | |||
950 | * Clear/Set all flags overriden by options, need do it | 1042 | * Clear/Set all flags overriden by options, need do it |
951 | * before following smp all cpus cap AND. | 1043 | * before following smp all cpus cap AND. |
952 | */ | 1044 | */ |
953 | for (i = 0; i < NCAPINTS; i++) { | 1045 | apply_forced_caps(c); |
954 | c->x86_capability[i] &= ~cpu_caps_cleared[i]; | ||
955 | c->x86_capability[i] |= cpu_caps_set[i]; | ||
956 | } | ||
957 | 1046 | ||
958 | /* | 1047 | /* |
959 | * On SMP, boot_cpu_data holds the common feature set between | 1048 | * On SMP, boot_cpu_data holds the common feature set between |
@@ -1173,7 +1262,7 @@ static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = { | |||
1173 | [DEBUG_STACK - 1] = DEBUG_STKSZ | 1262 | [DEBUG_STACK - 1] = DEBUG_STKSZ |
1174 | }; | 1263 | }; |
1175 | 1264 | ||
1176 | static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks | 1265 | DEFINE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(char, exception_stacks |
1177 | [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]); | 1266 | [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]); |
1178 | 1267 | ||
1179 | /* May not be marked __init: used by software suspend */ | 1268 | /* May not be marked __init: used by software suspend */ |
@@ -1336,6 +1425,14 @@ void cpu_init(void) | |||
1336 | * try to read it. | 1425 | * try to read it. |
1337 | */ | 1426 | */ |
1338 | cr4_init_shadow(); | 1427 | cr4_init_shadow(); |
1428 | if (!kaiser_enabled) { | ||
1429 | /* | ||
1430 | * secondary_startup_64() deferred setting PGE in cr4: | ||
1431 | * probe_page_size_mask() sets it on the boot cpu, | ||
1432 | * but it needs to be set on each secondary cpu. | ||
1433 | */ | ||
1434 | cr4_set_bits(X86_CR4_PGE); | ||
1435 | } | ||
1339 | 1436 | ||
1340 | /* | 1437 | /* |
1341 | * Load microcode on this cpu if a valid microcode is available. | 1438 | * Load microcode on this cpu if a valid microcode is available. |
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index e38d338a6447..b4ca91cf55b0 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c | |||
@@ -934,6 +934,8 @@ static int __populate_cache_leaves(unsigned int cpu) | |||
934 | ci_leaf_init(this_leaf++, &id4_regs); | 934 | ci_leaf_init(this_leaf++, &id4_regs); |
935 | __cache_cpumap_setup(cpu, idx, &id4_regs); | 935 | __cache_cpumap_setup(cpu, idx, &id4_regs); |
936 | } | 936 | } |
937 | this_cpu_ci->cpu_map_populated = true; | ||
938 | |||
937 | return 0; | 939 | return 0; |
938 | } | 940 | } |
939 | 941 | ||
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 7e8a736d09db..364fbad72e60 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c | |||
@@ -1672,6 +1672,11 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code) | |||
1672 | void (*machine_check_vector)(struct pt_regs *, long error_code) = | 1672 | void (*machine_check_vector)(struct pt_regs *, long error_code) = |
1673 | unexpected_machine_check; | 1673 | unexpected_machine_check; |
1674 | 1674 | ||
1675 | dotraplinkage void do_mce(struct pt_regs *regs, long error_code) | ||
1676 | { | ||
1677 | machine_check_vector(regs, error_code); | ||
1678 | } | ||
1679 | |||
1675 | /* | 1680 | /* |
1676 | * Called for each booted CPU to set up machine checks. | 1681 | * Called for each booted CPU to set up machine checks. |
1677 | * Must be called with preempt off: | 1682 | * Must be called with preempt off: |
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 2233f8a76615..2a0f44d225fe 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c | |||
@@ -580,6 +580,7 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size, | |||
580 | #define F14H_MPB_MAX_SIZE 1824 | 580 | #define F14H_MPB_MAX_SIZE 1824 |
581 | #define F15H_MPB_MAX_SIZE 4096 | 581 | #define F15H_MPB_MAX_SIZE 4096 |
582 | #define F16H_MPB_MAX_SIZE 3458 | 582 | #define F16H_MPB_MAX_SIZE 3458 |
583 | #define F17H_MPB_MAX_SIZE 3200 | ||
583 | 584 | ||
584 | switch (family) { | 585 | switch (family) { |
585 | case 0x14: | 586 | case 0x14: |
@@ -591,6 +592,9 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size, | |||
591 | case 0x16: | 592 | case 0x16: |
592 | max_size = F16H_MPB_MAX_SIZE; | 593 | max_size = F16H_MPB_MAX_SIZE; |
593 | break; | 594 | break; |
595 | case 0x17: | ||
596 | max_size = F17H_MPB_MAX_SIZE; | ||
597 | break; | ||
594 | default: | 598 | default: |
595 | max_size = F1XH_MPB_MAX_SIZE; | 599 | max_size = F1XH_MPB_MAX_SIZE; |
596 | break; | 600 | break; |
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index b3e94ef461fd..ce5f8a2e7ae6 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c | |||
@@ -44,7 +44,7 @@ | |||
44 | 44 | ||
45 | static struct microcode_ops *microcode_ops; | 45 | static struct microcode_ops *microcode_ops; |
46 | 46 | ||
47 | static bool dis_ucode_ldr; | 47 | static bool dis_ucode_ldr = true; |
48 | 48 | ||
49 | static int __init disable_loader(char *str) | 49 | static int __init disable_loader(char *str) |
50 | { | 50 | { |
@@ -81,6 +81,7 @@ struct cpu_info_ctx { | |||
81 | 81 | ||
82 | static bool __init check_loader_disabled_bsp(void) | 82 | static bool __init check_loader_disabled_bsp(void) |
83 | { | 83 | { |
84 | u32 a, b, c, d; | ||
84 | #ifdef CONFIG_X86_32 | 85 | #ifdef CONFIG_X86_32 |
85 | const char *cmdline = (const char *)__pa_nodebug(boot_command_line); | 86 | const char *cmdline = (const char *)__pa_nodebug(boot_command_line); |
86 | const char *opt = "dis_ucode_ldr"; | 87 | const char *opt = "dis_ucode_ldr"; |
@@ -93,8 +94,20 @@ static bool __init check_loader_disabled_bsp(void) | |||
93 | bool *res = &dis_ucode_ldr; | 94 | bool *res = &dis_ucode_ldr; |
94 | #endif | 95 | #endif |
95 | 96 | ||
96 | if (cmdline_find_option_bool(cmdline, option)) | 97 | a = 1; |
97 | *res = true; | 98 | c = 0; |
99 | native_cpuid(&a, &b, &c, &d); | ||
100 | |||
101 | /* | ||
102 | * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not | ||
103 | * completely accurate as xen pv guests don't see that CPUID bit set but | ||
104 | * that's good enough as they don't land on the BSP path anyway. | ||
105 | */ | ||
106 | if (c & BIT(31)) | ||
107 | return *res; | ||
108 | |||
109 | if (cmdline_find_option_bool(cmdline, option) <= 0) | ||
110 | *res = false; | ||
98 | 111 | ||
99 | return *res; | 112 | return *res; |
100 | } | 113 | } |
@@ -122,9 +135,7 @@ void __init load_ucode_bsp(void) | |||
122 | { | 135 | { |
123 | int vendor; | 136 | int vendor; |
124 | unsigned int family; | 137 | unsigned int family; |
125 | 138 | bool intel = true; | |
126 | if (check_loader_disabled_bsp()) | ||
127 | return; | ||
128 | 139 | ||
129 | if (!have_cpuid_p()) | 140 | if (!have_cpuid_p()) |
130 | return; | 141 | return; |
@@ -134,16 +145,27 @@ void __init load_ucode_bsp(void) | |||
134 | 145 | ||
135 | switch (vendor) { | 146 | switch (vendor) { |
136 | case X86_VENDOR_INTEL: | 147 | case X86_VENDOR_INTEL: |
137 | if (family >= 6) | 148 | if (family < 6) |
138 | load_ucode_intel_bsp(); | 149 | return; |
139 | break; | 150 | break; |
151 | |||
140 | case X86_VENDOR_AMD: | 152 | case X86_VENDOR_AMD: |
141 | if (family >= 0x10) | 153 | if (family < 0x10) |
142 | load_ucode_amd_bsp(family); | 154 | return; |
155 | intel = false; | ||
143 | break; | 156 | break; |
157 | |||
144 | default: | 158 | default: |
145 | break; | 159 | return; |
146 | } | 160 | } |
161 | |||
162 | if (check_loader_disabled_bsp()) | ||
163 | return; | ||
164 | |||
165 | if (intel) | ||
166 | load_ucode_intel_bsp(); | ||
167 | else | ||
168 | load_ucode_amd_bsp(family); | ||
147 | } | 169 | } |
148 | 170 | ||
149 | static bool check_loader_disabled_ap(void) | 171 | static bool check_loader_disabled_ap(void) |
@@ -162,9 +184,6 @@ void load_ucode_ap(void) | |||
162 | if (check_loader_disabled_ap()) | 184 | if (check_loader_disabled_ap()) |
163 | return; | 185 | return; |
164 | 186 | ||
165 | if (!have_cpuid_p()) | ||
166 | return; | ||
167 | |||
168 | vendor = x86_vendor(); | 187 | vendor = x86_vendor(); |
169 | family = x86_family(); | 188 | family = x86_family(); |
170 | 189 | ||
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index abf581ade8d2..2f38a99cdb98 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c | |||
@@ -39,6 +39,9 @@ | |||
39 | #include <asm/setup.h> | 39 | #include <asm/setup.h> |
40 | #include <asm/msr.h> | 40 | #include <asm/msr.h> |
41 | 41 | ||
42 | /* last level cache size per core */ | ||
43 | static int llc_size_per_core; | ||
44 | |||
42 | static unsigned long mc_saved_in_initrd[MAX_UCODE_COUNT]; | 45 | static unsigned long mc_saved_in_initrd[MAX_UCODE_COUNT]; |
43 | static struct mc_saved_data { | 46 | static struct mc_saved_data { |
44 | unsigned int mc_saved_count; | 47 | unsigned int mc_saved_count; |
@@ -994,8 +997,19 @@ static bool is_blacklisted(unsigned int cpu) | |||
994 | { | 997 | { |
995 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 998 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
996 | 999 | ||
997 | if (c->x86 == 6 && c->x86_model == 79) { | 1000 | /* |
998 | pr_err_once("late loading on model 79 is disabled.\n"); | 1001 | * Late loading on model 79 with microcode revision less than 0x0b000021 |
1002 | * and LLC size per core bigger than 2.5MB may result in a system hang. | ||
1003 | * This behavior is documented in item BDF90, #334165 (Intel Xeon | ||
1004 | * Processor E7-8800/4800 v4 Product Family). | ||
1005 | */ | ||
1006 | if (c->x86 == 6 && | ||
1007 | c->x86_model == 79 && | ||
1008 | c->x86_mask == 0x01 && | ||
1009 | llc_size_per_core > 2621440 && | ||
1010 | c->microcode < 0x0b000021) { | ||
1011 | pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode); | ||
1012 | pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n"); | ||
999 | return true; | 1013 | return true; |
1000 | } | 1014 | } |
1001 | 1015 | ||
@@ -1059,6 +1073,15 @@ static struct microcode_ops microcode_intel_ops = { | |||
1059 | .microcode_fini_cpu = microcode_fini_cpu, | 1073 | .microcode_fini_cpu = microcode_fini_cpu, |
1060 | }; | 1074 | }; |
1061 | 1075 | ||
1076 | static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c) | ||
1077 | { | ||
1078 | u64 llc_size = c->x86_cache_size * 1024ULL; | ||
1079 | |||
1080 | do_div(llc_size, c->x86_max_cores); | ||
1081 | |||
1082 | return (int)llc_size; | ||
1083 | } | ||
1084 | |||
1062 | struct microcode_ops * __init init_intel_microcode(void) | 1085 | struct microcode_ops * __init init_intel_microcode(void) |
1063 | { | 1086 | { |
1064 | struct cpuinfo_x86 *c = &boot_cpu_data; | 1087 | struct cpuinfo_x86 *c = &boot_cpu_data; |
@@ -1069,6 +1092,8 @@ struct microcode_ops * __init init_intel_microcode(void) | |||
1069 | return NULL; | 1092 | return NULL; |
1070 | } | 1093 | } |
1071 | 1094 | ||
1095 | llc_size_per_core = calc_llc_size_per_core(c); | ||
1096 | |||
1072 | return µcode_intel_ops; | 1097 | return µcode_intel_ops; |
1073 | } | 1098 | } |
1074 | 1099 | ||
diff --git a/arch/x86/kernel/cpu/perf_event_intel_bts.c b/arch/x86/kernel/cpu/perf_event_intel_bts.c index 2cad71d1b14c..5af11c46d0b9 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_bts.c +++ b/arch/x86/kernel/cpu/perf_event_intel_bts.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/debugfs.h> | 22 | #include <linux/debugfs.h> |
23 | #include <linux/device.h> | 23 | #include <linux/device.h> |
24 | #include <linux/coredump.h> | 24 | #include <linux/coredump.h> |
25 | #include <linux/kaiser.h> | ||
25 | 26 | ||
26 | #include <asm-generic/sizes.h> | 27 | #include <asm-generic/sizes.h> |
27 | #include <asm/perf_event.h> | 28 | #include <asm/perf_event.h> |
@@ -67,6 +68,23 @@ static size_t buf_size(struct page *page) | |||
67 | return 1 << (PAGE_SHIFT + page_private(page)); | 68 | return 1 << (PAGE_SHIFT + page_private(page)); |
68 | } | 69 | } |
69 | 70 | ||
71 | static void bts_buffer_free_aux(void *data) | ||
72 | { | ||
73 | #ifdef CONFIG_PAGE_TABLE_ISOLATION | ||
74 | struct bts_buffer *buf = data; | ||
75 | int nbuf; | ||
76 | |||
77 | for (nbuf = 0; nbuf < buf->nr_bufs; nbuf++) { | ||
78 | struct page *page = buf->buf[nbuf].page; | ||
79 | void *kaddr = page_address(page); | ||
80 | size_t page_size = buf_size(page); | ||
81 | |||
82 | kaiser_remove_mapping((unsigned long)kaddr, page_size); | ||
83 | } | ||
84 | #endif | ||
85 | kfree(data); | ||
86 | } | ||
87 | |||
70 | static void * | 88 | static void * |
71 | bts_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool overwrite) | 89 | bts_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool overwrite) |
72 | { | 90 | { |
@@ -103,29 +121,33 @@ bts_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool overwrite) | |||
103 | buf->real_size = size - size % BTS_RECORD_SIZE; | 121 | buf->real_size = size - size % BTS_RECORD_SIZE; |
104 | 122 | ||
105 | for (pg = 0, nbuf = 0, offset = 0, pad = 0; nbuf < buf->nr_bufs; nbuf++) { | 123 | for (pg = 0, nbuf = 0, offset = 0, pad = 0; nbuf < buf->nr_bufs; nbuf++) { |
106 | unsigned int __nr_pages; | 124 | void *kaddr = pages[pg]; |
125 | size_t page_size; | ||
126 | |||
127 | page = virt_to_page(kaddr); | ||
128 | page_size = buf_size(page); | ||
129 | |||
130 | if (kaiser_add_mapping((unsigned long)kaddr, | ||
131 | page_size, __PAGE_KERNEL) < 0) { | ||
132 | buf->nr_bufs = nbuf; | ||
133 | bts_buffer_free_aux(buf); | ||
134 | return NULL; | ||
135 | } | ||
107 | 136 | ||
108 | page = virt_to_page(pages[pg]); | ||
109 | __nr_pages = PagePrivate(page) ? 1 << page_private(page) : 1; | ||
110 | buf->buf[nbuf].page = page; | 137 | buf->buf[nbuf].page = page; |
111 | buf->buf[nbuf].offset = offset; | 138 | buf->buf[nbuf].offset = offset; |
112 | buf->buf[nbuf].displacement = (pad ? BTS_RECORD_SIZE - pad : 0); | 139 | buf->buf[nbuf].displacement = (pad ? BTS_RECORD_SIZE - pad : 0); |
113 | buf->buf[nbuf].size = buf_size(page) - buf->buf[nbuf].displacement; | 140 | buf->buf[nbuf].size = page_size - buf->buf[nbuf].displacement; |
114 | pad = buf->buf[nbuf].size % BTS_RECORD_SIZE; | 141 | pad = buf->buf[nbuf].size % BTS_RECORD_SIZE; |
115 | buf->buf[nbuf].size -= pad; | 142 | buf->buf[nbuf].size -= pad; |
116 | 143 | ||
117 | pg += __nr_pages; | 144 | pg += page_size >> PAGE_SHIFT; |
118 | offset += __nr_pages << PAGE_SHIFT; | 145 | offset += page_size; |
119 | } | 146 | } |
120 | 147 | ||
121 | return buf; | 148 | return buf; |
122 | } | 149 | } |
123 | 150 | ||
124 | static void bts_buffer_free_aux(void *data) | ||
125 | { | ||
126 | kfree(data); | ||
127 | } | ||
128 | |||
129 | static unsigned long bts_buffer_offset(struct bts_buffer *buf, unsigned int idx) | 151 | static unsigned long bts_buffer_offset(struct bts_buffer *buf, unsigned int idx) |
130 | { | 152 | { |
131 | return buf->buf[idx].offset + buf->buf[idx].displacement; | 153 | return buf->buf[idx].offset + buf->buf[idx].displacement; |
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index 1e7de3cefc9c..f01b3a12dce0 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c | |||
@@ -2,11 +2,15 @@ | |||
2 | #include <linux/types.h> | 2 | #include <linux/types.h> |
3 | #include <linux/slab.h> | 3 | #include <linux/slab.h> |
4 | 4 | ||
5 | #include <asm/kaiser.h> | ||
5 | #include <asm/perf_event.h> | 6 | #include <asm/perf_event.h> |
6 | #include <asm/insn.h> | 7 | #include <asm/insn.h> |
7 | 8 | ||
8 | #include "perf_event.h" | 9 | #include "perf_event.h" |
9 | 10 | ||
11 | static | ||
12 | DEFINE_PER_CPU_SHARED_ALIGNED_USER_MAPPED(struct debug_store, cpu_debug_store); | ||
13 | |||
10 | /* The size of a BTS record in bytes: */ | 14 | /* The size of a BTS record in bytes: */ |
11 | #define BTS_RECORD_SIZE 24 | 15 | #define BTS_RECORD_SIZE 24 |
12 | 16 | ||
@@ -268,6 +272,39 @@ void fini_debug_store_on_cpu(int cpu) | |||
268 | 272 | ||
269 | static DEFINE_PER_CPU(void *, insn_buffer); | 273 | static DEFINE_PER_CPU(void *, insn_buffer); |
270 | 274 | ||
275 | static void *dsalloc(size_t size, gfp_t flags, int node) | ||
276 | { | ||
277 | #ifdef CONFIG_PAGE_TABLE_ISOLATION | ||
278 | unsigned int order = get_order(size); | ||
279 | struct page *page; | ||
280 | unsigned long addr; | ||
281 | |||
282 | page = __alloc_pages_node(node, flags | __GFP_ZERO, order); | ||
283 | if (!page) | ||
284 | return NULL; | ||
285 | addr = (unsigned long)page_address(page); | ||
286 | if (kaiser_add_mapping(addr, size, __PAGE_KERNEL) < 0) { | ||
287 | __free_pages(page, order); | ||
288 | addr = 0; | ||
289 | } | ||
290 | return (void *)addr; | ||
291 | #else | ||
292 | return kmalloc_node(size, flags | __GFP_ZERO, node); | ||
293 | #endif | ||
294 | } | ||
295 | |||
296 | static void dsfree(const void *buffer, size_t size) | ||
297 | { | ||
298 | #ifdef CONFIG_PAGE_TABLE_ISOLATION | ||
299 | if (!buffer) | ||
300 | return; | ||
301 | kaiser_remove_mapping((unsigned long)buffer, size); | ||
302 | free_pages((unsigned long)buffer, get_order(size)); | ||
303 | #else | ||
304 | kfree(buffer); | ||
305 | #endif | ||
306 | } | ||
307 | |||
271 | static int alloc_pebs_buffer(int cpu) | 308 | static int alloc_pebs_buffer(int cpu) |
272 | { | 309 | { |
273 | struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; | 310 | struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; |
@@ -278,7 +315,7 @@ static int alloc_pebs_buffer(int cpu) | |||
278 | if (!x86_pmu.pebs) | 315 | if (!x86_pmu.pebs) |
279 | return 0; | 316 | return 0; |
280 | 317 | ||
281 | buffer = kzalloc_node(x86_pmu.pebs_buffer_size, GFP_KERNEL, node); | 318 | buffer = dsalloc(x86_pmu.pebs_buffer_size, GFP_KERNEL, node); |
282 | if (unlikely(!buffer)) | 319 | if (unlikely(!buffer)) |
283 | return -ENOMEM; | 320 | return -ENOMEM; |
284 | 321 | ||
@@ -289,7 +326,7 @@ static int alloc_pebs_buffer(int cpu) | |||
289 | if (x86_pmu.intel_cap.pebs_format < 2) { | 326 | if (x86_pmu.intel_cap.pebs_format < 2) { |
290 | ibuffer = kzalloc_node(PEBS_FIXUP_SIZE, GFP_KERNEL, node); | 327 | ibuffer = kzalloc_node(PEBS_FIXUP_SIZE, GFP_KERNEL, node); |
291 | if (!ibuffer) { | 328 | if (!ibuffer) { |
292 | kfree(buffer); | 329 | dsfree(buffer, x86_pmu.pebs_buffer_size); |
293 | return -ENOMEM; | 330 | return -ENOMEM; |
294 | } | 331 | } |
295 | per_cpu(insn_buffer, cpu) = ibuffer; | 332 | per_cpu(insn_buffer, cpu) = ibuffer; |
@@ -315,7 +352,8 @@ static void release_pebs_buffer(int cpu) | |||
315 | kfree(per_cpu(insn_buffer, cpu)); | 352 | kfree(per_cpu(insn_buffer, cpu)); |
316 | per_cpu(insn_buffer, cpu) = NULL; | 353 | per_cpu(insn_buffer, cpu) = NULL; |
317 | 354 | ||
318 | kfree((void *)(unsigned long)ds->pebs_buffer_base); | 355 | dsfree((void *)(unsigned long)ds->pebs_buffer_base, |
356 | x86_pmu.pebs_buffer_size); | ||
319 | ds->pebs_buffer_base = 0; | 357 | ds->pebs_buffer_base = 0; |
320 | } | 358 | } |
321 | 359 | ||
@@ -329,7 +367,7 @@ static int alloc_bts_buffer(int cpu) | |||
329 | if (!x86_pmu.bts) | 367 | if (!x86_pmu.bts) |
330 | return 0; | 368 | return 0; |
331 | 369 | ||
332 | buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, node); | 370 | buffer = dsalloc(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, node); |
333 | if (unlikely(!buffer)) { | 371 | if (unlikely(!buffer)) { |
334 | WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__); | 372 | WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__); |
335 | return -ENOMEM; | 373 | return -ENOMEM; |
@@ -355,19 +393,15 @@ static void release_bts_buffer(int cpu) | |||
355 | if (!ds || !x86_pmu.bts) | 393 | if (!ds || !x86_pmu.bts) |
356 | return; | 394 | return; |
357 | 395 | ||
358 | kfree((void *)(unsigned long)ds->bts_buffer_base); | 396 | dsfree((void *)(unsigned long)ds->bts_buffer_base, BTS_BUFFER_SIZE); |
359 | ds->bts_buffer_base = 0; | 397 | ds->bts_buffer_base = 0; |
360 | } | 398 | } |
361 | 399 | ||
362 | static int alloc_ds_buffer(int cpu) | 400 | static int alloc_ds_buffer(int cpu) |
363 | { | 401 | { |
364 | int node = cpu_to_node(cpu); | 402 | struct debug_store *ds = per_cpu_ptr(&cpu_debug_store, cpu); |
365 | struct debug_store *ds; | ||
366 | |||
367 | ds = kzalloc_node(sizeof(*ds), GFP_KERNEL, node); | ||
368 | if (unlikely(!ds)) | ||
369 | return -ENOMEM; | ||
370 | 403 | ||
404 | memset(ds, 0, sizeof(*ds)); | ||
371 | per_cpu(cpu_hw_events, cpu).ds = ds; | 405 | per_cpu(cpu_hw_events, cpu).ds = ds; |
372 | 406 | ||
373 | return 0; | 407 | return 0; |
@@ -381,7 +415,6 @@ static void release_ds_buffer(int cpu) | |||
381 | return; | 415 | return; |
382 | 416 | ||
383 | per_cpu(cpu_hw_events, cpu).ds = NULL; | 417 | per_cpu(cpu_hw_events, cpu).ds = NULL; |
384 | kfree(ds); | ||
385 | } | 418 | } |
386 | 419 | ||
387 | void release_ds_buffers(void) | 420 | void release_ds_buffers(void) |
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index 18ca99f2798b..935225c0375f 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c | |||
@@ -87,8 +87,8 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
87 | } | 87 | } |
88 | 88 | ||
89 | /* Cache size */ | 89 | /* Cache size */ |
90 | if (c->x86_cache_size >= 0) | 90 | if (c->x86_cache_size) |
91 | seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size); | 91 | seq_printf(m, "cache size\t: %u KB\n", c->x86_cache_size); |
92 | 92 | ||
93 | show_cpuinfo_core(m, c, cpu); | 93 | show_cpuinfo_core(m, c, cpu); |
94 | show_cpuinfo_misc(m, c); | 94 | show_cpuinfo_misc(m, c); |
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c index 4d38416e2a7f..b02cb2ec6726 100644 --- a/arch/x86/kernel/espfix_64.c +++ b/arch/x86/kernel/espfix_64.c | |||
@@ -41,6 +41,7 @@ | |||
41 | #include <asm/pgalloc.h> | 41 | #include <asm/pgalloc.h> |
42 | #include <asm/setup.h> | 42 | #include <asm/setup.h> |
43 | #include <asm/espfix.h> | 43 | #include <asm/espfix.h> |
44 | #include <asm/kaiser.h> | ||
44 | 45 | ||
45 | /* | 46 | /* |
46 | * Note: we only need 6*8 = 48 bytes for the espfix stack, but round | 47 | * Note: we only need 6*8 = 48 bytes for the espfix stack, but round |
@@ -126,6 +127,15 @@ void __init init_espfix_bsp(void) | |||
126 | /* Install the espfix pud into the kernel page directory */ | 127 | /* Install the espfix pud into the kernel page directory */ |
127 | pgd_p = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)]; | 128 | pgd_p = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)]; |
128 | pgd_populate(&init_mm, pgd_p, (pud_t *)espfix_pud_page); | 129 | pgd_populate(&init_mm, pgd_p, (pud_t *)espfix_pud_page); |
130 | /* | ||
131 | * Just copy the top-level PGD that is mapping the espfix | ||
132 | * area to ensure it is mapped into the shadow user page | ||
133 | * tables. | ||
134 | */ | ||
135 | if (kaiser_enabled) { | ||
136 | set_pgd(native_get_shadow_pgd(pgd_p), | ||
137 | __pgd(_KERNPG_TABLE | __pa((pud_t *)espfix_pud_page))); | ||
138 | } | ||
129 | 139 | ||
130 | /* Randomize the locations */ | 140 | /* Randomize the locations */ |
131 | init_espfix_random(); | 141 | init_espfix_random(); |
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index ffdc0e860390..4034e905741a 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S | |||
@@ -183,8 +183,8 @@ ENTRY(secondary_startup_64) | |||
183 | movq $(init_level4_pgt - __START_KERNEL_map), %rax | 183 | movq $(init_level4_pgt - __START_KERNEL_map), %rax |
184 | 1: | 184 | 1: |
185 | 185 | ||
186 | /* Enable PAE mode and PGE */ | 186 | /* Enable PAE and PSE, but defer PGE until kaiser_enabled is decided */ |
187 | movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx | 187 | movl $(X86_CR4_PAE | X86_CR4_PSE), %ecx |
188 | movq %rcx, %cr4 | 188 | movq %rcx, %cr4 |
189 | 189 | ||
190 | /* Setup early boot stage 4 level pagetables. */ | 190 | /* Setup early boot stage 4 level pagetables. */ |
@@ -441,6 +441,27 @@ early_idt_ripmsg: | |||
441 | .balign PAGE_SIZE; \ | 441 | .balign PAGE_SIZE; \ |
442 | GLOBAL(name) | 442 | GLOBAL(name) |
443 | 443 | ||
444 | #ifdef CONFIG_PAGE_TABLE_ISOLATION | ||
445 | /* | ||
446 | * Each PGD needs to be 8k long and 8k aligned. We do not | ||
447 | * ever go out to userspace with these, so we do not | ||
448 | * strictly *need* the second page, but this allows us to | ||
449 | * have a single set_pgd() implementation that does not | ||
450 | * need to worry about whether it has 4k or 8k to work | ||
451 | * with. | ||
452 | * | ||
453 | * This ensures PGDs are 8k long: | ||
454 | */ | ||
455 | #define KAISER_USER_PGD_FILL 512 | ||
456 | /* This ensures they are 8k-aligned: */ | ||
457 | #define NEXT_PGD_PAGE(name) \ | ||
458 | .balign 2 * PAGE_SIZE; \ | ||
459 | GLOBAL(name) | ||
460 | #else | ||
461 | #define NEXT_PGD_PAGE(name) NEXT_PAGE(name) | ||
462 | #define KAISER_USER_PGD_FILL 0 | ||
463 | #endif | ||
464 | |||
444 | /* Automate the creation of 1 to 1 mapping pmd entries */ | 465 | /* Automate the creation of 1 to 1 mapping pmd entries */ |
445 | #define PMDS(START, PERM, COUNT) \ | 466 | #define PMDS(START, PERM, COUNT) \ |
446 | i = 0 ; \ | 467 | i = 0 ; \ |
@@ -450,9 +471,10 @@ GLOBAL(name) | |||
450 | .endr | 471 | .endr |
451 | 472 | ||
452 | __INITDATA | 473 | __INITDATA |
453 | NEXT_PAGE(early_level4_pgt) | 474 | NEXT_PGD_PAGE(early_level4_pgt) |
454 | .fill 511,8,0 | 475 | .fill 511,8,0 |
455 | .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE | 476 | .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE |
477 | .fill KAISER_USER_PGD_FILL,8,0 | ||
456 | 478 | ||
457 | NEXT_PAGE(early_dynamic_pgts) | 479 | NEXT_PAGE(early_dynamic_pgts) |
458 | .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0 | 480 | .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0 |
@@ -460,16 +482,18 @@ NEXT_PAGE(early_dynamic_pgts) | |||
460 | .data | 482 | .data |
461 | 483 | ||
462 | #ifndef CONFIG_XEN | 484 | #ifndef CONFIG_XEN |
463 | NEXT_PAGE(init_level4_pgt) | 485 | NEXT_PGD_PAGE(init_level4_pgt) |
464 | .fill 512,8,0 | 486 | .fill 512,8,0 |
487 | .fill KAISER_USER_PGD_FILL,8,0 | ||
465 | #else | 488 | #else |
466 | NEXT_PAGE(init_level4_pgt) | 489 | NEXT_PGD_PAGE(init_level4_pgt) |
467 | .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE | 490 | .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE |
468 | .org init_level4_pgt + L4_PAGE_OFFSET*8, 0 | 491 | .org init_level4_pgt + L4_PAGE_OFFSET*8, 0 |
469 | .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE | 492 | .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE |
470 | .org init_level4_pgt + L4_START_KERNEL*8, 0 | 493 | .org init_level4_pgt + L4_START_KERNEL*8, 0 |
471 | /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */ | 494 | /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */ |
472 | .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE | 495 | .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE |
496 | .fill KAISER_USER_PGD_FILL,8,0 | ||
473 | 497 | ||
474 | NEXT_PAGE(level3_ident_pgt) | 498 | NEXT_PAGE(level3_ident_pgt) |
475 | .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE | 499 | .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE |
@@ -480,6 +504,7 @@ NEXT_PAGE(level2_ident_pgt) | |||
480 | */ | 504 | */ |
481 | PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD) | 505 | PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD) |
482 | #endif | 506 | #endif |
507 | .fill KAISER_USER_PGD_FILL,8,0 | ||
483 | 508 | ||
484 | NEXT_PAGE(level3_kernel_pgt) | 509 | NEXT_PAGE(level3_kernel_pgt) |
485 | .fill L3_START_KERNEL,8,0 | 510 | .fill L3_START_KERNEL,8,0 |
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 61521dc19c10..9f669fdd2010 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c | |||
@@ -102,8 +102,7 @@ int arch_show_interrupts(struct seq_file *p, int prec) | |||
102 | seq_puts(p, " Rescheduling interrupts\n"); | 102 | seq_puts(p, " Rescheduling interrupts\n"); |
103 | seq_printf(p, "%*s: ", prec, "CAL"); | 103 | seq_printf(p, "%*s: ", prec, "CAL"); |
104 | for_each_online_cpu(j) | 104 | for_each_online_cpu(j) |
105 | seq_printf(p, "%10u ", irq_stats(j)->irq_call_count - | 105 | seq_printf(p, "%10u ", irq_stats(j)->irq_call_count); |
106 | irq_stats(j)->irq_tlb_count); | ||
107 | seq_puts(p, " Function call interrupts\n"); | 106 | seq_puts(p, " Function call interrupts\n"); |
108 | seq_printf(p, "%*s: ", prec, "TLB"); | 107 | seq_printf(p, "%*s: ", prec, "TLB"); |
109 | for_each_online_cpu(j) | 108 | for_each_online_cpu(j) |
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index 38da8f29a9c8..528b7aa1780d 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/mm.h> | 20 | #include <linux/mm.h> |
21 | 21 | ||
22 | #include <asm/apic.h> | 22 | #include <asm/apic.h> |
23 | #include <asm/nospec-branch.h> | ||
23 | 24 | ||
24 | #ifdef CONFIG_DEBUG_STACKOVERFLOW | 25 | #ifdef CONFIG_DEBUG_STACKOVERFLOW |
25 | 26 | ||
@@ -55,17 +56,17 @@ DEFINE_PER_CPU(struct irq_stack *, softirq_stack); | |||
55 | static void call_on_stack(void *func, void *stack) | 56 | static void call_on_stack(void *func, void *stack) |
56 | { | 57 | { |
57 | asm volatile("xchgl %%ebx,%%esp \n" | 58 | asm volatile("xchgl %%ebx,%%esp \n" |
58 | "call *%%edi \n" | 59 | CALL_NOSPEC |
59 | "movl %%ebx,%%esp \n" | 60 | "movl %%ebx,%%esp \n" |
60 | : "=b" (stack) | 61 | : "=b" (stack) |
61 | : "0" (stack), | 62 | : "0" (stack), |
62 | "D"(func) | 63 | [thunk_target] "D"(func) |
63 | : "memory", "cc", "edx", "ecx", "eax"); | 64 | : "memory", "cc", "edx", "ecx", "eax"); |
64 | } | 65 | } |
65 | 66 | ||
66 | static inline void *current_stack(void) | 67 | static inline void *current_stack(void) |
67 | { | 68 | { |
68 | return (void *)(current_stack_pointer() & ~(THREAD_SIZE - 1)); | 69 | return (void *)(current_stack_pointer & ~(THREAD_SIZE - 1)); |
69 | } | 70 | } |
70 | 71 | ||
71 | static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc) | 72 | static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc) |
@@ -89,17 +90,17 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc) | |||
89 | 90 | ||
90 | /* Save the next esp at the bottom of the stack */ | 91 | /* Save the next esp at the bottom of the stack */ |
91 | prev_esp = (u32 *)irqstk; | 92 | prev_esp = (u32 *)irqstk; |
92 | *prev_esp = current_stack_pointer(); | 93 | *prev_esp = current_stack_pointer; |
93 | 94 | ||
94 | if (unlikely(overflow)) | 95 | if (unlikely(overflow)) |
95 | call_on_stack(print_stack_overflow, isp); | 96 | call_on_stack(print_stack_overflow, isp); |
96 | 97 | ||
97 | asm volatile("xchgl %%ebx,%%esp \n" | 98 | asm volatile("xchgl %%ebx,%%esp \n" |
98 | "call *%%edi \n" | 99 | CALL_NOSPEC |
99 | "movl %%ebx,%%esp \n" | 100 | "movl %%ebx,%%esp \n" |
100 | : "=a" (arg1), "=b" (isp) | 101 | : "=a" (arg1), "=b" (isp) |
101 | : "0" (desc), "1" (isp), | 102 | : "0" (desc), "1" (isp), |
102 | "D" (desc->handle_irq) | 103 | [thunk_target] "D" (desc->handle_irq) |
103 | : "memory", "cc", "ecx"); | 104 | : "memory", "cc", "ecx"); |
104 | return 1; | 105 | return 1; |
105 | } | 106 | } |
@@ -142,7 +143,7 @@ void do_softirq_own_stack(void) | |||
142 | 143 | ||
143 | /* Push the previous esp onto the stack */ | 144 | /* Push the previous esp onto the stack */ |
144 | prev_esp = (u32 *)irqstk; | 145 | prev_esp = (u32 *)irqstk; |
145 | *prev_esp = current_stack_pointer(); | 146 | *prev_esp = current_stack_pointer; |
146 | 147 | ||
147 | call_on_stack(__do_softirq, isp); | 148 | call_on_stack(__do_softirq, isp); |
148 | } | 149 | } |
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index 1423ab1b0312..f480b38a03c3 100644 --- a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c | |||
@@ -51,7 +51,7 @@ static struct irqaction irq2 = { | |||
51 | .flags = IRQF_NO_THREAD, | 51 | .flags = IRQF_NO_THREAD, |
52 | }; | 52 | }; |
53 | 53 | ||
54 | DEFINE_PER_CPU(vector_irq_t, vector_irq) = { | 54 | DEFINE_PER_CPU_USER_MAPPED(vector_irq_t, vector_irq) = { |
55 | [0 ... NR_VECTORS - 1] = VECTOR_UNUSED, | 55 | [0 ... NR_VECTORS - 1] = VECTOR_UNUSED, |
56 | }; | 56 | }; |
57 | 57 | ||
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index c9d488f3e4cd..ea8e2b846101 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <asm/alternative.h> | 36 | #include <asm/alternative.h> |
37 | #include <asm/insn.h> | 37 | #include <asm/insn.h> |
38 | #include <asm/debugreg.h> | 38 | #include <asm/debugreg.h> |
39 | #include <asm/nospec-branch.h> | ||
39 | 40 | ||
40 | #include "common.h" | 41 | #include "common.h" |
41 | 42 | ||
@@ -191,7 +192,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src) | |||
191 | } | 192 | } |
192 | 193 | ||
193 | /* Check whether insn is indirect jump */ | 194 | /* Check whether insn is indirect jump */ |
194 | static int insn_is_indirect_jump(struct insn *insn) | 195 | static int __insn_is_indirect_jump(struct insn *insn) |
195 | { | 196 | { |
196 | return ((insn->opcode.bytes[0] == 0xff && | 197 | return ((insn->opcode.bytes[0] == 0xff && |
197 | (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */ | 198 | (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */ |
@@ -225,6 +226,26 @@ static int insn_jump_into_range(struct insn *insn, unsigned long start, int len) | |||
225 | return (start <= target && target <= start + len); | 226 | return (start <= target && target <= start + len); |
226 | } | 227 | } |
227 | 228 | ||
229 | static int insn_is_indirect_jump(struct insn *insn) | ||
230 | { | ||
231 | int ret = __insn_is_indirect_jump(insn); | ||
232 | |||
233 | #ifdef CONFIG_RETPOLINE | ||
234 | /* | ||
235 | * Jump to x86_indirect_thunk_* is treated as an indirect jump. | ||
236 | * Note that even with CONFIG_RETPOLINE=y, the kernel compiled with | ||
237 | * older gcc may use indirect jump. So we add this check instead of | ||
238 | * replace indirect-jump check. | ||
239 | */ | ||
240 | if (!ret) | ||
241 | ret = insn_jump_into_range(insn, | ||
242 | (unsigned long)__indirect_thunk_start, | ||
243 | (unsigned long)__indirect_thunk_end - | ||
244 | (unsigned long)__indirect_thunk_start); | ||
245 | #endif | ||
246 | return ret; | ||
247 | } | ||
248 | |||
228 | /* Decode whole function to ensure any instructions don't jump into target */ | 249 | /* Decode whole function to ensure any instructions don't jump into target */ |
229 | static int can_optimize(unsigned long paddr) | 250 | static int can_optimize(unsigned long paddr) |
230 | { | 251 | { |
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index 2bd81e302427..ec1b06dc82d2 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c | |||
@@ -45,6 +45,11 @@ early_param("no-kvmclock", parse_no_kvmclock); | |||
45 | static struct pvclock_vsyscall_time_info *hv_clock; | 45 | static struct pvclock_vsyscall_time_info *hv_clock; |
46 | static struct pvclock_wall_clock wall_clock; | 46 | static struct pvclock_wall_clock wall_clock; |
47 | 47 | ||
48 | struct pvclock_vsyscall_time_info *pvclock_pvti_cpu0_va(void) | ||
49 | { | ||
50 | return hv_clock; | ||
51 | } | ||
52 | |||
48 | /* | 53 | /* |
49 | * The wallclock is the time of day when we booted. Since then, some time may | 54 | * The wallclock is the time of day when we booted. Since then, some time may |
50 | * have elapsed since the hypervisor wrote the data. So we try to account for | 55 | * have elapsed since the hypervisor wrote the data. So we try to account for |
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index d6279593bcdd..bc429365b72a 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
17 | #include <linux/vmalloc.h> | 17 | #include <linux/vmalloc.h> |
18 | #include <linux/uaccess.h> | 18 | #include <linux/uaccess.h> |
19 | #include <linux/kaiser.h> | ||
19 | 20 | ||
20 | #include <asm/ldt.h> | 21 | #include <asm/ldt.h> |
21 | #include <asm/desc.h> | 22 | #include <asm/desc.h> |
@@ -34,11 +35,21 @@ static void flush_ldt(void *current_mm) | |||
34 | set_ldt(pc->ldt->entries, pc->ldt->size); | 35 | set_ldt(pc->ldt->entries, pc->ldt->size); |
35 | } | 36 | } |
36 | 37 | ||
38 | static void __free_ldt_struct(struct ldt_struct *ldt) | ||
39 | { | ||
40 | if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE) | ||
41 | vfree(ldt->entries); | ||
42 | else | ||
43 | free_page((unsigned long)ldt->entries); | ||
44 | kfree(ldt); | ||
45 | } | ||
46 | |||
37 | /* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */ | 47 | /* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */ |
38 | static struct ldt_struct *alloc_ldt_struct(int size) | 48 | static struct ldt_struct *alloc_ldt_struct(int size) |
39 | { | 49 | { |
40 | struct ldt_struct *new_ldt; | 50 | struct ldt_struct *new_ldt; |
41 | int alloc_size; | 51 | int alloc_size; |
52 | int ret; | ||
42 | 53 | ||
43 | if (size > LDT_ENTRIES) | 54 | if (size > LDT_ENTRIES) |
44 | return NULL; | 55 | return NULL; |
@@ -66,7 +77,13 @@ static struct ldt_struct *alloc_ldt_struct(int size) | |||
66 | return NULL; | 77 | return NULL; |
67 | } | 78 | } |
68 | 79 | ||
80 | ret = kaiser_add_mapping((unsigned long)new_ldt->entries, alloc_size, | ||
81 | __PAGE_KERNEL); | ||
69 | new_ldt->size = size; | 82 | new_ldt->size = size; |
83 | if (ret) { | ||
84 | __free_ldt_struct(new_ldt); | ||
85 | return NULL; | ||
86 | } | ||
70 | return new_ldt; | 87 | return new_ldt; |
71 | } | 88 | } |
72 | 89 | ||
@@ -92,12 +109,10 @@ static void free_ldt_struct(struct ldt_struct *ldt) | |||
92 | if (likely(!ldt)) | 109 | if (likely(!ldt)) |
93 | return; | 110 | return; |
94 | 111 | ||
112 | kaiser_remove_mapping((unsigned long)ldt->entries, | ||
113 | ldt->size * LDT_ENTRY_SIZE); | ||
95 | paravirt_free_ldt(ldt->entries, ldt->size); | 114 | paravirt_free_ldt(ldt->entries, ldt->size); |
96 | if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE) | 115 | __free_ldt_struct(ldt); |
97 | vfree(ldt->entries); | ||
98 | else | ||
99 | free_page((unsigned long)ldt->entries); | ||
100 | kfree(ldt); | ||
101 | } | 116 | } |
102 | 117 | ||
103 | /* | 118 | /* |
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index 819ab3f9c9c7..64979821bc2e 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c | |||
@@ -536,3 +536,48 @@ overflow: | |||
536 | return -ENOEXEC; | 536 | return -ENOEXEC; |
537 | } | 537 | } |
538 | #endif /* CONFIG_KEXEC_FILE */ | 538 | #endif /* CONFIG_KEXEC_FILE */ |
539 | |||
540 | static int | ||
541 | kexec_mark_range(unsigned long start, unsigned long end, bool protect) | ||
542 | { | ||
543 | struct page *page; | ||
544 | unsigned int nr_pages; | ||
545 | |||
546 | /* | ||
547 | * For physical range: [start, end]. We must skip the unassigned | ||
548 | * crashk resource with zero-valued "end" member. | ||
549 | */ | ||
550 | if (!end || start > end) | ||
551 | return 0; | ||
552 | |||
553 | page = pfn_to_page(start >> PAGE_SHIFT); | ||
554 | nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1; | ||
555 | if (protect) | ||
556 | return set_pages_ro(page, nr_pages); | ||
557 | else | ||
558 | return set_pages_rw(page, nr_pages); | ||
559 | } | ||
560 | |||
561 | static void kexec_mark_crashkres(bool protect) | ||
562 | { | ||
563 | unsigned long control; | ||
564 | |||
565 | kexec_mark_range(crashk_low_res.start, crashk_low_res.end, protect); | ||
566 | |||
567 | /* Don't touch the control code page used in crash_kexec().*/ | ||
568 | control = PFN_PHYS(page_to_pfn(kexec_crash_image->control_code_page)); | ||
569 | /* Control code page is located in the 2nd page. */ | ||
570 | kexec_mark_range(crashk_res.start, control + PAGE_SIZE - 1, protect); | ||
571 | control += KEXEC_CONTROL_PAGE_SIZE; | ||
572 | kexec_mark_range(control, crashk_res.end, protect); | ||
573 | } | ||
574 | |||
575 | void arch_kexec_protect_crashkres(void) | ||
576 | { | ||
577 | kexec_mark_crashkres(true); | ||
578 | } | ||
579 | |||
580 | void arch_kexec_unprotect_crashkres(void) | ||
581 | { | ||
582 | kexec_mark_crashkres(false); | ||
583 | } | ||
diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S index 5d9afbcb6074..09284cfab86f 100644 --- a/arch/x86/kernel/mcount_64.S +++ b/arch/x86/kernel/mcount_64.S | |||
@@ -7,7 +7,7 @@ | |||
7 | #include <linux/linkage.h> | 7 | #include <linux/linkage.h> |
8 | #include <asm/ptrace.h> | 8 | #include <asm/ptrace.h> |
9 | #include <asm/ftrace.h> | 9 | #include <asm/ftrace.h> |
10 | 10 | #include <asm/nospec-branch.h> | |
11 | 11 | ||
12 | .code64 | 12 | .code64 |
13 | .section .entry.text, "ax" | 13 | .section .entry.text, "ax" |
@@ -285,8 +285,9 @@ trace: | |||
285 | * ip and parent ip are used and the list function is called when | 285 | * ip and parent ip are used and the list function is called when |
286 | * function tracing is enabled. | 286 | * function tracing is enabled. |
287 | */ | 287 | */ |
288 | call *ftrace_trace_function | ||
289 | 288 | ||
289 | movq ftrace_trace_function, %r8 | ||
290 | CALL_NOSPEC %r8 | ||
290 | restore_mcount_regs | 291 | restore_mcount_regs |
291 | 292 | ||
292 | jmp fgraph_trace | 293 | jmp fgraph_trace |
@@ -329,5 +330,5 @@ GLOBAL(return_to_handler) | |||
329 | movq 8(%rsp), %rdx | 330 | movq 8(%rsp), %rdx |
330 | movq (%rsp), %rax | 331 | movq (%rsp), %rax |
331 | addq $24, %rsp | 332 | addq $24, %rsp |
332 | jmp *%rdi | 333 | JMP_NOSPEC %rdi |
333 | #endif | 334 | #endif |
diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c index 8aa05583bc42..0677bf8d3a42 100644 --- a/arch/x86/kernel/paravirt_patch_64.c +++ b/arch/x86/kernel/paravirt_patch_64.c | |||
@@ -9,7 +9,6 @@ DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax"); | |||
9 | DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax"); | 9 | DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax"); |
10 | DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax"); | 10 | DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax"); |
11 | DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3"); | 11 | DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3"); |
12 | DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)"); | ||
13 | DEF_NATIVE(pv_cpu_ops, clts, "clts"); | 12 | DEF_NATIVE(pv_cpu_ops, clts, "clts"); |
14 | DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd"); | 13 | DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd"); |
15 | 14 | ||
@@ -62,7 +61,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf, | |||
62 | PATCH_SITE(pv_mmu_ops, read_cr3); | 61 | PATCH_SITE(pv_mmu_ops, read_cr3); |
63 | PATCH_SITE(pv_mmu_ops, write_cr3); | 62 | PATCH_SITE(pv_mmu_ops, write_cr3); |
64 | PATCH_SITE(pv_cpu_ops, clts); | 63 | PATCH_SITE(pv_cpu_ops, clts); |
65 | PATCH_SITE(pv_mmu_ops, flush_tlb_single); | ||
66 | PATCH_SITE(pv_cpu_ops, wbinvd); | 64 | PATCH_SITE(pv_cpu_ops, wbinvd); |
67 | #if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS) | 65 | #if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS) |
68 | case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock): | 66 | case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock): |
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 57eca132962f..c1b21d61b769 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
@@ -39,7 +39,7 @@ | |||
39 | * section. Since TSS's are completely CPU-local, we want them | 39 | * section. Since TSS's are completely CPU-local, we want them |
40 | * on exact cacheline boundaries, to eliminate cacheline ping-pong. | 40 | * on exact cacheline boundaries, to eliminate cacheline ping-pong. |
41 | */ | 41 | */ |
42 | __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = { | 42 | __visible DEFINE_PER_CPU_SHARED_ALIGNED_USER_MAPPED(struct tss_struct, cpu_tss) = { |
43 | .x86_tss = { | 43 | .x86_tss = { |
44 | .sp0 = TOP_OF_INIT_STACK, | 44 | .sp0 = TOP_OF_INIT_STACK, |
45 | #ifdef CONFIG_X86_32 | 45 | #ifdef CONFIG_X86_32 |
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index f660d63f40fe..9a16932c7258 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c | |||
@@ -93,6 +93,10 @@ void __noreturn machine_real_restart(unsigned int type) | |||
93 | load_cr3(initial_page_table); | 93 | load_cr3(initial_page_table); |
94 | #else | 94 | #else |
95 | write_cr3(real_mode_header->trampoline_pgd); | 95 | write_cr3(real_mode_header->trampoline_pgd); |
96 | |||
97 | /* Exiting long mode will fail if CR4.PCIDE is set. */ | ||
98 | if (static_cpu_has(X86_FEATURE_PCID)) | ||
99 | cr4_clear_bits(X86_CR4_PCIDE); | ||
96 | #endif | 100 | #endif |
97 | 101 | ||
98 | /* Jump to the identity-mapped low memory code */ | 102 | /* Jump to the identity-mapped low memory code */ |
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index e67b834279b2..bbaae4cf9e8e 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -112,6 +112,7 @@ | |||
112 | #include <asm/alternative.h> | 112 | #include <asm/alternative.h> |
113 | #include <asm/prom.h> | 113 | #include <asm/prom.h> |
114 | #include <asm/microcode.h> | 114 | #include <asm/microcode.h> |
115 | #include <asm/kaiser.h> | ||
115 | 116 | ||
116 | /* | 117 | /* |
117 | * max_low_pfn_mapped: highest direct mapped pfn under 4GB | 118 | * max_low_pfn_mapped: highest direct mapped pfn under 4GB |
@@ -1016,6 +1017,12 @@ void __init setup_arch(char **cmdline_p) | |||
1016 | */ | 1017 | */ |
1017 | init_hypervisor_platform(); | 1018 | init_hypervisor_platform(); |
1018 | 1019 | ||
1020 | /* | ||
1021 | * This needs to happen right after XENPV is set on xen and | ||
1022 | * kaiser_enabled is checked below in cleanup_highmap(). | ||
1023 | */ | ||
1024 | kaiser_check_boottime_disable(); | ||
1025 | |||
1019 | x86_init.resources.probe_roms(); | 1026 | x86_init.resources.probe_roms(); |
1020 | 1027 | ||
1021 | /* after parse_early_param, so could debug it */ | 1028 | /* after parse_early_param, so could debug it */ |
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index fbabe4fcc7fb..fe89f938e0f0 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -104,14 +104,10 @@ static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip) | |||
104 | spin_lock_irqsave(&rtc_lock, flags); | 104 | spin_lock_irqsave(&rtc_lock, flags); |
105 | CMOS_WRITE(0xa, 0xf); | 105 | CMOS_WRITE(0xa, 0xf); |
106 | spin_unlock_irqrestore(&rtc_lock, flags); | 106 | spin_unlock_irqrestore(&rtc_lock, flags); |
107 | local_flush_tlb(); | ||
108 | pr_debug("1.\n"); | ||
109 | *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) = | 107 | *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) = |
110 | start_eip >> 4; | 108 | start_eip >> 4; |
111 | pr_debug("2.\n"); | ||
112 | *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = | 109 | *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = |
113 | start_eip & 0xf; | 110 | start_eip & 0xf; |
114 | pr_debug("3.\n"); | ||
115 | } | 111 | } |
116 | 112 | ||
117 | static inline void smpboot_restore_warm_reset_vector(void) | 113 | static inline void smpboot_restore_warm_reset_vector(void) |
@@ -119,11 +115,6 @@ static inline void smpboot_restore_warm_reset_vector(void) | |||
119 | unsigned long flags; | 115 | unsigned long flags; |
120 | 116 | ||
121 | /* | 117 | /* |
122 | * Install writable page 0 entry to set BIOS data area. | ||
123 | */ | ||
124 | local_flush_tlb(); | ||
125 | |||
126 | /* | ||
127 | * Paranoid: Set warm reset code and vector here back | 118 | * Paranoid: Set warm reset code and vector here back |
128 | * to default values. | 119 | * to default values. |
129 | */ | 120 | */ |
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c index 91a4496db434..c77ab1f51fbe 100644 --- a/arch/x86/kernel/tboot.c +++ b/arch/x86/kernel/tboot.c | |||
@@ -140,6 +140,16 @@ static int map_tboot_page(unsigned long vaddr, unsigned long pfn, | |||
140 | return -1; | 140 | return -1; |
141 | set_pte_at(&tboot_mm, vaddr, pte, pfn_pte(pfn, prot)); | 141 | set_pte_at(&tboot_mm, vaddr, pte, pfn_pte(pfn, prot)); |
142 | pte_unmap(pte); | 142 | pte_unmap(pte); |
143 | |||
144 | /* | ||
145 | * PTI poisons low addresses in the kernel page tables in the | ||
146 | * name of making them unusable for userspace. To execute | ||
147 | * code at such a low address, the poison must be cleared. | ||
148 | * | ||
149 | * Note: 'pgd' actually gets set in pud_alloc(). | ||
150 | */ | ||
151 | pgd->pgd &= ~_PAGE_NX; | ||
152 | |||
143 | return 0; | 153 | return 0; |
144 | } | 154 | } |
145 | 155 | ||
diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c index 1c113db9ed57..2bb5ee464df3 100644 --- a/arch/x86/kernel/tracepoint.c +++ b/arch/x86/kernel/tracepoint.c | |||
@@ -9,10 +9,12 @@ | |||
9 | #include <linux/atomic.h> | 9 | #include <linux/atomic.h> |
10 | 10 | ||
11 | atomic_t trace_idt_ctr = ATOMIC_INIT(0); | 11 | atomic_t trace_idt_ctr = ATOMIC_INIT(0); |
12 | __aligned(PAGE_SIZE) | ||
12 | struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1, | 13 | struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1, |
13 | (unsigned long) trace_idt_table }; | 14 | (unsigned long) trace_idt_table }; |
14 | 15 | ||
15 | /* No need to be aligned, but done to keep all IDTs defined the same way. */ | 16 | /* No need to be aligned, but done to keep all IDTs defined the same way. */ |
17 | __aligned(PAGE_SIZE) | ||
16 | gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss; | 18 | gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss; |
17 | 19 | ||
18 | static int trace_irq_vector_refcount; | 20 | static int trace_irq_vector_refcount; |
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 679302c312f8..22b81f35c500 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c | |||
@@ -166,7 +166,7 @@ void ist_begin_non_atomic(struct pt_regs *regs) | |||
166 | * from double_fault. | 166 | * from double_fault. |
167 | */ | 167 | */ |
168 | BUG_ON((unsigned long)(current_top_of_stack() - | 168 | BUG_ON((unsigned long)(current_top_of_stack() - |
169 | current_stack_pointer()) >= THREAD_SIZE); | 169 | current_stack_pointer) >= THREAD_SIZE); |
170 | 170 | ||
171 | preempt_enable_no_resched(); | 171 | preempt_enable_no_resched(); |
172 | } | 172 | } |
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c index 524619351961..510e80da7de4 100644 --- a/arch/x86/kernel/vm86_32.c +++ b/arch/x86/kernel/vm86_32.c | |||
@@ -187,7 +187,7 @@ static void mark_screen_rdonly(struct mm_struct *mm) | |||
187 | pte_unmap_unlock(pte, ptl); | 187 | pte_unmap_unlock(pte, ptl); |
188 | out: | 188 | out: |
189 | up_write(&mm->mmap_sem); | 189 | up_write(&mm->mmap_sem); |
190 | flush_tlb(); | 190 | flush_tlb_mm_range(mm, 0xA0000, 0xA0000 + 32*PAGE_SIZE, 0UL); |
191 | } | 191 | } |
192 | 192 | ||
193 | 193 | ||
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index fe133b710bef..cc468bd15430 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S | |||
@@ -103,6 +103,13 @@ SECTIONS | |||
103 | IRQENTRY_TEXT | 103 | IRQENTRY_TEXT |
104 | *(.fixup) | 104 | *(.fixup) |
105 | *(.gnu.warning) | 105 | *(.gnu.warning) |
106 | |||
107 | #ifdef CONFIG_RETPOLINE | ||
108 | __indirect_thunk_start = .; | ||
109 | *(.text.__x86.indirect_thunk) | ||
110 | __indirect_thunk_end = .; | ||
111 | #endif | ||
112 | |||
106 | /* End of text section */ | 113 | /* End of text section */ |
107 | _etext = .; | 114 | _etext = .; |
108 | } :text = 0x9090 | 115 | } :text = 0x9090 |
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 684edebb4a0c..e4eb1d2bf849 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c | |||
@@ -2383,9 +2383,21 @@ static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n) | |||
2383 | } | 2383 | } |
2384 | 2384 | ||
2385 | static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt, | 2385 | static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt, |
2386 | u64 cr0, u64 cr4) | 2386 | u64 cr0, u64 cr3, u64 cr4) |
2387 | { | 2387 | { |
2388 | int bad; | 2388 | int bad; |
2389 | u64 pcid; | ||
2390 | |||
2391 | /* In order to later set CR4.PCIDE, CR3[11:0] must be zero. */ | ||
2392 | pcid = 0; | ||
2393 | if (cr4 & X86_CR4_PCIDE) { | ||
2394 | pcid = cr3 & 0xfff; | ||
2395 | cr3 &= ~0xfff; | ||
2396 | } | ||
2397 | |||
2398 | bad = ctxt->ops->set_cr(ctxt, 3, cr3); | ||
2399 | if (bad) | ||
2400 | return X86EMUL_UNHANDLEABLE; | ||
2389 | 2401 | ||
2390 | /* | 2402 | /* |
2391 | * First enable PAE, long mode needs it before CR0.PG = 1 is set. | 2403 | * First enable PAE, long mode needs it before CR0.PG = 1 is set. |
@@ -2404,6 +2416,12 @@ static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt, | |||
2404 | bad = ctxt->ops->set_cr(ctxt, 4, cr4); | 2416 | bad = ctxt->ops->set_cr(ctxt, 4, cr4); |
2405 | if (bad) | 2417 | if (bad) |
2406 | return X86EMUL_UNHANDLEABLE; | 2418 | return X86EMUL_UNHANDLEABLE; |
2419 | if (pcid) { | ||
2420 | bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid); | ||
2421 | if (bad) | ||
2422 | return X86EMUL_UNHANDLEABLE; | ||
2423 | } | ||
2424 | |||
2407 | } | 2425 | } |
2408 | 2426 | ||
2409 | return X86EMUL_CONTINUE; | 2427 | return X86EMUL_CONTINUE; |
@@ -2414,11 +2432,11 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase) | |||
2414 | struct desc_struct desc; | 2432 | struct desc_struct desc; |
2415 | struct desc_ptr dt; | 2433 | struct desc_ptr dt; |
2416 | u16 selector; | 2434 | u16 selector; |
2417 | u32 val, cr0, cr4; | 2435 | u32 val, cr0, cr3, cr4; |
2418 | int i; | 2436 | int i; |
2419 | 2437 | ||
2420 | cr0 = GET_SMSTATE(u32, smbase, 0x7ffc); | 2438 | cr0 = GET_SMSTATE(u32, smbase, 0x7ffc); |
2421 | ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u32, smbase, 0x7ff8)); | 2439 | cr3 = GET_SMSTATE(u32, smbase, 0x7ff8); |
2422 | ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED; | 2440 | ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED; |
2423 | ctxt->_eip = GET_SMSTATE(u32, smbase, 0x7ff0); | 2441 | ctxt->_eip = GET_SMSTATE(u32, smbase, 0x7ff0); |
2424 | 2442 | ||
@@ -2460,14 +2478,14 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase) | |||
2460 | 2478 | ||
2461 | ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8)); | 2479 | ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8)); |
2462 | 2480 | ||
2463 | return rsm_enter_protected_mode(ctxt, cr0, cr4); | 2481 | return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4); |
2464 | } | 2482 | } |
2465 | 2483 | ||
2466 | static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase) | 2484 | static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase) |
2467 | { | 2485 | { |
2468 | struct desc_struct desc; | 2486 | struct desc_struct desc; |
2469 | struct desc_ptr dt; | 2487 | struct desc_ptr dt; |
2470 | u64 val, cr0, cr4; | 2488 | u64 val, cr0, cr3, cr4; |
2471 | u32 base3; | 2489 | u32 base3; |
2472 | u16 selector; | 2490 | u16 selector; |
2473 | int i, r; | 2491 | int i, r; |
@@ -2484,7 +2502,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase) | |||
2484 | ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1); | 2502 | ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1); |
2485 | 2503 | ||
2486 | cr0 = GET_SMSTATE(u64, smbase, 0x7f58); | 2504 | cr0 = GET_SMSTATE(u64, smbase, 0x7f58); |
2487 | ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u64, smbase, 0x7f50)); | 2505 | cr3 = GET_SMSTATE(u64, smbase, 0x7f50); |
2488 | cr4 = GET_SMSTATE(u64, smbase, 0x7f48); | 2506 | cr4 = GET_SMSTATE(u64, smbase, 0x7f48); |
2489 | ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00)); | 2507 | ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00)); |
2490 | val = GET_SMSTATE(u64, smbase, 0x7ed0); | 2508 | val = GET_SMSTATE(u64, smbase, 0x7ed0); |
@@ -2512,7 +2530,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase) | |||
2512 | dt.address = GET_SMSTATE(u64, smbase, 0x7e68); | 2530 | dt.address = GET_SMSTATE(u64, smbase, 0x7e68); |
2513 | ctxt->ops->set_gdt(ctxt, &dt); | 2531 | ctxt->ops->set_gdt(ctxt, &dt); |
2514 | 2532 | ||
2515 | r = rsm_enter_protected_mode(ctxt, cr0, cr4); | 2533 | r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4); |
2516 | if (r != X86EMUL_CONTINUE) | 2534 | if (r != X86EMUL_CONTINUE) |
2517 | return r; | 2535 | return r; |
2518 | 2536 | ||
@@ -4960,6 +4978,8 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) | |||
4960 | bool op_prefix = false; | 4978 | bool op_prefix = false; |
4961 | bool has_seg_override = false; | 4979 | bool has_seg_override = false; |
4962 | struct opcode opcode; | 4980 | struct opcode opcode; |
4981 | u16 dummy; | ||
4982 | struct desc_struct desc; | ||
4963 | 4983 | ||
4964 | ctxt->memop.type = OP_NONE; | 4984 | ctxt->memop.type = OP_NONE; |
4965 | ctxt->memopp = NULL; | 4985 | ctxt->memopp = NULL; |
@@ -4978,6 +4998,11 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) | |||
4978 | switch (mode) { | 4998 | switch (mode) { |
4979 | case X86EMUL_MODE_REAL: | 4999 | case X86EMUL_MODE_REAL: |
4980 | case X86EMUL_MODE_VM86: | 5000 | case X86EMUL_MODE_VM86: |
5001 | def_op_bytes = def_ad_bytes = 2; | ||
5002 | ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS); | ||
5003 | if (desc.d) | ||
5004 | def_op_bytes = def_ad_bytes = 4; | ||
5005 | break; | ||
4981 | case X86EMUL_MODE_PROT16: | 5006 | case X86EMUL_MODE_PROT16: |
4982 | def_op_bytes = def_ad_bytes = 2; | 5007 | def_op_bytes = def_ad_bytes = 2; |
4983 | break; | 5008 | break; |
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c index 3aab53f8cad2..d380111351c0 100644 --- a/arch/x86/kvm/ioapic.c +++ b/arch/x86/kvm/ioapic.c | |||
@@ -247,8 +247,7 @@ void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap) | |||
247 | index == RTC_GSI) { | 247 | index == RTC_GSI) { |
248 | if (kvm_apic_match_dest(vcpu, NULL, 0, | 248 | if (kvm_apic_match_dest(vcpu, NULL, 0, |
249 | e->fields.dest_id, e->fields.dest_mode) || | 249 | e->fields.dest_id, e->fields.dest_mode) || |
250 | (e->fields.trig_mode == IOAPIC_EDGE_TRIG && | 250 | kvm_apic_pending_eoi(vcpu, e->fields.vector)) |
251 | kvm_apic_pending_eoi(vcpu, e->fields.vector))) | ||
252 | __set_bit(e->fields.vector, | 251 | __set_bit(e->fields.vector, |
253 | (unsigned long *)eoi_exit_bitmap); | 252 | (unsigned long *)eoi_exit_bitmap); |
254 | } | 253 | } |
@@ -269,6 +268,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) | |||
269 | { | 268 | { |
270 | unsigned index; | 269 | unsigned index; |
271 | bool mask_before, mask_after; | 270 | bool mask_before, mask_after; |
271 | int old_remote_irr, old_delivery_status; | ||
272 | union kvm_ioapic_redirect_entry *e; | 272 | union kvm_ioapic_redirect_entry *e; |
273 | 273 | ||
274 | switch (ioapic->ioregsel) { | 274 | switch (ioapic->ioregsel) { |
@@ -291,14 +291,28 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) | |||
291 | return; | 291 | return; |
292 | e = &ioapic->redirtbl[index]; | 292 | e = &ioapic->redirtbl[index]; |
293 | mask_before = e->fields.mask; | 293 | mask_before = e->fields.mask; |
294 | /* Preserve read-only fields */ | ||
295 | old_remote_irr = e->fields.remote_irr; | ||
296 | old_delivery_status = e->fields.delivery_status; | ||
294 | if (ioapic->ioregsel & 1) { | 297 | if (ioapic->ioregsel & 1) { |
295 | e->bits &= 0xffffffff; | 298 | e->bits &= 0xffffffff; |
296 | e->bits |= (u64) val << 32; | 299 | e->bits |= (u64) val << 32; |
297 | } else { | 300 | } else { |
298 | e->bits &= ~0xffffffffULL; | 301 | e->bits &= ~0xffffffffULL; |
299 | e->bits |= (u32) val; | 302 | e->bits |= (u32) val; |
300 | e->fields.remote_irr = 0; | ||
301 | } | 303 | } |
304 | e->fields.remote_irr = old_remote_irr; | ||
305 | e->fields.delivery_status = old_delivery_status; | ||
306 | |||
307 | /* | ||
308 | * Some OSes (Linux, Xen) assume that Remote IRR bit will | ||
309 | * be cleared by IOAPIC hardware when the entry is configured | ||
310 | * as edge-triggered. This behavior is used to simulate an | ||
311 | * explicit EOI on IOAPICs that don't have the EOI register. | ||
312 | */ | ||
313 | if (e->fields.trig_mode == IOAPIC_EDGE_TRIG) | ||
314 | e->fields.remote_irr = 0; | ||
315 | |||
302 | mask_after = e->fields.mask; | 316 | mask_after = e->fields.mask; |
303 | if (mask_before != mask_after) | 317 | if (mask_before != mask_after) |
304 | kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after); | 318 | kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after); |
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 1049c3c9b877..2b71f2c03b9e 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -4503,7 +4503,7 @@ void kvm_mmu_setup(struct kvm_vcpu *vcpu) | |||
4503 | typedef bool (*slot_level_handler) (struct kvm *kvm, unsigned long *rmap); | 4503 | typedef bool (*slot_level_handler) (struct kvm *kvm, unsigned long *rmap); |
4504 | 4504 | ||
4505 | /* The caller should hold mmu-lock before calling this function. */ | 4505 | /* The caller should hold mmu-lock before calling this function. */ |
4506 | static bool | 4506 | static __always_inline bool |
4507 | slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot, | 4507 | slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot, |
4508 | slot_level_handler fn, int start_level, int end_level, | 4508 | slot_level_handler fn, int start_level, int end_level, |
4509 | gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb) | 4509 | gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb) |
@@ -4533,7 +4533,7 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot, | |||
4533 | return flush; | 4533 | return flush; |
4534 | } | 4534 | } |
4535 | 4535 | ||
4536 | static bool | 4536 | static __always_inline bool |
4537 | slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot, | 4537 | slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot, |
4538 | slot_level_handler fn, int start_level, int end_level, | 4538 | slot_level_handler fn, int start_level, int end_level, |
4539 | bool lock_flush_tlb) | 4539 | bool lock_flush_tlb) |
@@ -4544,7 +4544,7 @@ slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot, | |||
4544 | lock_flush_tlb); | 4544 | lock_flush_tlb); |
4545 | } | 4545 | } |
4546 | 4546 | ||
4547 | static bool | 4547 | static __always_inline bool |
4548 | slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot, | 4548 | slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot, |
4549 | slot_level_handler fn, bool lock_flush_tlb) | 4549 | slot_level_handler fn, bool lock_flush_tlb) |
4550 | { | 4550 | { |
@@ -4552,7 +4552,7 @@ slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot, | |||
4552 | PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb); | 4552 | PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb); |
4553 | } | 4553 | } |
4554 | 4554 | ||
4555 | static bool | 4555 | static __always_inline bool |
4556 | slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot, | 4556 | slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot, |
4557 | slot_level_handler fn, bool lock_flush_tlb) | 4557 | slot_level_handler fn, bool lock_flush_tlb) |
4558 | { | 4558 | { |
@@ -4560,7 +4560,7 @@ slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot, | |||
4560 | PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb); | 4560 | PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb); |
4561 | } | 4561 | } |
4562 | 4562 | ||
4563 | static bool | 4563 | static __always_inline bool |
4564 | slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot, | 4564 | slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot, |
4565 | slot_level_handler fn, bool lock_flush_tlb) | 4565 | slot_level_handler fn, bool lock_flush_tlb) |
4566 | { | 4566 | { |
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 4b1152e57340..2038e5bacce6 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <asm/desc.h> | 37 | #include <asm/desc.h> |
38 | #include <asm/debugreg.h> | 38 | #include <asm/debugreg.h> |
39 | #include <asm/kvm_para.h> | 39 | #include <asm/kvm_para.h> |
40 | #include <asm/nospec-branch.h> | ||
40 | 41 | ||
41 | #include <asm/virtext.h> | 42 | #include <asm/virtext.h> |
42 | #include "trace.h" | 43 | #include "trace.h" |
@@ -3856,6 +3857,25 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) | |||
3856 | "mov %%r14, %c[r14](%[svm]) \n\t" | 3857 | "mov %%r14, %c[r14](%[svm]) \n\t" |
3857 | "mov %%r15, %c[r15](%[svm]) \n\t" | 3858 | "mov %%r15, %c[r15](%[svm]) \n\t" |
3858 | #endif | 3859 | #endif |
3860 | /* | ||
3861 | * Clear host registers marked as clobbered to prevent | ||
3862 | * speculative use. | ||
3863 | */ | ||
3864 | "xor %%" _ASM_BX ", %%" _ASM_BX " \n\t" | ||
3865 | "xor %%" _ASM_CX ", %%" _ASM_CX " \n\t" | ||
3866 | "xor %%" _ASM_DX ", %%" _ASM_DX " \n\t" | ||
3867 | "xor %%" _ASM_SI ", %%" _ASM_SI " \n\t" | ||
3868 | "xor %%" _ASM_DI ", %%" _ASM_DI " \n\t" | ||
3869 | #ifdef CONFIG_X86_64 | ||
3870 | "xor %%r8, %%r8 \n\t" | ||
3871 | "xor %%r9, %%r9 \n\t" | ||
3872 | "xor %%r10, %%r10 \n\t" | ||
3873 | "xor %%r11, %%r11 \n\t" | ||
3874 | "xor %%r12, %%r12 \n\t" | ||
3875 | "xor %%r13, %%r13 \n\t" | ||
3876 | "xor %%r14, %%r14 \n\t" | ||
3877 | "xor %%r15, %%r15 \n\t" | ||
3878 | #endif | ||
3859 | "pop %%" _ASM_BP | 3879 | "pop %%" _ASM_BP |
3860 | : | 3880 | : |
3861 | : [svm]"a"(svm), | 3881 | : [svm]"a"(svm), |
@@ -3885,6 +3905,9 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) | |||
3885 | #endif | 3905 | #endif |
3886 | ); | 3906 | ); |
3887 | 3907 | ||
3908 | /* Eliminate branch target predictions from guest mode */ | ||
3909 | vmexit_fill_RSB(); | ||
3910 | |||
3888 | #ifdef CONFIG_X86_64 | 3911 | #ifdef CONFIG_X86_64 |
3889 | wrmsrl(MSR_GS_BASE, svm->host.gs_base); | 3912 | wrmsrl(MSR_GS_BASE, svm->host.gs_base); |
3890 | #else | 3913 | #else |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index dcbafe53e2d4..2a1a8737015b 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -47,6 +47,7 @@ | |||
47 | #include <asm/kexec.h> | 47 | #include <asm/kexec.h> |
48 | #include <asm/apic.h> | 48 | #include <asm/apic.h> |
49 | #include <asm/irq_remapping.h> | 49 | #include <asm/irq_remapping.h> |
50 | #include <asm/nospec-branch.h> | ||
50 | 51 | ||
51 | #include "trace.h" | 52 | #include "trace.h" |
52 | #include "pmu.h" | 53 | #include "pmu.h" |
@@ -828,8 +829,16 @@ static inline short vmcs_field_to_offset(unsigned long field) | |||
828 | { | 829 | { |
829 | BUILD_BUG_ON(ARRAY_SIZE(vmcs_field_to_offset_table) > SHRT_MAX); | 830 | BUILD_BUG_ON(ARRAY_SIZE(vmcs_field_to_offset_table) > SHRT_MAX); |
830 | 831 | ||
831 | if (field >= ARRAY_SIZE(vmcs_field_to_offset_table) || | 832 | if (field >= ARRAY_SIZE(vmcs_field_to_offset_table)) |
832 | vmcs_field_to_offset_table[field] == 0) | 833 | return -ENOENT; |
834 | |||
835 | /* | ||
836 | * FIXME: Mitigation for CVE-2017-5753. To be replaced with a | ||
837 | * generic mechanism. | ||
838 | */ | ||
839 | asm("lfence"); | ||
840 | |||
841 | if (vmcs_field_to_offset_table[field] == 0) | ||
833 | return -ENOENT; | 842 | return -ENOENT; |
834 | 843 | ||
835 | return vmcs_field_to_offset_table[field]; | 844 | return vmcs_field_to_offset_table[field]; |
@@ -1107,6 +1116,11 @@ static inline bool cpu_has_vmx_invvpid_global(void) | |||
1107 | return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT; | 1116 | return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT; |
1108 | } | 1117 | } |
1109 | 1118 | ||
1119 | static inline bool cpu_has_vmx_invvpid(void) | ||
1120 | { | ||
1121 | return vmx_capability.vpid & VMX_VPID_INVVPID_BIT; | ||
1122 | } | ||
1123 | |||
1110 | static inline bool cpu_has_vmx_ept(void) | 1124 | static inline bool cpu_has_vmx_ept(void) |
1111 | { | 1125 | { |
1112 | return vmcs_config.cpu_based_2nd_exec_ctrl & | 1126 | return vmcs_config.cpu_based_2nd_exec_ctrl & |
@@ -4581,14 +4595,15 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu, | |||
4581 | 4595 | ||
4582 | if (is_guest_mode(vcpu) && | 4596 | if (is_guest_mode(vcpu) && |
4583 | vector == vmx->nested.posted_intr_nv) { | 4597 | vector == vmx->nested.posted_intr_nv) { |
4584 | /* the PIR and ON have been set by L1. */ | ||
4585 | kvm_vcpu_trigger_posted_interrupt(vcpu); | ||
4586 | /* | 4598 | /* |
4587 | * If a posted intr is not recognized by hardware, | 4599 | * If a posted intr is not recognized by hardware, |
4588 | * we will accomplish it in the next vmentry. | 4600 | * we will accomplish it in the next vmentry. |
4589 | */ | 4601 | */ |
4590 | vmx->nested.pi_pending = true; | 4602 | vmx->nested.pi_pending = true; |
4591 | kvm_make_request(KVM_REQ_EVENT, vcpu); | 4603 | kvm_make_request(KVM_REQ_EVENT, vcpu); |
4604 | /* the PIR and ON have been set by L1. */ | ||
4605 | if (!kvm_vcpu_trigger_posted_interrupt(vcpu)) | ||
4606 | kvm_vcpu_kick(vcpu); | ||
4592 | return 0; | 4607 | return 0; |
4593 | } | 4608 | } |
4594 | return -1; | 4609 | return -1; |
@@ -4940,7 +4955,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) | |||
4940 | vmcs_write64(GUEST_IA32_DEBUGCTL, 0); | 4955 | vmcs_write64(GUEST_IA32_DEBUGCTL, 0); |
4941 | } | 4956 | } |
4942 | 4957 | ||
4943 | vmcs_writel(GUEST_RFLAGS, 0x02); | 4958 | kvm_set_rflags(vcpu, X86_EFLAGS_FIXED); |
4944 | kvm_rip_write(vcpu, 0xfff0); | 4959 | kvm_rip_write(vcpu, 0xfff0); |
4945 | 4960 | ||
4946 | vmcs_writel(GUEST_GDTR_BASE, 0); | 4961 | vmcs_writel(GUEST_GDTR_BASE, 0); |
@@ -6009,7 +6024,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) | |||
6009 | if (test_bit(KVM_REQ_EVENT, &vcpu->requests)) | 6024 | if (test_bit(KVM_REQ_EVENT, &vcpu->requests)) |
6010 | return 1; | 6025 | return 1; |
6011 | 6026 | ||
6012 | err = emulate_instruction(vcpu, EMULTYPE_NO_REEXECUTE); | 6027 | err = emulate_instruction(vcpu, 0); |
6013 | 6028 | ||
6014 | if (err == EMULATE_USER_EXIT) { | 6029 | if (err == EMULATE_USER_EXIT) { |
6015 | ++vcpu->stat.mmio_exits; | 6030 | ++vcpu->stat.mmio_exits; |
@@ -6199,8 +6214,10 @@ static __init int hardware_setup(void) | |||
6199 | if (boot_cpu_has(X86_FEATURE_NX)) | 6214 | if (boot_cpu_has(X86_FEATURE_NX)) |
6200 | kvm_enable_efer_bits(EFER_NX); | 6215 | kvm_enable_efer_bits(EFER_NX); |
6201 | 6216 | ||
6202 | if (!cpu_has_vmx_vpid()) | 6217 | if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() || |
6218 | !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global())) | ||
6203 | enable_vpid = 0; | 6219 | enable_vpid = 0; |
6220 | |||
6204 | if (!cpu_has_vmx_shadow_vmcs()) | 6221 | if (!cpu_has_vmx_shadow_vmcs()) |
6205 | enable_shadow_vmcs = 0; | 6222 | enable_shadow_vmcs = 0; |
6206 | if (enable_shadow_vmcs) | 6223 | if (enable_shadow_vmcs) |
@@ -8616,6 +8633,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) | |||
8616 | /* Save guest registers, load host registers, keep flags */ | 8633 | /* Save guest registers, load host registers, keep flags */ |
8617 | "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t" | 8634 | "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t" |
8618 | "pop %0 \n\t" | 8635 | "pop %0 \n\t" |
8636 | "setbe %c[fail](%0)\n\t" | ||
8619 | "mov %%" _ASM_AX ", %c[rax](%0) \n\t" | 8637 | "mov %%" _ASM_AX ", %c[rax](%0) \n\t" |
8620 | "mov %%" _ASM_BX ", %c[rbx](%0) \n\t" | 8638 | "mov %%" _ASM_BX ", %c[rbx](%0) \n\t" |
8621 | __ASM_SIZE(pop) " %c[rcx](%0) \n\t" | 8639 | __ASM_SIZE(pop) " %c[rcx](%0) \n\t" |
@@ -8632,12 +8650,23 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) | |||
8632 | "mov %%r13, %c[r13](%0) \n\t" | 8650 | "mov %%r13, %c[r13](%0) \n\t" |
8633 | "mov %%r14, %c[r14](%0) \n\t" | 8651 | "mov %%r14, %c[r14](%0) \n\t" |
8634 | "mov %%r15, %c[r15](%0) \n\t" | 8652 | "mov %%r15, %c[r15](%0) \n\t" |
8653 | "xor %%r8d, %%r8d \n\t" | ||
8654 | "xor %%r9d, %%r9d \n\t" | ||
8655 | "xor %%r10d, %%r10d \n\t" | ||
8656 | "xor %%r11d, %%r11d \n\t" | ||
8657 | "xor %%r12d, %%r12d \n\t" | ||
8658 | "xor %%r13d, %%r13d \n\t" | ||
8659 | "xor %%r14d, %%r14d \n\t" | ||
8660 | "xor %%r15d, %%r15d \n\t" | ||
8635 | #endif | 8661 | #endif |
8636 | "mov %%cr2, %%" _ASM_AX " \n\t" | 8662 | "mov %%cr2, %%" _ASM_AX " \n\t" |
8637 | "mov %%" _ASM_AX ", %c[cr2](%0) \n\t" | 8663 | "mov %%" _ASM_AX ", %c[cr2](%0) \n\t" |
8638 | 8664 | ||
8665 | "xor %%eax, %%eax \n\t" | ||
8666 | "xor %%ebx, %%ebx \n\t" | ||
8667 | "xor %%esi, %%esi \n\t" | ||
8668 | "xor %%edi, %%edi \n\t" | ||
8639 | "pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t" | 8669 | "pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t" |
8640 | "setbe %c[fail](%0) \n\t" | ||
8641 | ".pushsection .rodata \n\t" | 8670 | ".pushsection .rodata \n\t" |
8642 | ".global vmx_return \n\t" | 8671 | ".global vmx_return \n\t" |
8643 | "vmx_return: " _ASM_PTR " 2b \n\t" | 8672 | "vmx_return: " _ASM_PTR " 2b \n\t" |
@@ -8674,6 +8703,9 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) | |||
8674 | #endif | 8703 | #endif |
8675 | ); | 8704 | ); |
8676 | 8705 | ||
8706 | /* Eliminate branch target predictions from guest mode */ | ||
8707 | vmexit_fill_RSB(); | ||
8708 | |||
8677 | /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */ | 8709 | /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */ |
8678 | if (debugctlmsr) | 8710 | if (debugctlmsr) |
8679 | update_debugctlmsr(debugctlmsr); | 8711 | update_debugctlmsr(debugctlmsr); |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index df81717a92f3..3900d34980de 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -759,7 +759,8 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) | |||
759 | return 1; | 759 | return 1; |
760 | 760 | ||
761 | /* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */ | 761 | /* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */ |
762 | if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu)) | 762 | if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_ASID_MASK) || |
763 | !is_long_mode(vcpu)) | ||
763 | return 1; | 764 | return 1; |
764 | } | 765 | } |
765 | 766 | ||
@@ -4113,7 +4114,7 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v) | |||
4113 | addr, n, v)) | 4114 | addr, n, v)) |
4114 | && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v)) | 4115 | && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v)) |
4115 | break; | 4116 | break; |
4116 | trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v); | 4117 | trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, v); |
4117 | handled += n; | 4118 | handled += n; |
4118 | addr += n; | 4119 | addr += n; |
4119 | len -= n; | 4120 | len -= n; |
@@ -4361,7 +4362,7 @@ static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes) | |||
4361 | { | 4362 | { |
4362 | if (vcpu->mmio_read_completed) { | 4363 | if (vcpu->mmio_read_completed) { |
4363 | trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, | 4364 | trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, |
4364 | vcpu->mmio_fragments[0].gpa, *(u64 *)val); | 4365 | vcpu->mmio_fragments[0].gpa, val); |
4365 | vcpu->mmio_read_completed = 0; | 4366 | vcpu->mmio_read_completed = 0; |
4366 | return 1; | 4367 | return 1; |
4367 | } | 4368 | } |
@@ -4383,14 +4384,14 @@ static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, | |||
4383 | 4384 | ||
4384 | static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val) | 4385 | static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val) |
4385 | { | 4386 | { |
4386 | trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val); | 4387 | trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, val); |
4387 | return vcpu_mmio_write(vcpu, gpa, bytes, val); | 4388 | return vcpu_mmio_write(vcpu, gpa, bytes, val); |
4388 | } | 4389 | } |
4389 | 4390 | ||
4390 | static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, | 4391 | static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, |
4391 | void *val, int bytes) | 4392 | void *val, int bytes) |
4392 | { | 4393 | { |
4393 | trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0); | 4394 | trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, NULL); |
4394 | return X86EMUL_IO_NEEDED; | 4395 | return X86EMUL_IO_NEEDED; |
4395 | } | 4396 | } |
4396 | 4397 | ||
@@ -5152,7 +5153,7 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu) | |||
5152 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 5153 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
5153 | vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; | 5154 | vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; |
5154 | vcpu->run->internal.ndata = 0; | 5155 | vcpu->run->internal.ndata = 0; |
5155 | r = EMULATE_FAIL; | 5156 | r = EMULATE_USER_EXIT; |
5156 | } | 5157 | } |
5157 | kvm_queue_exception(vcpu, UD_VECTOR); | 5158 | kvm_queue_exception(vcpu, UD_VECTOR); |
5158 | 5159 | ||
@@ -6941,7 +6942,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
6941 | #endif | 6942 | #endif |
6942 | 6943 | ||
6943 | kvm_rip_write(vcpu, regs->rip); | 6944 | kvm_rip_write(vcpu, regs->rip); |
6944 | kvm_set_rflags(vcpu, regs->rflags); | 6945 | kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED); |
6945 | 6946 | ||
6946 | vcpu->arch.exception.pending = false; | 6947 | vcpu->arch.exception.pending = false; |
6947 | 6948 | ||
@@ -8230,11 +8231,11 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, | |||
8230 | { | 8231 | { |
8231 | struct x86_exception fault; | 8232 | struct x86_exception fault; |
8232 | 8233 | ||
8233 | trace_kvm_async_pf_ready(work->arch.token, work->gva); | ||
8234 | if (work->wakeup_all) | 8234 | if (work->wakeup_all) |
8235 | work->arch.token = ~0; /* broadcast wakeup */ | 8235 | work->arch.token = ~0; /* broadcast wakeup */ |
8236 | else | 8236 | else |
8237 | kvm_del_async_pf_gfn(vcpu, work->arch.gfn); | 8237 | kvm_del_async_pf_gfn(vcpu, work->arch.gfn); |
8238 | trace_kvm_async_pf_ready(work->arch.token, work->gva); | ||
8238 | 8239 | ||
8239 | if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) && | 8240 | if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) && |
8240 | !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) { | 8241 | !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) { |
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index f2587888d987..12a34d15b648 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile | |||
@@ -21,6 +21,7 @@ lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o | |||
21 | lib-y += memcpy_$(BITS).o | 21 | lib-y += memcpy_$(BITS).o |
22 | lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o | 22 | lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o |
23 | lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o | 23 | lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o |
24 | lib-$(CONFIG_RETPOLINE) += retpoline.o | ||
24 | 25 | ||
25 | obj-y += msr.o msr-reg.o msr-reg-export.o | 26 | obj-y += msr.o msr-reg.o msr-reg-export.o |
26 | 27 | ||
diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S index c1e623209853..90353a26ed95 100644 --- a/arch/x86/lib/checksum_32.S +++ b/arch/x86/lib/checksum_32.S | |||
@@ -28,7 +28,8 @@ | |||
28 | #include <linux/linkage.h> | 28 | #include <linux/linkage.h> |
29 | #include <asm/errno.h> | 29 | #include <asm/errno.h> |
30 | #include <asm/asm.h> | 30 | #include <asm/asm.h> |
31 | 31 | #include <asm/nospec-branch.h> | |
32 | |||
32 | /* | 33 | /* |
33 | * computes a partial checksum, e.g. for TCP/UDP fragments | 34 | * computes a partial checksum, e.g. for TCP/UDP fragments |
34 | */ | 35 | */ |
@@ -155,7 +156,7 @@ ENTRY(csum_partial) | |||
155 | negl %ebx | 156 | negl %ebx |
156 | lea 45f(%ebx,%ebx,2), %ebx | 157 | lea 45f(%ebx,%ebx,2), %ebx |
157 | testl %esi, %esi | 158 | testl %esi, %esi |
158 | jmp *%ebx | 159 | JMP_NOSPEC %ebx |
159 | 160 | ||
160 | # Handle 2-byte-aligned regions | 161 | # Handle 2-byte-aligned regions |
161 | 20: addw (%esi), %ax | 162 | 20: addw (%esi), %ax |
@@ -437,7 +438,7 @@ ENTRY(csum_partial_copy_generic) | |||
437 | andl $-32,%edx | 438 | andl $-32,%edx |
438 | lea 3f(%ebx,%ebx), %ebx | 439 | lea 3f(%ebx,%ebx), %ebx |
439 | testl %esi, %esi | 440 | testl %esi, %esi |
440 | jmp *%ebx | 441 | JMP_NOSPEC %ebx |
441 | 1: addl $64,%esi | 442 | 1: addl $64,%esi |
442 | addl $64,%edi | 443 | addl $64,%edi |
443 | SRC(movb -32(%edx),%bl) ; SRC(movb (%edx),%bl) | 444 | SRC(movb -32(%edx),%bl) ; SRC(movb (%edx),%bl) |
diff --git a/arch/x86/lib/cmdline.c b/arch/x86/lib/cmdline.c index 422db000d727..a744506856b1 100644 --- a/arch/x86/lib/cmdline.c +++ b/arch/x86/lib/cmdline.c | |||
@@ -82,3 +82,108 @@ int cmdline_find_option_bool(const char *cmdline, const char *option) | |||
82 | 82 | ||
83 | return 0; /* Buffer overrun */ | 83 | return 0; /* Buffer overrun */ |
84 | } | 84 | } |
85 | |||
86 | /* | ||
87 | * Find a non-boolean option (i.e. option=argument). In accordance with | ||
88 | * standard Linux practice, if this option is repeated, this returns the | ||
89 | * last instance on the command line. | ||
90 | * | ||
91 | * @cmdline: the cmdline string | ||
92 | * @max_cmdline_size: the maximum size of cmdline | ||
93 | * @option: option string to look for | ||
94 | * @buffer: memory buffer to return the option argument | ||
95 | * @bufsize: size of the supplied memory buffer | ||
96 | * | ||
97 | * Returns the length of the argument (regardless of if it was | ||
98 | * truncated to fit in the buffer), or -1 on not found. | ||
99 | */ | ||
100 | static int | ||
101 | __cmdline_find_option(const char *cmdline, int max_cmdline_size, | ||
102 | const char *option, char *buffer, int bufsize) | ||
103 | { | ||
104 | char c; | ||
105 | int pos = 0, len = -1; | ||
106 | const char *opptr = NULL; | ||
107 | char *bufptr = buffer; | ||
108 | enum { | ||
109 | st_wordstart = 0, /* Start of word/after whitespace */ | ||
110 | st_wordcmp, /* Comparing this word */ | ||
111 | st_wordskip, /* Miscompare, skip */ | ||
112 | st_bufcpy, /* Copying this to buffer */ | ||
113 | } state = st_wordstart; | ||
114 | |||
115 | if (!cmdline) | ||
116 | return -1; /* No command line */ | ||
117 | |||
118 | /* | ||
119 | * This 'pos' check ensures we do not overrun | ||
120 | * a non-NULL-terminated 'cmdline' | ||
121 | */ | ||
122 | while (pos++ < max_cmdline_size) { | ||
123 | c = *(char *)cmdline++; | ||
124 | if (!c) | ||
125 | break; | ||
126 | |||
127 | switch (state) { | ||
128 | case st_wordstart: | ||
129 | if (myisspace(c)) | ||
130 | break; | ||
131 | |||
132 | state = st_wordcmp; | ||
133 | opptr = option; | ||
134 | /* fall through */ | ||
135 | |||
136 | case st_wordcmp: | ||
137 | if ((c == '=') && !*opptr) { | ||
138 | /* | ||
139 | * We matched all the way to the end of the | ||
140 | * option we were looking for, prepare to | ||
141 | * copy the argument. | ||
142 | */ | ||
143 | len = 0; | ||
144 | bufptr = buffer; | ||
145 | state = st_bufcpy; | ||
146 | break; | ||
147 | } else if (c == *opptr++) { | ||
148 | /* | ||
149 | * We are currently matching, so continue | ||
150 | * to the next character on the cmdline. | ||
151 | */ | ||
152 | break; | ||
153 | } | ||
154 | state = st_wordskip; | ||
155 | /* fall through */ | ||
156 | |||
157 | case st_wordskip: | ||
158 | if (myisspace(c)) | ||
159 | state = st_wordstart; | ||
160 | break; | ||
161 | |||
162 | case st_bufcpy: | ||
163 | if (myisspace(c)) { | ||
164 | state = st_wordstart; | ||
165 | } else { | ||
166 | /* | ||
167 | * Increment len, but don't overrun the | ||
168 | * supplied buffer and leave room for the | ||
169 | * NULL terminator. | ||
170 | */ | ||
171 | if (++len < bufsize) | ||
172 | *bufptr++ = c; | ||
173 | } | ||
174 | break; | ||
175 | } | ||
176 | } | ||
177 | |||
178 | if (bufsize) | ||
179 | *bufptr = '\0'; | ||
180 | |||
181 | return len; | ||
182 | } | ||
183 | |||
184 | int cmdline_find_option(const char *cmdline, const char *option, char *buffer, | ||
185 | int bufsize) | ||
186 | { | ||
187 | return __cmdline_find_option(cmdline, COMMAND_LINE_SIZE, option, | ||
188 | buffer, bufsize); | ||
189 | } | ||
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c index e912b2f6d36e..45772560aceb 100644 --- a/arch/x86/lib/delay.c +++ b/arch/x86/lib/delay.c | |||
@@ -93,6 +93,13 @@ static void delay_mwaitx(unsigned long __loops) | |||
93 | { | 93 | { |
94 | u64 start, end, delay, loops = __loops; | 94 | u64 start, end, delay, loops = __loops; |
95 | 95 | ||
96 | /* | ||
97 | * Timer value of 0 causes MWAITX to wait indefinitely, unless there | ||
98 | * is a store on the memory monitored by MONITORX. | ||
99 | */ | ||
100 | if (loops == 0) | ||
101 | return; | ||
102 | |||
96 | start = rdtsc_ordered(); | 103 | start = rdtsc_ordered(); |
97 | 104 | ||
98 | for (;;) { | 105 | for (;;) { |
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S new file mode 100644 index 000000000000..e611a124c442 --- /dev/null +++ b/arch/x86/lib/retpoline.S | |||
@@ -0,0 +1,49 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | |||
3 | #include <linux/stringify.h> | ||
4 | #include <linux/linkage.h> | ||
5 | #include <asm/dwarf2.h> | ||
6 | #include <asm/cpufeature.h> | ||
7 | #include <asm/alternative-asm.h> | ||
8 | #include <asm-generic/export.h> | ||
9 | #include <asm/nospec-branch.h> | ||
10 | |||
11 | .macro THUNK reg | ||
12 | .section .text.__x86.indirect_thunk | ||
13 | |||
14 | ENTRY(__x86_indirect_thunk_\reg) | ||
15 | CFI_STARTPROC | ||
16 | JMP_NOSPEC %\reg | ||
17 | CFI_ENDPROC | ||
18 | ENDPROC(__x86_indirect_thunk_\reg) | ||
19 | .endm | ||
20 | |||
21 | /* | ||
22 | * Despite being an assembler file we can't just use .irp here | ||
23 | * because __KSYM_DEPS__ only uses the C preprocessor and would | ||
24 | * only see one instance of "__x86_indirect_thunk_\reg" rather | ||
25 | * than one per register with the correct names. So we do it | ||
26 | * the simple and nasty way... | ||
27 | */ | ||
28 | #define __EXPORT_THUNK(sym) _ASM_NOKPROBE(sym); EXPORT_SYMBOL(sym) | ||
29 | #define EXPORT_THUNK(reg) __EXPORT_THUNK(__x86_indirect_thunk_ ## reg) | ||
30 | #define GENERATE_THUNK(reg) THUNK reg ; EXPORT_THUNK(reg) | ||
31 | |||
32 | GENERATE_THUNK(_ASM_AX) | ||
33 | GENERATE_THUNK(_ASM_BX) | ||
34 | GENERATE_THUNK(_ASM_CX) | ||
35 | GENERATE_THUNK(_ASM_DX) | ||
36 | GENERATE_THUNK(_ASM_SI) | ||
37 | GENERATE_THUNK(_ASM_DI) | ||
38 | GENERATE_THUNK(_ASM_BP) | ||
39 | GENERATE_THUNK(_ASM_SP) | ||
40 | #ifdef CONFIG_64BIT | ||
41 | GENERATE_THUNK(r8) | ||
42 | GENERATE_THUNK(r9) | ||
43 | GENERATE_THUNK(r10) | ||
44 | GENERATE_THUNK(r11) | ||
45 | GENERATE_THUNK(r12) | ||
46 | GENERATE_THUNK(r13) | ||
47 | GENERATE_THUNK(r14) | ||
48 | GENERATE_THUNK(r15) | ||
49 | #endif | ||
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 65c47fda26fc..61e6cead9c4a 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile | |||
@@ -1,5 +1,5 @@ | |||
1 | obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ | 1 | obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ |
2 | pat.o pgtable.o physaddr.o gup.o setup_nx.o | 2 | pat.o pgtable.o physaddr.o gup.o setup_nx.o tlb.o |
3 | 3 | ||
4 | # Make sure __phys_addr has no stackprotector | 4 | # Make sure __phys_addr has no stackprotector |
5 | nostackp := $(call cc-option, -fno-stack-protector) | 5 | nostackp := $(call cc-option, -fno-stack-protector) |
@@ -9,7 +9,6 @@ CFLAGS_setup_nx.o := $(nostackp) | |||
9 | CFLAGS_fault.o := -I$(src)/../include/asm/trace | 9 | CFLAGS_fault.o := -I$(src)/../include/asm/trace |
10 | 10 | ||
11 | obj-$(CONFIG_X86_PAT) += pat_rbtree.o | 11 | obj-$(CONFIG_X86_PAT) += pat_rbtree.o |
12 | obj-$(CONFIG_SMP) += tlb.o | ||
13 | 12 | ||
14 | obj-$(CONFIG_X86_32) += pgtable_32.o iomap_32.o | 13 | obj-$(CONFIG_X86_32) += pgtable_32.o iomap_32.o |
15 | 14 | ||
@@ -33,3 +32,4 @@ obj-$(CONFIG_ACPI_NUMA) += srat.o | |||
33 | obj-$(CONFIG_NUMA_EMU) += numa_emulation.o | 32 | obj-$(CONFIG_NUMA_EMU) += numa_emulation.o |
34 | 33 | ||
35 | obj-$(CONFIG_X86_INTEL_MPX) += mpx.o | 34 | obj-$(CONFIG_X86_INTEL_MPX) += mpx.o |
35 | obj-$(CONFIG_PAGE_TABLE_ISOLATION) += kaiser.o | ||
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 3aebbd6c6f5f..151fd33e9043 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c | |||
@@ -165,7 +165,7 @@ static void __init probe_page_size_mask(void) | |||
165 | cr4_set_bits_and_update_boot(X86_CR4_PSE); | 165 | cr4_set_bits_and_update_boot(X86_CR4_PSE); |
166 | 166 | ||
167 | /* Enable PGE if available */ | 167 | /* Enable PGE if available */ |
168 | if (cpu_has_pge) { | 168 | if (cpu_has_pge && !kaiser_enabled) { |
169 | cr4_set_bits_and_update_boot(X86_CR4_PGE); | 169 | cr4_set_bits_and_update_boot(X86_CR4_PGE); |
170 | __supported_pte_mask |= _PAGE_GLOBAL; | 170 | __supported_pte_mask |= _PAGE_GLOBAL; |
171 | } else | 171 | } else |
@@ -753,13 +753,11 @@ void __init zone_sizes_init(void) | |||
753 | } | 753 | } |
754 | 754 | ||
755 | DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { | 755 | DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { |
756 | #ifdef CONFIG_SMP | ||
757 | .active_mm = &init_mm, | 756 | .active_mm = &init_mm, |
758 | .state = 0, | 757 | .state = 0, |
759 | #endif | ||
760 | .cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */ | 758 | .cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */ |
761 | }; | 759 | }; |
762 | EXPORT_SYMBOL_GPL(cpu_tlbstate); | 760 | EXPORT_PER_CPU_SYMBOL(cpu_tlbstate); |
763 | 761 | ||
764 | void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache) | 762 | void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache) |
765 | { | 763 | { |
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index e08d141844ee..97b6b0164dcb 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c | |||
@@ -395,6 +395,16 @@ void __init cleanup_highmap(void) | |||
395 | continue; | 395 | continue; |
396 | if (vaddr < (unsigned long) _text || vaddr > end) | 396 | if (vaddr < (unsigned long) _text || vaddr > end) |
397 | set_pmd(pmd, __pmd(0)); | 397 | set_pmd(pmd, __pmd(0)); |
398 | else if (kaiser_enabled) { | ||
399 | /* | ||
400 | * level2_kernel_pgt is initialized with _PAGE_GLOBAL: | ||
401 | * clear that now. This is not important, so long as | ||
402 | * CR4.PGE remains clear, but it removes an anomaly. | ||
403 | * Physical mapping setup below avoids _PAGE_GLOBAL | ||
404 | * by use of massage_pgprot() inside pfn_pte() etc. | ||
405 | */ | ||
406 | set_pmd(pmd, pmd_clear_flags(*pmd, _PAGE_GLOBAL)); | ||
407 | } | ||
398 | } | 408 | } |
399 | } | 409 | } |
400 | 410 | ||
diff --git a/arch/x86/mm/kaiser.c b/arch/x86/mm/kaiser.c new file mode 100644 index 000000000000..7a72e32e4806 --- /dev/null +++ b/arch/x86/mm/kaiser.c | |||
@@ -0,0 +1,484 @@ | |||
1 | #include <linux/bug.h> | ||
2 | #include <linux/kernel.h> | ||
3 | #include <linux/errno.h> | ||
4 | #include <linux/string.h> | ||
5 | #include <linux/types.h> | ||
6 | #include <linux/bug.h> | ||
7 | #include <linux/init.h> | ||
8 | #include <linux/interrupt.h> | ||
9 | #include <linux/spinlock.h> | ||
10 | #include <linux/mm.h> | ||
11 | #include <linux/uaccess.h> | ||
12 | #include <linux/ftrace.h> | ||
13 | |||
14 | #undef pr_fmt | ||
15 | #define pr_fmt(fmt) "Kernel/User page tables isolation: " fmt | ||
16 | |||
17 | #include <asm/kaiser.h> | ||
18 | #include <asm/tlbflush.h> /* to verify its kaiser declarations */ | ||
19 | #include <asm/pgtable.h> | ||
20 | #include <asm/pgalloc.h> | ||
21 | #include <asm/desc.h> | ||
22 | #include <asm/cmdline.h> | ||
23 | #include <asm/vsyscall.h> | ||
24 | |||
25 | int kaiser_enabled __read_mostly = 1; | ||
26 | EXPORT_SYMBOL(kaiser_enabled); /* for inlined TLB flush functions */ | ||
27 | |||
28 | __visible | ||
29 | DEFINE_PER_CPU_USER_MAPPED(unsigned long, unsafe_stack_register_backup); | ||
30 | |||
31 | /* | ||
32 | * These can have bit 63 set, so we can not just use a plain "or" | ||
33 | * instruction to get their value or'd into CR3. It would take | ||
34 | * another register. So, we use a memory reference to these instead. | ||
35 | * | ||
36 | * This is also handy because systems that do not support PCIDs | ||
37 | * just end up or'ing a 0 into their CR3, which does no harm. | ||
38 | */ | ||
39 | DEFINE_PER_CPU(unsigned long, x86_cr3_pcid_user); | ||
40 | |||
41 | /* | ||
42 | * At runtime, the only things we map are some things for CPU | ||
43 | * hotplug, and stacks for new processes. No two CPUs will ever | ||
44 | * be populating the same addresses, so we only need to ensure | ||
45 | * that we protect between two CPUs trying to allocate and | ||
46 | * populate the same page table page. | ||
47 | * | ||
48 | * Only take this lock when doing a set_p[4um]d(), but it is not | ||
49 | * needed for doing a set_pte(). We assume that only the *owner* | ||
50 | * of a given allocation will be doing this for _their_ | ||
51 | * allocation. | ||
52 | * | ||
53 | * This ensures that once a system has been running for a while | ||
54 | * and there have been stacks all over and these page tables | ||
55 | * are fully populated, there will be no further acquisitions of | ||
56 | * this lock. | ||
57 | */ | ||
58 | static DEFINE_SPINLOCK(shadow_table_allocation_lock); | ||
59 | |||
60 | /* | ||
61 | * Returns -1 on error. | ||
62 | */ | ||
63 | static inline unsigned long get_pa_from_mapping(unsigned long vaddr) | ||
64 | { | ||
65 | pgd_t *pgd; | ||
66 | pud_t *pud; | ||
67 | pmd_t *pmd; | ||
68 | pte_t *pte; | ||
69 | |||
70 | pgd = pgd_offset_k(vaddr); | ||
71 | /* | ||
72 | * We made all the kernel PGDs present in kaiser_init(). | ||
73 | * We expect them to stay that way. | ||
74 | */ | ||
75 | BUG_ON(pgd_none(*pgd)); | ||
76 | /* | ||
77 | * PGDs are either 512GB or 128TB on all x86_64 | ||
78 | * configurations. We don't handle these. | ||
79 | */ | ||
80 | BUG_ON(pgd_large(*pgd)); | ||
81 | |||
82 | pud = pud_offset(pgd, vaddr); | ||
83 | if (pud_none(*pud)) { | ||
84 | WARN_ON_ONCE(1); | ||
85 | return -1; | ||
86 | } | ||
87 | |||
88 | if (pud_large(*pud)) | ||
89 | return (pud_pfn(*pud) << PAGE_SHIFT) | (vaddr & ~PUD_PAGE_MASK); | ||
90 | |||
91 | pmd = pmd_offset(pud, vaddr); | ||
92 | if (pmd_none(*pmd)) { | ||
93 | WARN_ON_ONCE(1); | ||
94 | return -1; | ||
95 | } | ||
96 | |||
97 | if (pmd_large(*pmd)) | ||
98 | return (pmd_pfn(*pmd) << PAGE_SHIFT) | (vaddr & ~PMD_PAGE_MASK); | ||
99 | |||
100 | pte = pte_offset_kernel(pmd, vaddr); | ||
101 | if (pte_none(*pte)) { | ||
102 | WARN_ON_ONCE(1); | ||
103 | return -1; | ||
104 | } | ||
105 | |||
106 | return (pte_pfn(*pte) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK); | ||
107 | } | ||
108 | |||
109 | /* | ||
110 | * This is a relatively normal page table walk, except that it | ||
111 | * also tries to allocate page tables pages along the way. | ||
112 | * | ||
113 | * Returns a pointer to a PTE on success, or NULL on failure. | ||
114 | */ | ||
115 | static pte_t *kaiser_pagetable_walk(unsigned long address, bool user) | ||
116 | { | ||
117 | pmd_t *pmd; | ||
118 | pud_t *pud; | ||
119 | pgd_t *pgd = native_get_shadow_pgd(pgd_offset_k(address)); | ||
120 | gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); | ||
121 | unsigned long prot = _KERNPG_TABLE; | ||
122 | |||
123 | if (pgd_none(*pgd)) { | ||
124 | WARN_ONCE(1, "All shadow pgds should have been populated"); | ||
125 | return NULL; | ||
126 | } | ||
127 | BUILD_BUG_ON(pgd_large(*pgd) != 0); | ||
128 | |||
129 | if (user) { | ||
130 | /* | ||
131 | * The vsyscall page is the only page that will have | ||
132 | * _PAGE_USER set. Catch everything else. | ||
133 | */ | ||
134 | BUG_ON(address != VSYSCALL_ADDR); | ||
135 | |||
136 | set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER)); | ||
137 | prot = _PAGE_TABLE; | ||
138 | } | ||
139 | |||
140 | pud = pud_offset(pgd, address); | ||
141 | /* The shadow page tables do not use large mappings: */ | ||
142 | if (pud_large(*pud)) { | ||
143 | WARN_ON(1); | ||
144 | return NULL; | ||
145 | } | ||
146 | if (pud_none(*pud)) { | ||
147 | unsigned long new_pmd_page = __get_free_page(gfp); | ||
148 | if (!new_pmd_page) | ||
149 | return NULL; | ||
150 | spin_lock(&shadow_table_allocation_lock); | ||
151 | if (pud_none(*pud)) { | ||
152 | set_pud(pud, __pud(prot | __pa(new_pmd_page))); | ||
153 | __inc_zone_page_state(virt_to_page((void *) | ||
154 | new_pmd_page), NR_KAISERTABLE); | ||
155 | } else | ||
156 | free_page(new_pmd_page); | ||
157 | spin_unlock(&shadow_table_allocation_lock); | ||
158 | } | ||
159 | |||
160 | pmd = pmd_offset(pud, address); | ||
161 | /* The shadow page tables do not use large mappings: */ | ||
162 | if (pmd_large(*pmd)) { | ||
163 | WARN_ON(1); | ||
164 | return NULL; | ||
165 | } | ||
166 | if (pmd_none(*pmd)) { | ||
167 | unsigned long new_pte_page = __get_free_page(gfp); | ||
168 | if (!new_pte_page) | ||
169 | return NULL; | ||
170 | spin_lock(&shadow_table_allocation_lock); | ||
171 | if (pmd_none(*pmd)) { | ||
172 | set_pmd(pmd, __pmd(prot | __pa(new_pte_page))); | ||
173 | __inc_zone_page_state(virt_to_page((void *) | ||
174 | new_pte_page), NR_KAISERTABLE); | ||
175 | } else | ||
176 | free_page(new_pte_page); | ||
177 | spin_unlock(&shadow_table_allocation_lock); | ||
178 | } | ||
179 | |||
180 | return pte_offset_kernel(pmd, address); | ||
181 | } | ||
182 | |||
183 | static int kaiser_add_user_map(const void *__start_addr, unsigned long size, | ||
184 | unsigned long flags) | ||
185 | { | ||
186 | int ret = 0; | ||
187 | pte_t *pte; | ||
188 | unsigned long start_addr = (unsigned long )__start_addr; | ||
189 | unsigned long address = start_addr & PAGE_MASK; | ||
190 | unsigned long end_addr = PAGE_ALIGN(start_addr + size); | ||
191 | unsigned long target_address; | ||
192 | |||
193 | /* | ||
194 | * It is convenient for callers to pass in __PAGE_KERNEL etc, | ||
195 | * and there is no actual harm from setting _PAGE_GLOBAL, so | ||
196 | * long as CR4.PGE is not set. But it is nonetheless troubling | ||
197 | * to see Kaiser itself setting _PAGE_GLOBAL (now that "nokaiser" | ||
198 | * requires that not to be #defined to 0): so mask it off here. | ||
199 | */ | ||
200 | flags &= ~_PAGE_GLOBAL; | ||
201 | if (!(__supported_pte_mask & _PAGE_NX)) | ||
202 | flags &= ~_PAGE_NX; | ||
203 | |||
204 | for (; address < end_addr; address += PAGE_SIZE) { | ||
205 | target_address = get_pa_from_mapping(address); | ||
206 | if (target_address == -1) { | ||
207 | ret = -EIO; | ||
208 | break; | ||
209 | } | ||
210 | pte = kaiser_pagetable_walk(address, flags & _PAGE_USER); | ||
211 | if (!pte) { | ||
212 | ret = -ENOMEM; | ||
213 | break; | ||
214 | } | ||
215 | if (pte_none(*pte)) { | ||
216 | set_pte(pte, __pte(flags | target_address)); | ||
217 | } else { | ||
218 | pte_t tmp; | ||
219 | set_pte(&tmp, __pte(flags | target_address)); | ||
220 | WARN_ON_ONCE(!pte_same(*pte, tmp)); | ||
221 | } | ||
222 | } | ||
223 | return ret; | ||
224 | } | ||
225 | |||
226 | static int kaiser_add_user_map_ptrs(const void *start, const void *end, unsigned long flags) | ||
227 | { | ||
228 | unsigned long size = end - start; | ||
229 | |||
230 | return kaiser_add_user_map(start, size, flags); | ||
231 | } | ||
232 | |||
233 | /* | ||
234 | * Ensure that the top level of the (shadow) page tables are | ||
235 | * entirely populated. This ensures that all processes that get | ||
236 | * forked have the same entries. This way, we do not have to | ||
237 | * ever go set up new entries in older processes. | ||
238 | * | ||
239 | * Note: we never free these, so there are no updates to them | ||
240 | * after this. | ||
241 | */ | ||
242 | static void __init kaiser_init_all_pgds(void) | ||
243 | { | ||
244 | pgd_t *pgd; | ||
245 | int i = 0; | ||
246 | |||
247 | pgd = native_get_shadow_pgd(pgd_offset_k((unsigned long )0)); | ||
248 | for (i = PTRS_PER_PGD / 2; i < PTRS_PER_PGD; i++) { | ||
249 | pgd_t new_pgd; | ||
250 | pud_t *pud = pud_alloc_one(&init_mm, | ||
251 | PAGE_OFFSET + i * PGDIR_SIZE); | ||
252 | if (!pud) { | ||
253 | WARN_ON(1); | ||
254 | break; | ||
255 | } | ||
256 | inc_zone_page_state(virt_to_page(pud), NR_KAISERTABLE); | ||
257 | new_pgd = __pgd(_KERNPG_TABLE |__pa(pud)); | ||
258 | /* | ||
259 | * Make sure not to stomp on some other pgd entry. | ||
260 | */ | ||
261 | if (!pgd_none(pgd[i])) { | ||
262 | WARN_ON(1); | ||
263 | continue; | ||
264 | } | ||
265 | set_pgd(pgd + i, new_pgd); | ||
266 | } | ||
267 | } | ||
268 | |||
269 | #define kaiser_add_user_map_early(start, size, flags) do { \ | ||
270 | int __ret = kaiser_add_user_map(start, size, flags); \ | ||
271 | WARN_ON(__ret); \ | ||
272 | } while (0) | ||
273 | |||
274 | #define kaiser_add_user_map_ptrs_early(start, end, flags) do { \ | ||
275 | int __ret = kaiser_add_user_map_ptrs(start, end, flags); \ | ||
276 | WARN_ON(__ret); \ | ||
277 | } while (0) | ||
278 | |||
279 | void __init kaiser_check_boottime_disable(void) | ||
280 | { | ||
281 | bool enable = true; | ||
282 | char arg[5]; | ||
283 | int ret; | ||
284 | |||
285 | if (boot_cpu_has(X86_FEATURE_XENPV)) | ||
286 | goto silent_disable; | ||
287 | |||
288 | ret = cmdline_find_option(boot_command_line, "pti", arg, sizeof(arg)); | ||
289 | if (ret > 0) { | ||
290 | if (!strncmp(arg, "on", 2)) | ||
291 | goto enable; | ||
292 | |||
293 | if (!strncmp(arg, "off", 3)) | ||
294 | goto disable; | ||
295 | |||
296 | if (!strncmp(arg, "auto", 4)) | ||
297 | goto skip; | ||
298 | } | ||
299 | |||
300 | if (cmdline_find_option_bool(boot_command_line, "nopti")) | ||
301 | goto disable; | ||
302 | |||
303 | skip: | ||
304 | if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) | ||
305 | goto disable; | ||
306 | |||
307 | enable: | ||
308 | if (enable) | ||
309 | setup_force_cpu_cap(X86_FEATURE_KAISER); | ||
310 | |||
311 | return; | ||
312 | |||
313 | disable: | ||
314 | pr_info("disabled\n"); | ||
315 | |||
316 | silent_disable: | ||
317 | kaiser_enabled = 0; | ||
318 | setup_clear_cpu_cap(X86_FEATURE_KAISER); | ||
319 | } | ||
320 | |||
321 | /* | ||
322 | * If anything in here fails, we will likely die on one of the | ||
323 | * first kernel->user transitions and init will die. But, we | ||
324 | * will have most of the kernel up by then and should be able to | ||
325 | * get a clean warning out of it. If we BUG_ON() here, we run | ||
326 | * the risk of being before we have good console output. | ||
327 | */ | ||
328 | void __init kaiser_init(void) | ||
329 | { | ||
330 | int cpu; | ||
331 | |||
332 | if (!kaiser_enabled) | ||
333 | return; | ||
334 | |||
335 | kaiser_init_all_pgds(); | ||
336 | |||
337 | /* | ||
338 | * Note that this sets _PAGE_USER and it needs to happen when the | ||
339 | * pagetable hierarchy gets created, i.e., early. Otherwise | ||
340 | * kaiser_pagetable_walk() will encounter initialized PTEs in the | ||
341 | * hierarchy and not set the proper permissions, leading to the | ||
342 | * pagefaults with page-protection violations when trying to read the | ||
343 | * vsyscall page. For example. | ||
344 | */ | ||
345 | if (vsyscall_enabled()) | ||
346 | kaiser_add_user_map_early((void *)VSYSCALL_ADDR, | ||
347 | PAGE_SIZE, | ||
348 | vsyscall_pgprot); | ||
349 | |||
350 | for_each_possible_cpu(cpu) { | ||
351 | void *percpu_vaddr = __per_cpu_user_mapped_start + | ||
352 | per_cpu_offset(cpu); | ||
353 | unsigned long percpu_sz = __per_cpu_user_mapped_end - | ||
354 | __per_cpu_user_mapped_start; | ||
355 | kaiser_add_user_map_early(percpu_vaddr, percpu_sz, | ||
356 | __PAGE_KERNEL); | ||
357 | } | ||
358 | |||
359 | /* | ||
360 | * Map the entry/exit text section, which is needed at | ||
361 | * switches from user to and from kernel. | ||
362 | */ | ||
363 | kaiser_add_user_map_ptrs_early(__entry_text_start, __entry_text_end, | ||
364 | __PAGE_KERNEL_RX); | ||
365 | |||
366 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
367 | kaiser_add_user_map_ptrs_early(__irqentry_text_start, | ||
368 | __irqentry_text_end, | ||
369 | __PAGE_KERNEL_RX); | ||
370 | #endif | ||
371 | kaiser_add_user_map_early((void *)idt_descr.address, | ||
372 | sizeof(gate_desc) * NR_VECTORS, | ||
373 | __PAGE_KERNEL_RO); | ||
374 | #ifdef CONFIG_TRACING | ||
375 | kaiser_add_user_map_early(&trace_idt_descr, | ||
376 | sizeof(trace_idt_descr), | ||
377 | __PAGE_KERNEL); | ||
378 | kaiser_add_user_map_early(&trace_idt_table, | ||
379 | sizeof(gate_desc) * NR_VECTORS, | ||
380 | __PAGE_KERNEL); | ||
381 | #endif | ||
382 | kaiser_add_user_map_early(&debug_idt_descr, sizeof(debug_idt_descr), | ||
383 | __PAGE_KERNEL); | ||
384 | kaiser_add_user_map_early(&debug_idt_table, | ||
385 | sizeof(gate_desc) * NR_VECTORS, | ||
386 | __PAGE_KERNEL); | ||
387 | |||
388 | pr_info("enabled\n"); | ||
389 | } | ||
390 | |||
391 | /* Add a mapping to the shadow mapping, and synchronize the mappings */ | ||
392 | int kaiser_add_mapping(unsigned long addr, unsigned long size, unsigned long flags) | ||
393 | { | ||
394 | if (!kaiser_enabled) | ||
395 | return 0; | ||
396 | return kaiser_add_user_map((const void *)addr, size, flags); | ||
397 | } | ||
398 | |||
399 | void kaiser_remove_mapping(unsigned long start, unsigned long size) | ||
400 | { | ||
401 | extern void unmap_pud_range_nofree(pgd_t *pgd, | ||
402 | unsigned long start, unsigned long end); | ||
403 | unsigned long end = start + size; | ||
404 | unsigned long addr, next; | ||
405 | pgd_t *pgd; | ||
406 | |||
407 | if (!kaiser_enabled) | ||
408 | return; | ||
409 | pgd = native_get_shadow_pgd(pgd_offset_k(start)); | ||
410 | for (addr = start; addr < end; pgd++, addr = next) { | ||
411 | next = pgd_addr_end(addr, end); | ||
412 | unmap_pud_range_nofree(pgd, addr, next); | ||
413 | } | ||
414 | } | ||
415 | |||
416 | /* | ||
417 | * Page table pages are page-aligned. The lower half of the top | ||
418 | * level is used for userspace and the top half for the kernel. | ||
419 | * This returns true for user pages that need to get copied into | ||
420 | * both the user and kernel copies of the page tables, and false | ||
421 | * for kernel pages that should only be in the kernel copy. | ||
422 | */ | ||
423 | static inline bool is_userspace_pgd(pgd_t *pgdp) | ||
424 | { | ||
425 | return ((unsigned long)pgdp % PAGE_SIZE) < (PAGE_SIZE / 2); | ||
426 | } | ||
427 | |||
428 | pgd_t kaiser_set_shadow_pgd(pgd_t *pgdp, pgd_t pgd) | ||
429 | { | ||
430 | if (!kaiser_enabled) | ||
431 | return pgd; | ||
432 | /* | ||
433 | * Do we need to also populate the shadow pgd? Check _PAGE_USER to | ||
434 | * skip cases like kexec and EFI which make temporary low mappings. | ||
435 | */ | ||
436 | if (pgd.pgd & _PAGE_USER) { | ||
437 | if (is_userspace_pgd(pgdp)) { | ||
438 | native_get_shadow_pgd(pgdp)->pgd = pgd.pgd; | ||
439 | /* | ||
440 | * Even if the entry is *mapping* userspace, ensure | ||
441 | * that userspace can not use it. This way, if we | ||
442 | * get out to userspace running on the kernel CR3, | ||
443 | * userspace will crash instead of running. | ||
444 | */ | ||
445 | if (__supported_pte_mask & _PAGE_NX) | ||
446 | pgd.pgd |= _PAGE_NX; | ||
447 | } | ||
448 | } else if (!pgd.pgd) { | ||
449 | /* | ||
450 | * pgd_clear() cannot check _PAGE_USER, and is even used to | ||
451 | * clear corrupted pgd entries: so just rely on cases like | ||
452 | * kexec and EFI never to be using pgd_clear(). | ||
453 | */ | ||
454 | if (!WARN_ON_ONCE((unsigned long)pgdp & PAGE_SIZE) && | ||
455 | is_userspace_pgd(pgdp)) | ||
456 | native_get_shadow_pgd(pgdp)->pgd = pgd.pgd; | ||
457 | } | ||
458 | return pgd; | ||
459 | } | ||
460 | |||
461 | void kaiser_setup_pcid(void) | ||
462 | { | ||
463 | unsigned long user_cr3 = KAISER_SHADOW_PGD_OFFSET; | ||
464 | |||
465 | if (this_cpu_has(X86_FEATURE_PCID)) | ||
466 | user_cr3 |= X86_CR3_PCID_USER_NOFLUSH; | ||
467 | /* | ||
468 | * These variables are used by the entry/exit | ||
469 | * code to change PCID and pgd and TLB flushing. | ||
470 | */ | ||
471 | this_cpu_write(x86_cr3_pcid_user, user_cr3); | ||
472 | } | ||
473 | |||
474 | /* | ||
475 | * Make a note that this cpu will need to flush USER tlb on return to user. | ||
476 | * If cpu does not have PCID, then the NOFLUSH bit will never have been set. | ||
477 | */ | ||
478 | void kaiser_flush_tlb_on_return_to_user(void) | ||
479 | { | ||
480 | if (this_cpu_has(X86_FEATURE_PCID)) | ||
481 | this_cpu_write(x86_cr3_pcid_user, | ||
482 | X86_CR3_PCID_USER_FLUSH | KAISER_SHADOW_PGD_OFFSET); | ||
483 | } | ||
484 | EXPORT_SYMBOL(kaiser_flush_tlb_on_return_to_user); | ||
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c index 4e5ac46adc9d..fdfa25c83119 100644 --- a/arch/x86/mm/kasan_init_64.c +++ b/arch/x86/mm/kasan_init_64.c | |||
@@ -121,11 +121,22 @@ void __init kasan_init(void) | |||
121 | kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END), | 121 | kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END), |
122 | (void *)KASAN_SHADOW_END); | 122 | (void *)KASAN_SHADOW_END); |
123 | 123 | ||
124 | memset(kasan_zero_page, 0, PAGE_SIZE); | ||
125 | |||
126 | load_cr3(init_level4_pgt); | 124 | load_cr3(init_level4_pgt); |
127 | __flush_tlb_all(); | 125 | __flush_tlb_all(); |
128 | init_task.kasan_depth = 0; | ||
129 | 126 | ||
127 | /* | ||
128 | * kasan_zero_page has been used as early shadow memory, thus it may | ||
129 | * contain some garbage. Now we can clear and write protect it, since | ||
130 | * after the TLB flush no one should write to it. | ||
131 | */ | ||
132 | memset(kasan_zero_page, 0, PAGE_SIZE); | ||
133 | for (i = 0; i < PTRS_PER_PTE; i++) { | ||
134 | pte_t pte = __pte(__pa(kasan_zero_page) | __PAGE_KERNEL_RO); | ||
135 | set_pte(&kasan_zero_pte[i], pte); | ||
136 | } | ||
137 | /* Flush TLBs again to be sure that write protection applied. */ | ||
138 | __flush_tlb_all(); | ||
139 | |||
140 | init_task.kasan_depth = 0; | ||
130 | pr_info("KernelAddressSanitizer initialized\n"); | 141 | pr_info("KernelAddressSanitizer initialized\n"); |
131 | } | 142 | } |
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 4540e8880cd9..ac9c7797b632 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c | |||
@@ -52,6 +52,7 @@ static DEFINE_SPINLOCK(cpa_lock); | |||
52 | #define CPA_FLUSHTLB 1 | 52 | #define CPA_FLUSHTLB 1 |
53 | #define CPA_ARRAY 2 | 53 | #define CPA_ARRAY 2 |
54 | #define CPA_PAGES_ARRAY 4 | 54 | #define CPA_PAGES_ARRAY 4 |
55 | #define CPA_FREE_PAGETABLES 8 | ||
55 | 56 | ||
56 | #ifdef CONFIG_PROC_FS | 57 | #ifdef CONFIG_PROC_FS |
57 | static unsigned long direct_pages_count[PG_LEVEL_NUM]; | 58 | static unsigned long direct_pages_count[PG_LEVEL_NUM]; |
@@ -723,10 +724,13 @@ static int split_large_page(struct cpa_data *cpa, pte_t *kpte, | |||
723 | return 0; | 724 | return 0; |
724 | } | 725 | } |
725 | 726 | ||
726 | static bool try_to_free_pte_page(pte_t *pte) | 727 | static bool try_to_free_pte_page(struct cpa_data *cpa, pte_t *pte) |
727 | { | 728 | { |
728 | int i; | 729 | int i; |
729 | 730 | ||
731 | if (!(cpa->flags & CPA_FREE_PAGETABLES)) | ||
732 | return false; | ||
733 | |||
730 | for (i = 0; i < PTRS_PER_PTE; i++) | 734 | for (i = 0; i < PTRS_PER_PTE; i++) |
731 | if (!pte_none(pte[i])) | 735 | if (!pte_none(pte[i])) |
732 | return false; | 736 | return false; |
@@ -735,10 +739,13 @@ static bool try_to_free_pte_page(pte_t *pte) | |||
735 | return true; | 739 | return true; |
736 | } | 740 | } |
737 | 741 | ||
738 | static bool try_to_free_pmd_page(pmd_t *pmd) | 742 | static bool try_to_free_pmd_page(struct cpa_data *cpa, pmd_t *pmd) |
739 | { | 743 | { |
740 | int i; | 744 | int i; |
741 | 745 | ||
746 | if (!(cpa->flags & CPA_FREE_PAGETABLES)) | ||
747 | return false; | ||
748 | |||
742 | for (i = 0; i < PTRS_PER_PMD; i++) | 749 | for (i = 0; i < PTRS_PER_PMD; i++) |
743 | if (!pmd_none(pmd[i])) | 750 | if (!pmd_none(pmd[i])) |
744 | return false; | 751 | return false; |
@@ -759,7 +766,9 @@ static bool try_to_free_pud_page(pud_t *pud) | |||
759 | return true; | 766 | return true; |
760 | } | 767 | } |
761 | 768 | ||
762 | static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end) | 769 | static bool unmap_pte_range(struct cpa_data *cpa, pmd_t *pmd, |
770 | unsigned long start, | ||
771 | unsigned long end) | ||
763 | { | 772 | { |
764 | pte_t *pte = pte_offset_kernel(pmd, start); | 773 | pte_t *pte = pte_offset_kernel(pmd, start); |
765 | 774 | ||
@@ -770,22 +779,23 @@ static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end) | |||
770 | pte++; | 779 | pte++; |
771 | } | 780 | } |
772 | 781 | ||
773 | if (try_to_free_pte_page((pte_t *)pmd_page_vaddr(*pmd))) { | 782 | if (try_to_free_pte_page(cpa, (pte_t *)pmd_page_vaddr(*pmd))) { |
774 | pmd_clear(pmd); | 783 | pmd_clear(pmd); |
775 | return true; | 784 | return true; |
776 | } | 785 | } |
777 | return false; | 786 | return false; |
778 | } | 787 | } |
779 | 788 | ||
780 | static void __unmap_pmd_range(pud_t *pud, pmd_t *pmd, | 789 | static void __unmap_pmd_range(struct cpa_data *cpa, pud_t *pud, pmd_t *pmd, |
781 | unsigned long start, unsigned long end) | 790 | unsigned long start, unsigned long end) |
782 | { | 791 | { |
783 | if (unmap_pte_range(pmd, start, end)) | 792 | if (unmap_pte_range(cpa, pmd, start, end)) |
784 | if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud))) | 793 | if (try_to_free_pmd_page(cpa, (pmd_t *)pud_page_vaddr(*pud))) |
785 | pud_clear(pud); | 794 | pud_clear(pud); |
786 | } | 795 | } |
787 | 796 | ||
788 | static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end) | 797 | static void unmap_pmd_range(struct cpa_data *cpa, pud_t *pud, |
798 | unsigned long start, unsigned long end) | ||
789 | { | 799 | { |
790 | pmd_t *pmd = pmd_offset(pud, start); | 800 | pmd_t *pmd = pmd_offset(pud, start); |
791 | 801 | ||
@@ -796,7 +806,7 @@ static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end) | |||
796 | unsigned long next_page = (start + PMD_SIZE) & PMD_MASK; | 806 | unsigned long next_page = (start + PMD_SIZE) & PMD_MASK; |
797 | unsigned long pre_end = min_t(unsigned long, end, next_page); | 807 | unsigned long pre_end = min_t(unsigned long, end, next_page); |
798 | 808 | ||
799 | __unmap_pmd_range(pud, pmd, start, pre_end); | 809 | __unmap_pmd_range(cpa, pud, pmd, start, pre_end); |
800 | 810 | ||
801 | start = pre_end; | 811 | start = pre_end; |
802 | pmd++; | 812 | pmd++; |
@@ -809,7 +819,8 @@ static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end) | |||
809 | if (pmd_large(*pmd)) | 819 | if (pmd_large(*pmd)) |
810 | pmd_clear(pmd); | 820 | pmd_clear(pmd); |
811 | else | 821 | else |
812 | __unmap_pmd_range(pud, pmd, start, start + PMD_SIZE); | 822 | __unmap_pmd_range(cpa, pud, pmd, |
823 | start, start + PMD_SIZE); | ||
813 | 824 | ||
814 | start += PMD_SIZE; | 825 | start += PMD_SIZE; |
815 | pmd++; | 826 | pmd++; |
@@ -819,17 +830,19 @@ static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end) | |||
819 | * 4K leftovers? | 830 | * 4K leftovers? |
820 | */ | 831 | */ |
821 | if (start < end) | 832 | if (start < end) |
822 | return __unmap_pmd_range(pud, pmd, start, end); | 833 | return __unmap_pmd_range(cpa, pud, pmd, start, end); |
823 | 834 | ||
824 | /* | 835 | /* |
825 | * Try again to free the PMD page if haven't succeeded above. | 836 | * Try again to free the PMD page if haven't succeeded above. |
826 | */ | 837 | */ |
827 | if (!pud_none(*pud)) | 838 | if (!pud_none(*pud)) |
828 | if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud))) | 839 | if (try_to_free_pmd_page(cpa, (pmd_t *)pud_page_vaddr(*pud))) |
829 | pud_clear(pud); | 840 | pud_clear(pud); |
830 | } | 841 | } |
831 | 842 | ||
832 | static void unmap_pud_range(pgd_t *pgd, unsigned long start, unsigned long end) | 843 | static void __unmap_pud_range(struct cpa_data *cpa, pgd_t *pgd, |
844 | unsigned long start, | ||
845 | unsigned long end) | ||
833 | { | 846 | { |
834 | pud_t *pud = pud_offset(pgd, start); | 847 | pud_t *pud = pud_offset(pgd, start); |
835 | 848 | ||
@@ -840,7 +853,7 @@ static void unmap_pud_range(pgd_t *pgd, unsigned long start, unsigned long end) | |||
840 | unsigned long next_page = (start + PUD_SIZE) & PUD_MASK; | 853 | unsigned long next_page = (start + PUD_SIZE) & PUD_MASK; |
841 | unsigned long pre_end = min_t(unsigned long, end, next_page); | 854 | unsigned long pre_end = min_t(unsigned long, end, next_page); |
842 | 855 | ||
843 | unmap_pmd_range(pud, start, pre_end); | 856 | unmap_pmd_range(cpa, pud, start, pre_end); |
844 | 857 | ||
845 | start = pre_end; | 858 | start = pre_end; |
846 | pud++; | 859 | pud++; |
@@ -854,7 +867,7 @@ static void unmap_pud_range(pgd_t *pgd, unsigned long start, unsigned long end) | |||
854 | if (pud_large(*pud)) | 867 | if (pud_large(*pud)) |
855 | pud_clear(pud); | 868 | pud_clear(pud); |
856 | else | 869 | else |
857 | unmap_pmd_range(pud, start, start + PUD_SIZE); | 870 | unmap_pmd_range(cpa, pud, start, start + PUD_SIZE); |
858 | 871 | ||
859 | start += PUD_SIZE; | 872 | start += PUD_SIZE; |
860 | pud++; | 873 | pud++; |
@@ -864,7 +877,7 @@ static void unmap_pud_range(pgd_t *pgd, unsigned long start, unsigned long end) | |||
864 | * 2M leftovers? | 877 | * 2M leftovers? |
865 | */ | 878 | */ |
866 | if (start < end) | 879 | if (start < end) |
867 | unmap_pmd_range(pud, start, end); | 880 | unmap_pmd_range(cpa, pud, start, end); |
868 | 881 | ||
869 | /* | 882 | /* |
870 | * No need to try to free the PUD page because we'll free it in | 883 | * No need to try to free the PUD page because we'll free it in |
@@ -872,6 +885,24 @@ static void unmap_pud_range(pgd_t *pgd, unsigned long start, unsigned long end) | |||
872 | */ | 885 | */ |
873 | } | 886 | } |
874 | 887 | ||
888 | static void unmap_pud_range(pgd_t *pgd, unsigned long start, unsigned long end) | ||
889 | { | ||
890 | struct cpa_data cpa = { | ||
891 | .flags = CPA_FREE_PAGETABLES, | ||
892 | }; | ||
893 | |||
894 | __unmap_pud_range(&cpa, pgd, start, end); | ||
895 | } | ||
896 | |||
897 | void unmap_pud_range_nofree(pgd_t *pgd, unsigned long start, unsigned long end) | ||
898 | { | ||
899 | struct cpa_data cpa = { | ||
900 | .flags = 0, | ||
901 | }; | ||
902 | |||
903 | __unmap_pud_range(&cpa, pgd, start, end); | ||
904 | } | ||
905 | |||
875 | static void unmap_pgd_range(pgd_t *root, unsigned long addr, unsigned long end) | 906 | static void unmap_pgd_range(pgd_t *root, unsigned long addr, unsigned long end) |
876 | { | 907 | { |
877 | pgd_t *pgd_entry = root + pgd_index(addr); | 908 | pgd_t *pgd_entry = root + pgd_index(addr); |
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index 3f1bb4f93a5a..3146b1da6d72 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c | |||
@@ -750,11 +750,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size) | |||
750 | return 1; | 750 | return 1; |
751 | 751 | ||
752 | while (cursor < to) { | 752 | while (cursor < to) { |
753 | if (!devmem_is_allowed(pfn)) { | 753 | if (!devmem_is_allowed(pfn)) |
754 | pr_info("x86/PAT: Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx], PAT prevents it\n", | ||
755 | current->comm, from, to - 1); | ||
756 | return 0; | 754 | return 0; |
757 | } | ||
758 | cursor += PAGE_SIZE; | 755 | cursor += PAGE_SIZE; |
759 | pfn++; | 756 | pfn++; |
760 | } | 757 | } |
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index fb0a9dd1d6e4..dbc27a2b4ad5 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c | |||
@@ -6,7 +6,7 @@ | |||
6 | #include <asm/fixmap.h> | 6 | #include <asm/fixmap.h> |
7 | #include <asm/mtrr.h> | 7 | #include <asm/mtrr.h> |
8 | 8 | ||
9 | #define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO | 9 | #define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO) |
10 | 10 | ||
11 | #ifdef CONFIG_HIGHPTE | 11 | #ifdef CONFIG_HIGHPTE |
12 | #define PGALLOC_USER_GFP __GFP_HIGHMEM | 12 | #define PGALLOC_USER_GFP __GFP_HIGHMEM |
@@ -340,14 +340,24 @@ static inline void _pgd_free(pgd_t *pgd) | |||
340 | kmem_cache_free(pgd_cache, pgd); | 340 | kmem_cache_free(pgd_cache, pgd); |
341 | } | 341 | } |
342 | #else | 342 | #else |
343 | |||
344 | /* | ||
345 | * Instead of one pgd, Kaiser acquires two pgds. Being order-1, it is | ||
346 | * both 8k in size and 8k-aligned. That lets us just flip bit 12 | ||
347 | * in a pointer to swap between the two 4k halves. | ||
348 | */ | ||
349 | #define PGD_ALLOCATION_ORDER kaiser_enabled | ||
350 | |||
343 | static inline pgd_t *_pgd_alloc(void) | 351 | static inline pgd_t *_pgd_alloc(void) |
344 | { | 352 | { |
345 | return (pgd_t *)__get_free_page(PGALLOC_GFP); | 353 | /* No __GFP_REPEAT: to avoid page allocation stalls in order-1 case */ |
354 | return (pgd_t *)__get_free_pages(PGALLOC_GFP & ~__GFP_REPEAT, | ||
355 | PGD_ALLOCATION_ORDER); | ||
346 | } | 356 | } |
347 | 357 | ||
348 | static inline void _pgd_free(pgd_t *pgd) | 358 | static inline void _pgd_free(pgd_t *pgd) |
349 | { | 359 | { |
350 | free_page((unsigned long)pgd); | 360 | free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER); |
351 | } | 361 | } |
352 | #endif /* CONFIG_X86_PAE */ | 362 | #endif /* CONFIG_X86_PAE */ |
353 | 363 | ||
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 5a760fd66bec..7cad01af6dcd 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c | |||
@@ -6,16 +6,17 @@ | |||
6 | #include <linux/interrupt.h> | 6 | #include <linux/interrupt.h> |
7 | #include <linux/module.h> | 7 | #include <linux/module.h> |
8 | #include <linux/cpu.h> | 8 | #include <linux/cpu.h> |
9 | #include <linux/debugfs.h> | ||
9 | 10 | ||
10 | #include <asm/tlbflush.h> | 11 | #include <asm/tlbflush.h> |
11 | #include <asm/mmu_context.h> | 12 | #include <asm/mmu_context.h> |
12 | #include <asm/cache.h> | 13 | #include <asm/cache.h> |
13 | #include <asm/apic.h> | 14 | #include <asm/apic.h> |
14 | #include <asm/uv/uv.h> | 15 | #include <asm/uv/uv.h> |
15 | #include <linux/debugfs.h> | 16 | #include <asm/kaiser.h> |
16 | 17 | ||
17 | /* | 18 | /* |
18 | * Smarter SMP flushing macros. | 19 | * TLB flushing, formerly SMP-only |
19 | * c/o Linus Torvalds. | 20 | * c/o Linus Torvalds. |
20 | * | 21 | * |
21 | * These mean you can really definitely utterly forget about | 22 | * These mean you can really definitely utterly forget about |
@@ -34,6 +35,36 @@ struct flush_tlb_info { | |||
34 | unsigned long flush_end; | 35 | unsigned long flush_end; |
35 | }; | 36 | }; |
36 | 37 | ||
38 | static void load_new_mm_cr3(pgd_t *pgdir) | ||
39 | { | ||
40 | unsigned long new_mm_cr3 = __pa(pgdir); | ||
41 | |||
42 | if (kaiser_enabled) { | ||
43 | /* | ||
44 | * We reuse the same PCID for different tasks, so we must | ||
45 | * flush all the entries for the PCID out when we change tasks. | ||
46 | * Flush KERN below, flush USER when returning to userspace in | ||
47 | * kaiser's SWITCH_USER_CR3 (_SWITCH_TO_USER_CR3) macro. | ||
48 | * | ||
49 | * invpcid_flush_single_context(X86_CR3_PCID_ASID_USER) could | ||
50 | * do it here, but can only be used if X86_FEATURE_INVPCID is | ||
51 | * available - and many machines support pcid without invpcid. | ||
52 | * | ||
53 | * If X86_CR3_PCID_KERN_FLUSH actually added something, then it | ||
54 | * would be needed in the write_cr3() below - if PCIDs enabled. | ||
55 | */ | ||
56 | BUILD_BUG_ON(X86_CR3_PCID_KERN_FLUSH); | ||
57 | kaiser_flush_tlb_on_return_to_user(); | ||
58 | } | ||
59 | |||
60 | /* | ||
61 | * Caution: many callers of this function expect | ||
62 | * that load_cr3() is serializing and orders TLB | ||
63 | * fills with respect to the mm_cpumask writes. | ||
64 | */ | ||
65 | write_cr3(new_mm_cr3); | ||
66 | } | ||
67 | |||
37 | /* | 68 | /* |
38 | * We cannot call mmdrop() because we are in interrupt context, | 69 | * We cannot call mmdrop() because we are in interrupt context, |
39 | * instead update mm->cpu_vm_mask. | 70 | * instead update mm->cpu_vm_mask. |
@@ -45,7 +76,7 @@ void leave_mm(int cpu) | |||
45 | BUG(); | 76 | BUG(); |
46 | if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) { | 77 | if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) { |
47 | cpumask_clear_cpu(cpu, mm_cpumask(active_mm)); | 78 | cpumask_clear_cpu(cpu, mm_cpumask(active_mm)); |
48 | load_cr3(swapper_pg_dir); | 79 | load_new_mm_cr3(swapper_pg_dir); |
49 | /* | 80 | /* |
50 | * This gets called in the idle path where RCU | 81 | * This gets called in the idle path where RCU |
51 | * functions differently. Tracing normally | 82 | * functions differently. Tracing normally |
@@ -57,6 +88,109 @@ void leave_mm(int cpu) | |||
57 | } | 88 | } |
58 | EXPORT_SYMBOL_GPL(leave_mm); | 89 | EXPORT_SYMBOL_GPL(leave_mm); |
59 | 90 | ||
91 | void switch_mm(struct mm_struct *prev, struct mm_struct *next, | ||
92 | struct task_struct *tsk) | ||
93 | { | ||
94 | unsigned long flags; | ||
95 | |||
96 | local_irq_save(flags); | ||
97 | switch_mm_irqs_off(prev, next, tsk); | ||
98 | local_irq_restore(flags); | ||
99 | } | ||
100 | |||
101 | void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, | ||
102 | struct task_struct *tsk) | ||
103 | { | ||
104 | unsigned cpu = smp_processor_id(); | ||
105 | |||
106 | if (likely(prev != next)) { | ||
107 | this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK); | ||
108 | this_cpu_write(cpu_tlbstate.active_mm, next); | ||
109 | cpumask_set_cpu(cpu, mm_cpumask(next)); | ||
110 | |||
111 | /* | ||
112 | * Re-load page tables. | ||
113 | * | ||
114 | * This logic has an ordering constraint: | ||
115 | * | ||
116 | * CPU 0: Write to a PTE for 'next' | ||
117 | * CPU 0: load bit 1 in mm_cpumask. if nonzero, send IPI. | ||
118 | * CPU 1: set bit 1 in next's mm_cpumask | ||
119 | * CPU 1: load from the PTE that CPU 0 writes (implicit) | ||
120 | * | ||
121 | * We need to prevent an outcome in which CPU 1 observes | ||
122 | * the new PTE value and CPU 0 observes bit 1 clear in | ||
123 | * mm_cpumask. (If that occurs, then the IPI will never | ||
124 | * be sent, and CPU 0's TLB will contain a stale entry.) | ||
125 | * | ||
126 | * The bad outcome can occur if either CPU's load is | ||
127 | * reordered before that CPU's store, so both CPUs must | ||
128 | * execute full barriers to prevent this from happening. | ||
129 | * | ||
130 | * Thus, switch_mm needs a full barrier between the | ||
131 | * store to mm_cpumask and any operation that could load | ||
132 | * from next->pgd. TLB fills are special and can happen | ||
133 | * due to instruction fetches or for no reason at all, | ||
134 | * and neither LOCK nor MFENCE orders them. | ||
135 | * Fortunately, load_cr3() is serializing and gives the | ||
136 | * ordering guarantee we need. | ||
137 | * | ||
138 | */ | ||
139 | load_new_mm_cr3(next->pgd); | ||
140 | |||
141 | trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); | ||
142 | |||
143 | /* Stop flush ipis for the previous mm */ | ||
144 | cpumask_clear_cpu(cpu, mm_cpumask(prev)); | ||
145 | |||
146 | /* Load per-mm CR4 state */ | ||
147 | load_mm_cr4(next); | ||
148 | |||
149 | #ifdef CONFIG_MODIFY_LDT_SYSCALL | ||
150 | /* | ||
151 | * Load the LDT, if the LDT is different. | ||
152 | * | ||
153 | * It's possible that prev->context.ldt doesn't match | ||
154 | * the LDT register. This can happen if leave_mm(prev) | ||
155 | * was called and then modify_ldt changed | ||
156 | * prev->context.ldt but suppressed an IPI to this CPU. | ||
157 | * In this case, prev->context.ldt != NULL, because we | ||
158 | * never set context.ldt to NULL while the mm still | ||
159 | * exists. That means that next->context.ldt != | ||
160 | * prev->context.ldt, because mms never share an LDT. | ||
161 | */ | ||
162 | if (unlikely(prev->context.ldt != next->context.ldt)) | ||
163 | load_mm_ldt(next); | ||
164 | #endif | ||
165 | } else { | ||
166 | this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK); | ||
167 | BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next); | ||
168 | |||
169 | if (!cpumask_test_cpu(cpu, mm_cpumask(next))) { | ||
170 | /* | ||
171 | * On established mms, the mm_cpumask is only changed | ||
172 | * from irq context, from ptep_clear_flush() while in | ||
173 | * lazy tlb mode, and here. Irqs are blocked during | ||
174 | * schedule, protecting us from simultaneous changes. | ||
175 | */ | ||
176 | cpumask_set_cpu(cpu, mm_cpumask(next)); | ||
177 | |||
178 | /* | ||
179 | * We were in lazy tlb mode and leave_mm disabled | ||
180 | * tlb flush IPI delivery. We must reload CR3 | ||
181 | * to make sure to use no freed page tables. | ||
182 | * | ||
183 | * As above, load_cr3() is serializing and orders TLB | ||
184 | * fills with respect to the mm_cpumask write. | ||
185 | */ | ||
186 | load_new_mm_cr3(next->pgd); | ||
187 | trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); | ||
188 | load_mm_cr4(next); | ||
189 | load_mm_ldt(next); | ||
190 | } | ||
191 | } | ||
192 | } | ||
193 | |||
60 | /* | 194 | /* |
61 | * The flush IPI assumes that a thread switch happens in this order: | 195 | * The flush IPI assumes that a thread switch happens in this order: |
62 | * [cpu0: the cpu that switches] | 196 | * [cpu0: the cpu that switches] |
@@ -104,7 +238,7 @@ static void flush_tlb_func(void *info) | |||
104 | 238 | ||
105 | inc_irq_stat(irq_tlb_count); | 239 | inc_irq_stat(irq_tlb_count); |
106 | 240 | ||
107 | if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm)) | 241 | if (f->flush_mm && f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm)) |
108 | return; | 242 | return; |
109 | 243 | ||
110 | count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); | 244 | count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); |
@@ -158,23 +292,6 @@ void native_flush_tlb_others(const struct cpumask *cpumask, | |||
158 | smp_call_function_many(cpumask, flush_tlb_func, &info, 1); | 292 | smp_call_function_many(cpumask, flush_tlb_func, &info, 1); |
159 | } | 293 | } |
160 | 294 | ||
161 | void flush_tlb_current_task(void) | ||
162 | { | ||
163 | struct mm_struct *mm = current->mm; | ||
164 | |||
165 | preempt_disable(); | ||
166 | |||
167 | count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); | ||
168 | |||
169 | /* This is an implicit full barrier that synchronizes with switch_mm. */ | ||
170 | local_flush_tlb(); | ||
171 | |||
172 | trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL); | ||
173 | if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) | ||
174 | flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL); | ||
175 | preempt_enable(); | ||
176 | } | ||
177 | |||
178 | /* | 295 | /* |
179 | * See Documentation/x86/tlb.txt for details. We choose 33 | 296 | * See Documentation/x86/tlb.txt for details. We choose 33 |
180 | * because it is large enough to cover the vast majority (at | 297 | * because it is large enough to cover the vast majority (at |
@@ -195,6 +312,12 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, | |||
195 | unsigned long base_pages_to_flush = TLB_FLUSH_ALL; | 312 | unsigned long base_pages_to_flush = TLB_FLUSH_ALL; |
196 | 313 | ||
197 | preempt_disable(); | 314 | preempt_disable(); |
315 | |||
316 | if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB)) | ||
317 | base_pages_to_flush = (end - start) >> PAGE_SHIFT; | ||
318 | if (base_pages_to_flush > tlb_single_page_flush_ceiling) | ||
319 | base_pages_to_flush = TLB_FLUSH_ALL; | ||
320 | |||
198 | if (current->active_mm != mm) { | 321 | if (current->active_mm != mm) { |
199 | /* Synchronize with switch_mm. */ | 322 | /* Synchronize with switch_mm. */ |
200 | smp_mb(); | 323 | smp_mb(); |
@@ -211,15 +334,11 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, | |||
211 | goto out; | 334 | goto out; |
212 | } | 335 | } |
213 | 336 | ||
214 | if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB)) | ||
215 | base_pages_to_flush = (end - start) >> PAGE_SHIFT; | ||
216 | |||
217 | /* | 337 | /* |
218 | * Both branches below are implicit full barriers (MOV to CR or | 338 | * Both branches below are implicit full barriers (MOV to CR or |
219 | * INVLPG) that synchronize with switch_mm. | 339 | * INVLPG) that synchronize with switch_mm. |
220 | */ | 340 | */ |
221 | if (base_pages_to_flush > tlb_single_page_flush_ceiling) { | 341 | if (base_pages_to_flush == TLB_FLUSH_ALL) { |
222 | base_pages_to_flush = TLB_FLUSH_ALL; | ||
223 | count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); | 342 | count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); |
224 | local_flush_tlb(); | 343 | local_flush_tlb(); |
225 | } else { | 344 | } else { |
@@ -240,33 +359,6 @@ out: | |||
240 | preempt_enable(); | 359 | preempt_enable(); |
241 | } | 360 | } |
242 | 361 | ||
243 | void flush_tlb_page(struct vm_area_struct *vma, unsigned long start) | ||
244 | { | ||
245 | struct mm_struct *mm = vma->vm_mm; | ||
246 | |||
247 | preempt_disable(); | ||
248 | |||
249 | if (current->active_mm == mm) { | ||
250 | if (current->mm) { | ||
251 | /* | ||
252 | * Implicit full barrier (INVLPG) that synchronizes | ||
253 | * with switch_mm. | ||
254 | */ | ||
255 | __flush_tlb_one(start); | ||
256 | } else { | ||
257 | leave_mm(smp_processor_id()); | ||
258 | |||
259 | /* Synchronize with switch_mm. */ | ||
260 | smp_mb(); | ||
261 | } | ||
262 | } | ||
263 | |||
264 | if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) | ||
265 | flush_tlb_others(mm_cpumask(mm), mm, start, start + PAGE_SIZE); | ||
266 | |||
267 | preempt_enable(); | ||
268 | } | ||
269 | |||
270 | static void do_flush_tlb_all(void *info) | 362 | static void do_flush_tlb_all(void *info) |
271 | { | 363 | { |
272 | count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); | 364 | count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); |
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 75991979f667..33c42b826791 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c | |||
@@ -266,10 +266,10 @@ static void emit_bpf_tail_call(u8 **pprog) | |||
266 | /* if (index >= array->map.max_entries) | 266 | /* if (index >= array->map.max_entries) |
267 | * goto out; | 267 | * goto out; |
268 | */ | 268 | */ |
269 | EMIT4(0x48, 0x8B, 0x46, /* mov rax, qword ptr [rsi + 16] */ | 269 | EMIT2(0x89, 0xD2); /* mov edx, edx */ |
270 | EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */ | ||
270 | offsetof(struct bpf_array, map.max_entries)); | 271 | offsetof(struct bpf_array, map.max_entries)); |
271 | EMIT3(0x48, 0x39, 0xD0); /* cmp rax, rdx */ | 272 | #define OFFSET1 43 /* number of bytes to jump */ |
272 | #define OFFSET1 47 /* number of bytes to jump */ | ||
273 | EMIT2(X86_JBE, OFFSET1); /* jbe out */ | 273 | EMIT2(X86_JBE, OFFSET1); /* jbe out */ |
274 | label1 = cnt; | 274 | label1 = cnt; |
275 | 275 | ||
@@ -278,21 +278,20 @@ static void emit_bpf_tail_call(u8 **pprog) | |||
278 | */ | 278 | */ |
279 | EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */ | 279 | EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */ |
280 | EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ | 280 | EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ |
281 | #define OFFSET2 36 | 281 | #define OFFSET2 32 |
282 | EMIT2(X86_JA, OFFSET2); /* ja out */ | 282 | EMIT2(X86_JA, OFFSET2); /* ja out */ |
283 | label2 = cnt; | 283 | label2 = cnt; |
284 | EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ | 284 | EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ |
285 | EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */ | 285 | EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */ |
286 | 286 | ||
287 | /* prog = array->ptrs[index]; */ | 287 | /* prog = array->ptrs[index]; */ |
288 | EMIT4_off32(0x48, 0x8D, 0x84, 0xD6, /* lea rax, [rsi + rdx * 8 + offsetof(...)] */ | 288 | EMIT4_off32(0x48, 0x8B, 0x84, 0xD6, /* mov rax, [rsi + rdx * 8 + offsetof(...)] */ |
289 | offsetof(struct bpf_array, ptrs)); | 289 | offsetof(struct bpf_array, ptrs)); |
290 | EMIT3(0x48, 0x8B, 0x00); /* mov rax, qword ptr [rax] */ | ||
291 | 290 | ||
292 | /* if (prog == NULL) | 291 | /* if (prog == NULL) |
293 | * goto out; | 292 | * goto out; |
294 | */ | 293 | */ |
295 | EMIT4(0x48, 0x83, 0xF8, 0x00); /* cmp rax, 0 */ | 294 | EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */ |
296 | #define OFFSET3 10 | 295 | #define OFFSET3 10 |
297 | EMIT2(X86_JE, OFFSET3); /* je out */ | 296 | EMIT2(X86_JE, OFFSET3); /* je out */ |
298 | label3 = cnt; | 297 | label3 = cnt; |
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c index 0b7a63d98440..805a3271a137 100644 --- a/arch/x86/realmode/init.c +++ b/arch/x86/realmode/init.c | |||
@@ -4,6 +4,7 @@ | |||
4 | #include <asm/cacheflush.h> | 4 | #include <asm/cacheflush.h> |
5 | #include <asm/pgtable.h> | 5 | #include <asm/pgtable.h> |
6 | #include <asm/realmode.h> | 6 | #include <asm/realmode.h> |
7 | #include <asm/kaiser.h> | ||
7 | 8 | ||
8 | struct real_mode_header *real_mode_header; | 9 | struct real_mode_header *real_mode_header; |
9 | u32 *trampoline_cr4_features; | 10 | u32 *trampoline_cr4_features; |
@@ -15,7 +16,8 @@ void __init reserve_real_mode(void) | |||
15 | size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob); | 16 | size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob); |
16 | 17 | ||
17 | /* Has to be under 1M so we can execute real-mode AP code. */ | 18 | /* Has to be under 1M so we can execute real-mode AP code. */ |
18 | mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE); | 19 | mem = memblock_find_in_range(0, 1 << 20, size, |
20 | KAISER_KERNEL_PGD_ALIGNMENT); | ||
19 | if (!mem) | 21 | if (!mem) |
20 | panic("Cannot allocate trampoline\n"); | 22 | panic("Cannot allocate trampoline\n"); |
21 | 23 | ||
diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S index dac7b20d2f9d..781cca63f795 100644 --- a/arch/x86/realmode/rm/trampoline_64.S +++ b/arch/x86/realmode/rm/trampoline_64.S | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <asm/msr.h> | 30 | #include <asm/msr.h> |
31 | #include <asm/segment.h> | 31 | #include <asm/segment.h> |
32 | #include <asm/processor-flags.h> | 32 | #include <asm/processor-flags.h> |
33 | #include <asm/kaiser.h> | ||
33 | #include "realmode.h" | 34 | #include "realmode.h" |
34 | 35 | ||
35 | .text | 36 | .text |
@@ -139,7 +140,7 @@ tr_gdt: | |||
139 | tr_gdt_end: | 140 | tr_gdt_end: |
140 | 141 | ||
141 | .bss | 142 | .bss |
142 | .balign PAGE_SIZE | 143 | .balign KAISER_KERNEL_PGD_ALIGNMENT |
143 | GLOBAL(trampoline_pgd) .space PAGE_SIZE | 144 | GLOBAL(trampoline_pgd) .space PAGE_SIZE |
144 | 145 | ||
145 | .balign 8 | 146 | .balign 8 |
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index ffa41591bff9..cbef64b508e1 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -433,6 +433,12 @@ static void __init xen_init_cpuid_mask(void) | |||
433 | ~((1 << X86_FEATURE_MTRR) | /* disable MTRR */ | 433 | ~((1 << X86_FEATURE_MTRR) | /* disable MTRR */ |
434 | (1 << X86_FEATURE_ACC)); /* thermal monitoring */ | 434 | (1 << X86_FEATURE_ACC)); /* thermal monitoring */ |
435 | 435 | ||
436 | /* | ||
437 | * Xen PV would need some work to support PCID: CR3 handling as well | ||
438 | * as xen_flush_tlb_others() would need updating. | ||
439 | */ | ||
440 | cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_PCID % 32)); /* disable PCID */ | ||
441 | |||
436 | if (!xen_initial_domain()) | 442 | if (!xen_initial_domain()) |
437 | cpuid_leaf1_edx_mask &= | 443 | cpuid_leaf1_edx_mask &= |
438 | ~((1 << X86_FEATURE_ACPI)); /* disable ACPI */ | 444 | ~((1 << X86_FEATURE_ACPI)); /* disable ACPI */ |
diff --git a/arch/xtensa/include/asm/futex.h b/arch/xtensa/include/asm/futex.h index b39531babec0..72bfc1cbc2b5 100644 --- a/arch/xtensa/include/asm/futex.h +++ b/arch/xtensa/include/asm/futex.h | |||
@@ -109,7 +109,6 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | |||
109 | u32 oldval, u32 newval) | 109 | u32 oldval, u32 newval) |
110 | { | 110 | { |
111 | int ret = 0; | 111 | int ret = 0; |
112 | u32 prev; | ||
113 | 112 | ||
114 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) | 113 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) |
115 | return -EFAULT; | 114 | return -EFAULT; |
@@ -120,26 +119,24 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | |||
120 | 119 | ||
121 | __asm__ __volatile__ ( | 120 | __asm__ __volatile__ ( |
122 | " # futex_atomic_cmpxchg_inatomic\n" | 121 | " # futex_atomic_cmpxchg_inatomic\n" |
123 | "1: l32i %1, %3, 0\n" | 122 | " wsr %5, scompare1\n" |
124 | " mov %0, %5\n" | 123 | "1: s32c1i %1, %4, 0\n" |
125 | " wsr %1, scompare1\n" | 124 | " s32i %1, %6, 0\n" |
126 | "2: s32c1i %0, %3, 0\n" | 125 | "2:\n" |
127 | "3:\n" | ||
128 | " .section .fixup,\"ax\"\n" | 126 | " .section .fixup,\"ax\"\n" |
129 | " .align 4\n" | 127 | " .align 4\n" |
130 | "4: .long 3b\n" | 128 | "3: .long 2b\n" |
131 | "5: l32r %1, 4b\n" | 129 | "4: l32r %1, 3b\n" |
132 | " movi %0, %6\n" | 130 | " movi %0, %7\n" |
133 | " jx %1\n" | 131 | " jx %1\n" |
134 | " .previous\n" | 132 | " .previous\n" |
135 | " .section __ex_table,\"a\"\n" | 133 | " .section __ex_table,\"a\"\n" |
136 | " .long 1b,5b,2b,5b\n" | 134 | " .long 1b,4b\n" |
137 | " .previous\n" | 135 | " .previous\n" |
138 | : "+r" (ret), "=&r" (prev), "+m" (*uaddr) | 136 | : "+r" (ret), "+r" (newval), "+m" (*uaddr), "+m" (*uval) |
139 | : "r" (uaddr), "r" (oldval), "r" (newval), "I" (-EFAULT) | 137 | : "r" (uaddr), "r" (oldval), "r" (uval), "I" (-EFAULT) |
140 | : "memory"); | 138 | : "memory"); |
141 | 139 | ||
142 | *uval = prev; | ||
143 | return ret; | 140 | return ret; |
144 | } | 141 | } |
145 | 142 | ||
diff --git a/crypto/af_alg.c b/crypto/af_alg.c index f5e18c2a4852..ca50eeb13097 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c | |||
@@ -149,7 +149,7 @@ EXPORT_SYMBOL_GPL(af_alg_release_parent); | |||
149 | 149 | ||
150 | static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | 150 | static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) |
151 | { | 151 | { |
152 | const u32 forbidden = CRYPTO_ALG_INTERNAL; | 152 | const u32 allowed = CRYPTO_ALG_KERN_DRIVER_ONLY; |
153 | struct sock *sk = sock->sk; | 153 | struct sock *sk = sock->sk; |
154 | struct alg_sock *ask = alg_sk(sk); | 154 | struct alg_sock *ask = alg_sk(sk); |
155 | struct sockaddr_alg *sa = (void *)uaddr; | 155 | struct sockaddr_alg *sa = (void *)uaddr; |
@@ -157,6 +157,10 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
157 | void *private; | 157 | void *private; |
158 | int err; | 158 | int err; |
159 | 159 | ||
160 | /* If caller uses non-allowed flag, return error. */ | ||
161 | if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed)) | ||
162 | return -EINVAL; | ||
163 | |||
160 | if (sock->state == SS_CONNECTED) | 164 | if (sock->state == SS_CONNECTED) |
161 | return -EINVAL; | 165 | return -EINVAL; |
162 | 166 | ||
@@ -175,9 +179,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
175 | if (IS_ERR(type)) | 179 | if (IS_ERR(type)) |
176 | return PTR_ERR(type); | 180 | return PTR_ERR(type); |
177 | 181 | ||
178 | private = type->bind(sa->salg_name, | 182 | private = type->bind(sa->salg_name, sa->salg_feat, sa->salg_mask); |
179 | sa->salg_feat & ~forbidden, | ||
180 | sa->salg_mask & ~forbidden); | ||
181 | if (IS_ERR(private)) { | 183 | if (IS_ERR(private)) { |
182 | module_put(type->owner); | 184 | module_put(type->owner); |
183 | return PTR_ERR(private); | 185 | return PTR_ERR(private); |
diff --git a/crypto/ahash.c b/crypto/ahash.c index f9caf0f74199..7006dbfd39bd 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c | |||
@@ -637,5 +637,16 @@ struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask) | |||
637 | } | 637 | } |
638 | EXPORT_SYMBOL_GPL(ahash_attr_alg); | 638 | EXPORT_SYMBOL_GPL(ahash_attr_alg); |
639 | 639 | ||
640 | bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg) | ||
641 | { | ||
642 | struct crypto_alg *alg = &halg->base; | ||
643 | |||
644 | if (alg->cra_type != &crypto_ahash_type) | ||
645 | return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg)); | ||
646 | |||
647 | return __crypto_ahash_alg(alg)->setkey != NULL; | ||
648 | } | ||
649 | EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey); | ||
650 | |||
640 | MODULE_LICENSE("GPL"); | 651 | MODULE_LICENSE("GPL"); |
641 | MODULE_DESCRIPTION("Asynchronous cryptographic hash type"); | 652 | MODULE_DESCRIPTION("Asynchronous cryptographic hash type"); |
diff --git a/crypto/algapi.c b/crypto/algapi.c index 43f5bdb6b570..eb58b73ca925 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c | |||
@@ -168,6 +168,18 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, | |||
168 | 168 | ||
169 | spawn->alg = NULL; | 169 | spawn->alg = NULL; |
170 | spawns = &inst->alg.cra_users; | 170 | spawns = &inst->alg.cra_users; |
171 | |||
172 | /* | ||
173 | * We may encounter an unregistered instance here, since | ||
174 | * an instance's spawns are set up prior to the instance | ||
175 | * being registered. An unregistered instance will have | ||
176 | * NULL ->cra_users.next, since ->cra_users isn't | ||
177 | * properly initialized until registration. But an | ||
178 | * unregistered instance cannot have any users, so treat | ||
179 | * it the same as ->cra_users being empty. | ||
180 | */ | ||
181 | if (spawns->next == NULL) | ||
182 | break; | ||
171 | } | 183 | } |
172 | } while ((spawns = crypto_more_spawns(alg, &stack, &top, | 184 | } while ((spawns = crypto_more_spawns(alg, &stack, &top, |
173 | &secondary_spawns))); | 185 | &secondary_spawns))); |
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c index 99c3cce01290..0214600ba071 100644 --- a/crypto/chacha20poly1305.c +++ b/crypto/chacha20poly1305.c | |||
@@ -600,6 +600,11 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, | |||
600 | CRYPTO_ALG_TYPE_AHASH_MASK); | 600 | CRYPTO_ALG_TYPE_AHASH_MASK); |
601 | if (IS_ERR(poly)) | 601 | if (IS_ERR(poly)) |
602 | return PTR_ERR(poly); | 602 | return PTR_ERR(poly); |
603 | poly_hash = __crypto_hash_alg_common(poly); | ||
604 | |||
605 | err = -EINVAL; | ||
606 | if (poly_hash->digestsize != POLY1305_DIGEST_SIZE) | ||
607 | goto out_put_poly; | ||
603 | 608 | ||
604 | err = -ENOMEM; | 609 | err = -ENOMEM; |
605 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); | 610 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); |
@@ -608,7 +613,6 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, | |||
608 | 613 | ||
609 | ctx = aead_instance_ctx(inst); | 614 | ctx = aead_instance_ctx(inst); |
610 | ctx->saltlen = CHACHAPOLY_IV_SIZE - ivsize; | 615 | ctx->saltlen = CHACHAPOLY_IV_SIZE - ivsize; |
611 | poly_hash = __crypto_hash_alg_common(poly); | ||
612 | err = crypto_init_ahash_spawn(&ctx->poly, poly_hash, | 616 | err = crypto_init_ahash_spawn(&ctx->poly, poly_hash, |
613 | aead_crypto_instance(inst)); | 617 | aead_crypto_instance(inst)); |
614 | if (err) | 618 | if (err) |
diff --git a/crypto/cryptd.c b/crypto/cryptd.c index 26a504db3f53..10a5a3eb675a 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c | |||
@@ -654,7 +654,8 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, | |||
654 | inst->alg.finup = cryptd_hash_finup_enqueue; | 654 | inst->alg.finup = cryptd_hash_finup_enqueue; |
655 | inst->alg.export = cryptd_hash_export; | 655 | inst->alg.export = cryptd_hash_export; |
656 | inst->alg.import = cryptd_hash_import; | 656 | inst->alg.import = cryptd_hash_import; |
657 | inst->alg.setkey = cryptd_hash_setkey; | 657 | if (crypto_shash_alg_has_setkey(salg)) |
658 | inst->alg.setkey = cryptd_hash_setkey; | ||
658 | inst->alg.digest = cryptd_hash_digest_enqueue; | 659 | inst->alg.digest = cryptd_hash_digest_enqueue; |
659 | 660 | ||
660 | err = ahash_register_instance(tmpl, inst); | 661 | err = ahash_register_instance(tmpl, inst); |
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c index b4f3930266b1..f620fe09d20a 100644 --- a/crypto/mcryptd.c +++ b/crypto/mcryptd.c | |||
@@ -80,6 +80,7 @@ static int mcryptd_init_queue(struct mcryptd_queue *queue, | |||
80 | pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue); | 80 | pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue); |
81 | crypto_init_queue(&cpu_queue->queue, max_cpu_qlen); | 81 | crypto_init_queue(&cpu_queue->queue, max_cpu_qlen); |
82 | INIT_WORK(&cpu_queue->work, mcryptd_queue_worker); | 82 | INIT_WORK(&cpu_queue->work, mcryptd_queue_worker); |
83 | spin_lock_init(&cpu_queue->q_lock); | ||
83 | } | 84 | } |
84 | return 0; | 85 | return 0; |
85 | } | 86 | } |
@@ -103,15 +104,16 @@ static int mcryptd_enqueue_request(struct mcryptd_queue *queue, | |||
103 | int cpu, err; | 104 | int cpu, err; |
104 | struct mcryptd_cpu_queue *cpu_queue; | 105 | struct mcryptd_cpu_queue *cpu_queue; |
105 | 106 | ||
106 | cpu = get_cpu(); | 107 | cpu_queue = raw_cpu_ptr(queue->cpu_queue); |
107 | cpu_queue = this_cpu_ptr(queue->cpu_queue); | 108 | spin_lock(&cpu_queue->q_lock); |
108 | rctx->tag.cpu = cpu; | 109 | cpu = smp_processor_id(); |
110 | rctx->tag.cpu = smp_processor_id(); | ||
109 | 111 | ||
110 | err = crypto_enqueue_request(&cpu_queue->queue, request); | 112 | err = crypto_enqueue_request(&cpu_queue->queue, request); |
111 | pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n", | 113 | pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n", |
112 | cpu, cpu_queue, request); | 114 | cpu, cpu_queue, request); |
115 | spin_unlock(&cpu_queue->q_lock); | ||
113 | queue_work_on(cpu, kcrypto_wq, &cpu_queue->work); | 116 | queue_work_on(cpu, kcrypto_wq, &cpu_queue->work); |
114 | put_cpu(); | ||
115 | 117 | ||
116 | return err; | 118 | return err; |
117 | } | 119 | } |
@@ -164,16 +166,11 @@ static void mcryptd_queue_worker(struct work_struct *work) | |||
164 | cpu_queue = container_of(work, struct mcryptd_cpu_queue, work); | 166 | cpu_queue = container_of(work, struct mcryptd_cpu_queue, work); |
165 | i = 0; | 167 | i = 0; |
166 | while (i < MCRYPTD_BATCH || single_task_running()) { | 168 | while (i < MCRYPTD_BATCH || single_task_running()) { |
167 | /* | 169 | |
168 | * preempt_disable/enable is used to prevent | 170 | spin_lock_bh(&cpu_queue->q_lock); |
169 | * being preempted by mcryptd_enqueue_request() | ||
170 | */ | ||
171 | local_bh_disable(); | ||
172 | preempt_disable(); | ||
173 | backlog = crypto_get_backlog(&cpu_queue->queue); | 171 | backlog = crypto_get_backlog(&cpu_queue->queue); |
174 | req = crypto_dequeue_request(&cpu_queue->queue); | 172 | req = crypto_dequeue_request(&cpu_queue->queue); |
175 | preempt_enable(); | 173 | spin_unlock_bh(&cpu_queue->q_lock); |
176 | local_bh_enable(); | ||
177 | 174 | ||
178 | if (!req) { | 175 | if (!req) { |
179 | mcryptd_opportunistic_flush(); | 176 | mcryptd_opportunistic_flush(); |
@@ -188,7 +185,7 @@ static void mcryptd_queue_worker(struct work_struct *work) | |||
188 | ++i; | 185 | ++i; |
189 | } | 186 | } |
190 | if (cpu_queue->queue.qlen) | 187 | if (cpu_queue->queue.qlen) |
191 | queue_work(kcrypto_wq, &cpu_queue->work); | 188 | queue_work_on(smp_processor_id(), kcrypto_wq, &cpu_queue->work); |
192 | } | 189 | } |
193 | 190 | ||
194 | void mcryptd_flusher(struct work_struct *__work) | 191 | void mcryptd_flusher(struct work_struct *__work) |
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index ee9cfb99fe25..f8ec3d4ba4a8 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c | |||
@@ -254,6 +254,14 @@ static void pcrypt_aead_exit_tfm(struct crypto_aead *tfm) | |||
254 | crypto_free_aead(ctx->child); | 254 | crypto_free_aead(ctx->child); |
255 | } | 255 | } |
256 | 256 | ||
257 | static void pcrypt_free(struct aead_instance *inst) | ||
258 | { | ||
259 | struct pcrypt_instance_ctx *ctx = aead_instance_ctx(inst); | ||
260 | |||
261 | crypto_drop_aead(&ctx->spawn); | ||
262 | kfree(inst); | ||
263 | } | ||
264 | |||
257 | static int pcrypt_init_instance(struct crypto_instance *inst, | 265 | static int pcrypt_init_instance(struct crypto_instance *inst, |
258 | struct crypto_alg *alg) | 266 | struct crypto_alg *alg) |
259 | { | 267 | { |
@@ -319,6 +327,8 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb, | |||
319 | inst->alg.encrypt = pcrypt_aead_encrypt; | 327 | inst->alg.encrypt = pcrypt_aead_encrypt; |
320 | inst->alg.decrypt = pcrypt_aead_decrypt; | 328 | inst->alg.decrypt = pcrypt_aead_decrypt; |
321 | 329 | ||
330 | inst->free = pcrypt_free; | ||
331 | |||
322 | err = aead_register_instance(tmpl, inst); | 332 | err = aead_register_instance(tmpl, inst); |
323 | if (err) | 333 | if (err) |
324 | goto out_drop_aead; | 334 | goto out_drop_aead; |
@@ -349,14 +359,6 @@ static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb) | |||
349 | return -EINVAL; | 359 | return -EINVAL; |
350 | } | 360 | } |
351 | 361 | ||
352 | static void pcrypt_free(struct crypto_instance *inst) | ||
353 | { | ||
354 | struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst); | ||
355 | |||
356 | crypto_drop_aead(&ctx->spawn); | ||
357 | kfree(inst); | ||
358 | } | ||
359 | |||
360 | static int pcrypt_cpumask_change_notify(struct notifier_block *self, | 362 | static int pcrypt_cpumask_change_notify(struct notifier_block *self, |
361 | unsigned long val, void *data) | 363 | unsigned long val, void *data) |
362 | { | 364 | { |
@@ -469,7 +471,6 @@ static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt) | |||
469 | static struct crypto_template pcrypt_tmpl = { | 471 | static struct crypto_template pcrypt_tmpl = { |
470 | .name = "pcrypt", | 472 | .name = "pcrypt", |
471 | .create = pcrypt_create, | 473 | .create = pcrypt_create, |
472 | .free = pcrypt_free, | ||
473 | .module = THIS_MODULE, | 474 | .module = THIS_MODULE, |
474 | }; | 475 | }; |
475 | 476 | ||
diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c index 2df9835dfbc0..bca99238948f 100644 --- a/crypto/poly1305_generic.c +++ b/crypto/poly1305_generic.c | |||
@@ -51,17 +51,6 @@ int crypto_poly1305_init(struct shash_desc *desc) | |||
51 | } | 51 | } |
52 | EXPORT_SYMBOL_GPL(crypto_poly1305_init); | 52 | EXPORT_SYMBOL_GPL(crypto_poly1305_init); |
53 | 53 | ||
54 | int crypto_poly1305_setkey(struct crypto_shash *tfm, | ||
55 | const u8 *key, unsigned int keylen) | ||
56 | { | ||
57 | /* Poly1305 requires a unique key for each tag, which implies that | ||
58 | * we can't set it on the tfm that gets accessed by multiple users | ||
59 | * simultaneously. Instead we expect the key as the first 32 bytes in | ||
60 | * the update() call. */ | ||
61 | return -ENOTSUPP; | ||
62 | } | ||
63 | EXPORT_SYMBOL_GPL(crypto_poly1305_setkey); | ||
64 | |||
65 | static void poly1305_setrkey(struct poly1305_desc_ctx *dctx, const u8 *key) | 54 | static void poly1305_setrkey(struct poly1305_desc_ctx *dctx, const u8 *key) |
66 | { | 55 | { |
67 | /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */ | 56 | /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */ |
@@ -80,6 +69,11 @@ static void poly1305_setskey(struct poly1305_desc_ctx *dctx, const u8 *key) | |||
80 | dctx->s[3] = le32_to_cpuvp(key + 12); | 69 | dctx->s[3] = le32_to_cpuvp(key + 12); |
81 | } | 70 | } |
82 | 71 | ||
72 | /* | ||
73 | * Poly1305 requires a unique key for each tag, which implies that we can't set | ||
74 | * it on the tfm that gets accessed by multiple users simultaneously. Instead we | ||
75 | * expect the key as the first 32 bytes in the update() call. | ||
76 | */ | ||
83 | unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, | 77 | unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, |
84 | const u8 *src, unsigned int srclen) | 78 | const u8 *src, unsigned int srclen) |
85 | { | 79 | { |
@@ -285,7 +279,6 @@ static struct shash_alg poly1305_alg = { | |||
285 | .init = crypto_poly1305_init, | 279 | .init = crypto_poly1305_init, |
286 | .update = crypto_poly1305_update, | 280 | .update = crypto_poly1305_update, |
287 | .final = crypto_poly1305_final, | 281 | .final = crypto_poly1305_final, |
288 | .setkey = crypto_poly1305_setkey, | ||
289 | .descsize = sizeof(struct poly1305_desc_ctx), | 282 | .descsize = sizeof(struct poly1305_desc_ctx), |
290 | .base = { | 283 | .base = { |
291 | .cra_name = "poly1305", | 284 | .cra_name = "poly1305", |
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index f522828d45c9..1d92b5d2d6bd 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c | |||
@@ -291,11 +291,13 @@ static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE], | |||
291 | } | 291 | } |
292 | 292 | ||
293 | sg_init_table(sg, np + 1); | 293 | sg_init_table(sg, np + 1); |
294 | np--; | 294 | if (rem) |
295 | np--; | ||
295 | for (k = 0; k < np; k++) | 296 | for (k = 0; k < np; k++) |
296 | sg_set_buf(&sg[k + 1], xbuf[k], PAGE_SIZE); | 297 | sg_set_buf(&sg[k + 1], xbuf[k], PAGE_SIZE); |
297 | 298 | ||
298 | sg_set_buf(&sg[k + 1], xbuf[k], rem); | 299 | if (rem) |
300 | sg_set_buf(&sg[k + 1], xbuf[k], rem); | ||
299 | } | 301 | } |
300 | 302 | ||
301 | static void test_aead_speed(const char *algo, int enc, unsigned int secs, | 303 | static void test_aead_speed(const char *algo, int enc, unsigned int secs, |
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c index 9f77943653fb..b63a173786d5 100644 --- a/drivers/acpi/acpi_processor.c +++ b/drivers/acpi/acpi_processor.c | |||
@@ -331,15 +331,6 @@ static int acpi_processor_get_info(struct acpi_device *device) | |||
331 | pr->throttling.duty_width = acpi_gbl_FADT.duty_width; | 331 | pr->throttling.duty_width = acpi_gbl_FADT.duty_width; |
332 | 332 | ||
333 | pr->pblk = object.processor.pblk_address; | 333 | pr->pblk = object.processor.pblk_address; |
334 | |||
335 | /* | ||
336 | * We don't care about error returns - we just try to mark | ||
337 | * these reserved so that nobody else is confused into thinking | ||
338 | * that this region might be unused.. | ||
339 | * | ||
340 | * (In particular, allocating the IO range for Cardbus) | ||
341 | */ | ||
342 | request_region(pr->throttling.address, 6, "ACPI CPU throttle"); | ||
343 | } | 334 | } |
344 | 335 | ||
345 | /* | 336 | /* |
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c index de325ae04ce1..3b3c5b90bd20 100644 --- a/drivers/acpi/acpica/nsutils.c +++ b/drivers/acpi/acpica/nsutils.c | |||
@@ -593,25 +593,20 @@ struct acpi_namespace_node *acpi_ns_validate_handle(acpi_handle handle) | |||
593 | void acpi_ns_terminate(void) | 593 | void acpi_ns_terminate(void) |
594 | { | 594 | { |
595 | acpi_status status; | 595 | acpi_status status; |
596 | union acpi_operand_object *prev; | ||
597 | union acpi_operand_object *next; | ||
596 | 598 | ||
597 | ACPI_FUNCTION_TRACE(ns_terminate); | 599 | ACPI_FUNCTION_TRACE(ns_terminate); |
598 | 600 | ||
599 | #ifdef ACPI_EXEC_APP | 601 | /* Delete any module-level code blocks */ |
600 | { | ||
601 | union acpi_operand_object *prev; | ||
602 | union acpi_operand_object *next; | ||
603 | 602 | ||
604 | /* Delete any module-level code blocks */ | 603 | next = acpi_gbl_module_code_list; |
605 | 604 | while (next) { | |
606 | next = acpi_gbl_module_code_list; | 605 | prev = next; |
607 | while (next) { | 606 | next = next->method.mutex; |
608 | prev = next; | 607 | prev->method.mutex = NULL; /* Clear the Mutex (cheated) field */ |
609 | next = next->method.mutex; | 608 | acpi_ut_remove_reference(prev); |
610 | prev->method.mutex = NULL; /* Clear the Mutex (cheated) field */ | ||
611 | acpi_ut_remove_reference(prev); | ||
612 | } | ||
613 | } | 609 | } |
614 | #endif | ||
615 | 610 | ||
616 | /* | 611 | /* |
617 | * Free the entire namespace -- all nodes and all objects | 612 | * Free the entire namespace -- all nodes and all objects |
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c index 6682c5daf742..4c9be45ea328 100644 --- a/drivers/acpi/apei/erst.c +++ b/drivers/acpi/apei/erst.c | |||
@@ -1020,7 +1020,7 @@ skip: | |||
1020 | /* The record may be cleared by others, try read next record */ | 1020 | /* The record may be cleared by others, try read next record */ |
1021 | if (len == -ENOENT) | 1021 | if (len == -ENOENT) |
1022 | goto skip; | 1022 | goto skip; |
1023 | else if (len < sizeof(*rcd)) { | 1023 | else if (len < 0 || len < sizeof(*rcd)) { |
1024 | rc = -EIO; | 1024 | rc = -EIO; |
1025 | goto out; | 1025 | goto out; |
1026 | } | 1026 | } |
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c index b9afb47db7ed..1521d9a41d25 100644 --- a/drivers/acpi/device_sysfs.c +++ b/drivers/acpi/device_sysfs.c | |||
@@ -146,6 +146,10 @@ static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias, | |||
146 | int count; | 146 | int count; |
147 | struct acpi_hardware_id *id; | 147 | struct acpi_hardware_id *id; |
148 | 148 | ||
149 | /* Avoid unnecessarily loading modules for non present devices. */ | ||
150 | if (!acpi_device_is_present(acpi_dev)) | ||
151 | return 0; | ||
152 | |||
149 | /* | 153 | /* |
150 | * Since we skip ACPI_DT_NAMESPACE_HID from the modalias below, 0 should | 154 | * Since we skip ACPI_DT_NAMESPACE_HID from the modalias below, 0 should |
151 | * be returned if ACPI_DT_NAMESPACE_HID is the only ACPI/PNP ID in the | 155 | * be returned if ACPI_DT_NAMESPACE_HID is the only ACPI/PNP ID in the |
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index 73c9c7fa9001..f06317d6fc38 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c | |||
@@ -99,13 +99,13 @@ static int find_child_checks(struct acpi_device *adev, bool check_children) | |||
99 | return -ENODEV; | 99 | return -ENODEV; |
100 | 100 | ||
101 | /* | 101 | /* |
102 | * If the device has a _HID (or _CID) returning a valid ACPI/PNP | 102 | * If the device has a _HID returning a valid ACPI/PNP device ID, it is |
103 | * device ID, it is better to make it look less attractive here, so that | 103 | * better to make it look less attractive here, so that the other device |
104 | * the other device with the same _ADR value (that may not have a valid | 104 | * with the same _ADR value (that may not have a valid device ID) can be |
105 | * device ID) can be matched going forward. [This means a second spec | 105 | * matched going forward. [This means a second spec violation in a row, |
106 | * violation in a row, so whatever we do here is best effort anyway.] | 106 | * so whatever we do here is best effort anyway.] |
107 | */ | 107 | */ |
108 | return sta_present && list_empty(&adev->pnp.ids) ? | 108 | return sta_present && !adev->pnp.type.platform_id ? |
109 | FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE; | 109 | FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE; |
110 | } | 110 | } |
111 | 111 | ||
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c index f170d746336d..c72e64893d03 100644 --- a/drivers/acpi/processor_throttling.c +++ b/drivers/acpi/processor_throttling.c | |||
@@ -676,6 +676,15 @@ static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr) | |||
676 | if (!pr->flags.throttling) | 676 | if (!pr->flags.throttling) |
677 | return -ENODEV; | 677 | return -ENODEV; |
678 | 678 | ||
679 | /* | ||
680 | * We don't care about error returns - we just try to mark | ||
681 | * these reserved so that nobody else is confused into thinking | ||
682 | * that this region might be unused.. | ||
683 | * | ||
684 | * (In particular, allocating the IO range for Cardbus) | ||
685 | */ | ||
686 | request_region(pr->throttling.address, 6, "ACPI CPU throttle"); | ||
687 | |||
679 | pr->throttling.state = 0; | 688 | pr->throttling.state = 0; |
680 | 689 | ||
681 | duty_mask = pr->throttling.state_count - 1; | 690 | duty_mask = pr->throttling.state_count - 1; |
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c index 2fa8304171e0..7a3431018e0a 100644 --- a/drivers/acpi/sbshc.c +++ b/drivers/acpi/sbshc.c | |||
@@ -275,8 +275,8 @@ static int acpi_smbus_hc_add(struct acpi_device *device) | |||
275 | device->driver_data = hc; | 275 | device->driver_data = hc; |
276 | 276 | ||
277 | acpi_ec_add_query_handler(hc->ec, hc->query_bit, NULL, smbus_alarm, hc); | 277 | acpi_ec_add_query_handler(hc->ec, hc->query_bit, NULL, smbus_alarm, hc); |
278 | printk(KERN_INFO PREFIX "SBS HC: EC = 0x%p, offset = 0x%0x, query_bit = 0x%0x\n", | 278 | dev_info(&device->dev, "SBS HC: offset = 0x%0x, query_bit = 0x%0x\n", |
279 | hc->ec, hc->offset, hc->query_bit); | 279 | hc->offset, hc->query_bit); |
280 | 280 | ||
281 | return 0; | 281 | return 0; |
282 | } | 282 | } |
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index fe842a38b65f..6945180caa70 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c | |||
@@ -30,6 +30,8 @@ | |||
30 | #include "binder_alloc.h" | 30 | #include "binder_alloc.h" |
31 | #include "binder_trace.h" | 31 | #include "binder_trace.h" |
32 | 32 | ||
33 | #define BINDER_MIN_ALLOC (1 * PAGE_SIZE) | ||
34 | |||
33 | static DEFINE_MUTEX(binder_alloc_mmap_lock); | 35 | static DEFINE_MUTEX(binder_alloc_mmap_lock); |
34 | 36 | ||
35 | enum { | 37 | enum { |
@@ -182,9 +184,9 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc, | |||
182 | return buffer; | 184 | return buffer; |
183 | } | 185 | } |
184 | 186 | ||
185 | static int binder_update_page_range(struct binder_alloc *alloc, int allocate, | 187 | static int __binder_update_page_range(struct binder_alloc *alloc, int allocate, |
186 | void *start, void *end, | 188 | void *start, void *end, |
187 | struct vm_area_struct *vma) | 189 | struct vm_area_struct *vma) |
188 | { | 190 | { |
189 | void *page_addr; | 191 | void *page_addr; |
190 | unsigned long user_page_addr; | 192 | unsigned long user_page_addr; |
@@ -284,6 +286,20 @@ err_no_vma: | |||
284 | return vma ? -ENOMEM : -ESRCH; | 286 | return vma ? -ENOMEM : -ESRCH; |
285 | } | 287 | } |
286 | 288 | ||
289 | static int binder_update_page_range(struct binder_alloc *alloc, int allocate, | ||
290 | void *start, void *end, | ||
291 | struct vm_area_struct *vma) | ||
292 | { | ||
293 | /* | ||
294 | * For regular updates, move up start if needed since MIN_ALLOC pages | ||
295 | * are always mapped | ||
296 | */ | ||
297 | if (start - alloc->buffer < BINDER_MIN_ALLOC) | ||
298 | start = alloc->buffer + BINDER_MIN_ALLOC; | ||
299 | |||
300 | return __binder_update_page_range(alloc, allocate, start, end, vma); | ||
301 | } | ||
302 | |||
287 | struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc, | 303 | struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc, |
288 | size_t data_size, | 304 | size_t data_size, |
289 | size_t offsets_size, | 305 | size_t offsets_size, |
@@ -667,6 +683,12 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc, | |||
667 | goto err_alloc_buf_struct_failed; | 683 | goto err_alloc_buf_struct_failed; |
668 | } | 684 | } |
669 | 685 | ||
686 | if (__binder_update_page_range(alloc, 1, alloc->buffer, | ||
687 | alloc->buffer + BINDER_MIN_ALLOC, vma)) { | ||
688 | ret = -ENOMEM; | ||
689 | failure_string = "alloc small buf"; | ||
690 | goto err_alloc_small_buf_failed; | ||
691 | } | ||
670 | buffer->data = alloc->buffer; | 692 | buffer->data = alloc->buffer; |
671 | list_add(&buffer->entry, &alloc->buffers); | 693 | list_add(&buffer->entry, &alloc->buffers); |
672 | buffer->free = 1; | 694 | buffer->free = 1; |
@@ -678,6 +700,8 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc, | |||
678 | 700 | ||
679 | return 0; | 701 | return 0; |
680 | 702 | ||
703 | err_alloc_small_buf_failed: | ||
704 | kfree(buffer); | ||
681 | err_alloc_buf_struct_failed: | 705 | err_alloc_buf_struct_failed: |
682 | kfree(alloc->pages); | 706 | kfree(alloc->pages); |
683 | alloc->pages = NULL; | 707 | alloc->pages = NULL; |
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 60a15831c009..8ddf5d5c94fd 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c | |||
@@ -260,9 +260,9 @@ static const struct pci_device_id ahci_pci_tbl[] = { | |||
260 | { PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */ | 260 | { PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */ |
261 | { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */ | 261 | { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */ |
262 | { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */ | 262 | { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */ |
263 | { PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH AHCI */ | 263 | { PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH M AHCI */ |
264 | { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */ | 264 | { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */ |
265 | { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */ | 265 | { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH M RAID */ |
266 | { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */ | 266 | { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */ |
267 | { PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */ | 267 | { PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */ |
268 | { PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */ | 268 | { PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */ |
@@ -285,9 +285,9 @@ static const struct pci_device_id ahci_pci_tbl[] = { | |||
285 | { PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */ | 285 | { PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */ |
286 | { PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */ | 286 | { PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */ |
287 | { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */ | 287 | { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */ |
288 | { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */ | 288 | { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT M AHCI */ |
289 | { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */ | 289 | { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */ |
290 | { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */ | 290 | { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT M RAID */ |
291 | { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */ | 291 | { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */ |
292 | { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */ | 292 | { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */ |
293 | { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */ | 293 | { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */ |
@@ -296,20 +296,20 @@ static const struct pci_device_id ahci_pci_tbl[] = { | |||
296 | { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG RAID */ | 296 | { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG RAID */ |
297 | { PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */ | 297 | { PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */ |
298 | { PCI_VDEVICE(INTEL, 0x1e02), board_ahci }, /* Panther Point AHCI */ | 298 | { PCI_VDEVICE(INTEL, 0x1e02), board_ahci }, /* Panther Point AHCI */ |
299 | { PCI_VDEVICE(INTEL, 0x1e03), board_ahci }, /* Panther Point AHCI */ | 299 | { PCI_VDEVICE(INTEL, 0x1e03), board_ahci }, /* Panther Point M AHCI */ |
300 | { PCI_VDEVICE(INTEL, 0x1e04), board_ahci }, /* Panther Point RAID */ | 300 | { PCI_VDEVICE(INTEL, 0x1e04), board_ahci }, /* Panther Point RAID */ |
301 | { PCI_VDEVICE(INTEL, 0x1e05), board_ahci }, /* Panther Point RAID */ | 301 | { PCI_VDEVICE(INTEL, 0x1e05), board_ahci }, /* Panther Point RAID */ |
302 | { PCI_VDEVICE(INTEL, 0x1e06), board_ahci }, /* Panther Point RAID */ | 302 | { PCI_VDEVICE(INTEL, 0x1e06), board_ahci }, /* Panther Point RAID */ |
303 | { PCI_VDEVICE(INTEL, 0x1e07), board_ahci }, /* Panther Point RAID */ | 303 | { PCI_VDEVICE(INTEL, 0x1e07), board_ahci }, /* Panther Point M RAID */ |
304 | { PCI_VDEVICE(INTEL, 0x1e0e), board_ahci }, /* Panther Point RAID */ | 304 | { PCI_VDEVICE(INTEL, 0x1e0e), board_ahci }, /* Panther Point RAID */ |
305 | { PCI_VDEVICE(INTEL, 0x8c02), board_ahci }, /* Lynx Point AHCI */ | 305 | { PCI_VDEVICE(INTEL, 0x8c02), board_ahci }, /* Lynx Point AHCI */ |
306 | { PCI_VDEVICE(INTEL, 0x8c03), board_ahci }, /* Lynx Point AHCI */ | 306 | { PCI_VDEVICE(INTEL, 0x8c03), board_ahci }, /* Lynx Point M AHCI */ |
307 | { PCI_VDEVICE(INTEL, 0x8c04), board_ahci }, /* Lynx Point RAID */ | 307 | { PCI_VDEVICE(INTEL, 0x8c04), board_ahci }, /* Lynx Point RAID */ |
308 | { PCI_VDEVICE(INTEL, 0x8c05), board_ahci }, /* Lynx Point RAID */ | 308 | { PCI_VDEVICE(INTEL, 0x8c05), board_ahci }, /* Lynx Point M RAID */ |
309 | { PCI_VDEVICE(INTEL, 0x8c06), board_ahci }, /* Lynx Point RAID */ | 309 | { PCI_VDEVICE(INTEL, 0x8c06), board_ahci }, /* Lynx Point RAID */ |
310 | { PCI_VDEVICE(INTEL, 0x8c07), board_ahci }, /* Lynx Point RAID */ | 310 | { PCI_VDEVICE(INTEL, 0x8c07), board_ahci }, /* Lynx Point M RAID */ |
311 | { PCI_VDEVICE(INTEL, 0x8c0e), board_ahci }, /* Lynx Point RAID */ | 311 | { PCI_VDEVICE(INTEL, 0x8c0e), board_ahci }, /* Lynx Point RAID */ |
312 | { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci }, /* Lynx Point RAID */ | 312 | { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci }, /* Lynx Point M RAID */ |
313 | { PCI_VDEVICE(INTEL, 0x9c02), board_ahci }, /* Lynx Point-LP AHCI */ | 313 | { PCI_VDEVICE(INTEL, 0x9c02), board_ahci }, /* Lynx Point-LP AHCI */ |
314 | { PCI_VDEVICE(INTEL, 0x9c03), board_ahci }, /* Lynx Point-LP AHCI */ | 314 | { PCI_VDEVICE(INTEL, 0x9c03), board_ahci }, /* Lynx Point-LP AHCI */ |
315 | { PCI_VDEVICE(INTEL, 0x9c04), board_ahci }, /* Lynx Point-LP RAID */ | 315 | { PCI_VDEVICE(INTEL, 0x9c04), board_ahci }, /* Lynx Point-LP RAID */ |
@@ -350,21 +350,21 @@ static const struct pci_device_id ahci_pci_tbl[] = { | |||
350 | { PCI_VDEVICE(INTEL, 0x9c87), board_ahci }, /* Wildcat Point-LP RAID */ | 350 | { PCI_VDEVICE(INTEL, 0x9c87), board_ahci }, /* Wildcat Point-LP RAID */ |
351 | { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci }, /* Wildcat Point-LP RAID */ | 351 | { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci }, /* Wildcat Point-LP RAID */ |
352 | { PCI_VDEVICE(INTEL, 0x8c82), board_ahci }, /* 9 Series AHCI */ | 352 | { PCI_VDEVICE(INTEL, 0x8c82), board_ahci }, /* 9 Series AHCI */ |
353 | { PCI_VDEVICE(INTEL, 0x8c83), board_ahci }, /* 9 Series AHCI */ | 353 | { PCI_VDEVICE(INTEL, 0x8c83), board_ahci }, /* 9 Series M AHCI */ |
354 | { PCI_VDEVICE(INTEL, 0x8c84), board_ahci }, /* 9 Series RAID */ | 354 | { PCI_VDEVICE(INTEL, 0x8c84), board_ahci }, /* 9 Series RAID */ |
355 | { PCI_VDEVICE(INTEL, 0x8c85), board_ahci }, /* 9 Series RAID */ | 355 | { PCI_VDEVICE(INTEL, 0x8c85), board_ahci }, /* 9 Series M RAID */ |
356 | { PCI_VDEVICE(INTEL, 0x8c86), board_ahci }, /* 9 Series RAID */ | 356 | { PCI_VDEVICE(INTEL, 0x8c86), board_ahci }, /* 9 Series RAID */ |
357 | { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */ | 357 | { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series M RAID */ |
358 | { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */ | 358 | { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */ |
359 | { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */ | 359 | { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series M RAID */ |
360 | { PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */ | 360 | { PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */ |
361 | { PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */ | 361 | { PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */ |
362 | { PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */ | 362 | { PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */ |
363 | { PCI_VDEVICE(INTEL, 0xa102), board_ahci }, /* Sunrise Point-H AHCI */ | 363 | { PCI_VDEVICE(INTEL, 0xa102), board_ahci }, /* Sunrise Point-H AHCI */ |
364 | { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */ | 364 | { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H M AHCI */ |
365 | { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */ | 365 | { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */ |
366 | { PCI_VDEVICE(INTEL, 0xa106), board_ahci }, /* Sunrise Point-H RAID */ | 366 | { PCI_VDEVICE(INTEL, 0xa106), board_ahci }, /* Sunrise Point-H RAID */ |
367 | { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */ | 367 | { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H M RAID */ |
368 | { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */ | 368 | { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */ |
369 | { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/ | 369 | { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/ |
370 | { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Lewisburg AHCI*/ | 370 | { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Lewisburg AHCI*/ |
@@ -382,6 +382,11 @@ static const struct pci_device_id ahci_pci_tbl[] = { | |||
382 | { PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/ | 382 | { PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/ |
383 | { PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/ | 383 | { PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/ |
384 | { PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/ | 384 | { PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/ |
385 | { PCI_VDEVICE(INTEL, 0xa356), board_ahci }, /* Cannon Lake PCH-H RAID */ | ||
386 | { PCI_VDEVICE(INTEL, 0x0f22), board_ahci }, /* Bay Trail AHCI */ | ||
387 | { PCI_VDEVICE(INTEL, 0x0f23), board_ahci }, /* Bay Trail AHCI */ | ||
388 | { PCI_VDEVICE(INTEL, 0x22a3), board_ahci }, /* Cherry Trail AHCI */ | ||
389 | { PCI_VDEVICE(INTEL, 0x5ae3), board_ahci }, /* Apollo Lake AHCI */ | ||
385 | 390 | ||
386 | /* JMicron 360/1/3/5/6, match class to avoid IDE function */ | 391 | /* JMicron 360/1/3/5/6, match class to avoid IDE function */ |
387 | { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, | 392 | { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, |
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index b0b77b61c40c..69ec1c5d7152 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -4143,6 +4143,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
4143 | * https://bugzilla.kernel.org/show_bug.cgi?id=121671 | 4143 | * https://bugzilla.kernel.org/show_bug.cgi?id=121671 |
4144 | */ | 4144 | */ |
4145 | { "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 }, | 4145 | { "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 }, |
4146 | { "LITEON EP1-*", NULL, ATA_HORKAGE_MAX_SEC_1024 }, | ||
4146 | 4147 | ||
4147 | /* Devices we expect to fail diagnostics */ | 4148 | /* Devices we expect to fail diagnostics */ |
4148 | 4149 | ||
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index 98504ec99c7d..59992788966c 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig | |||
@@ -223,6 +223,9 @@ config GENERIC_CPU_DEVICES | |||
223 | config GENERIC_CPU_AUTOPROBE | 223 | config GENERIC_CPU_AUTOPROBE |
224 | bool | 224 | bool |
225 | 225 | ||
226 | config GENERIC_CPU_VULNERABILITIES | ||
227 | bool | ||
228 | |||
226 | config SOC_BUS | 229 | config SOC_BUS |
227 | bool | 230 | bool |
228 | 231 | ||
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index e9fd32e91668..70e13cf06ed0 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c | |||
@@ -16,6 +16,7 @@ | |||
16 | * You should have received a copy of the GNU General Public License | 16 | * You should have received a copy of the GNU General Public License |
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
18 | */ | 18 | */ |
19 | #include <linux/acpi.h> | ||
19 | #include <linux/bitops.h> | 20 | #include <linux/bitops.h> |
20 | #include <linux/cacheinfo.h> | 21 | #include <linux/cacheinfo.h> |
21 | #include <linux/compiler.h> | 22 | #include <linux/compiler.h> |
@@ -104,9 +105,16 @@ static int cache_shared_cpu_map_setup(unsigned int cpu) | |||
104 | struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); | 105 | struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); |
105 | struct cacheinfo *this_leaf, *sib_leaf; | 106 | struct cacheinfo *this_leaf, *sib_leaf; |
106 | unsigned int index; | 107 | unsigned int index; |
107 | int ret; | 108 | int ret = 0; |
109 | |||
110 | if (this_cpu_ci->cpu_map_populated) | ||
111 | return 0; | ||
108 | 112 | ||
109 | ret = cache_setup_of_node(cpu); | 113 | if (of_have_populated_dt()) |
114 | ret = cache_setup_of_node(cpu); | ||
115 | else if (!acpi_disabled) | ||
116 | /* No cache property/hierarchy support yet in ACPI */ | ||
117 | ret = -ENOTSUPP; | ||
110 | if (ret) | 118 | if (ret) |
111 | return ret; | 119 | return ret; |
112 | 120 | ||
@@ -203,8 +211,7 @@ static int detect_cache_attributes(unsigned int cpu) | |||
203 | */ | 211 | */ |
204 | ret = cache_shared_cpu_map_setup(cpu); | 212 | ret = cache_shared_cpu_map_setup(cpu); |
205 | if (ret) { | 213 | if (ret) { |
206 | pr_warn("Unable to detect cache hierarchy from DT for CPU %d\n", | 214 | pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu); |
207 | cpu); | ||
208 | goto free_ci; | 215 | goto free_ci; |
209 | } | 216 | } |
210 | return 0; | 217 | return 0; |
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 91bbb1959d8d..3db71afbba93 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c | |||
@@ -498,10 +498,58 @@ static void __init cpu_dev_register_generic(void) | |||
498 | #endif | 498 | #endif |
499 | } | 499 | } |
500 | 500 | ||
501 | #ifdef CONFIG_GENERIC_CPU_VULNERABILITIES | ||
502 | |||
503 | ssize_t __weak cpu_show_meltdown(struct device *dev, | ||
504 | struct device_attribute *attr, char *buf) | ||
505 | { | ||
506 | return sprintf(buf, "Not affected\n"); | ||
507 | } | ||
508 | |||
509 | ssize_t __weak cpu_show_spectre_v1(struct device *dev, | ||
510 | struct device_attribute *attr, char *buf) | ||
511 | { | ||
512 | return sprintf(buf, "Not affected\n"); | ||
513 | } | ||
514 | |||
515 | ssize_t __weak cpu_show_spectre_v2(struct device *dev, | ||
516 | struct device_attribute *attr, char *buf) | ||
517 | { | ||
518 | return sprintf(buf, "Not affected\n"); | ||
519 | } | ||
520 | |||
521 | static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); | ||
522 | static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); | ||
523 | static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); | ||
524 | |||
525 | static struct attribute *cpu_root_vulnerabilities_attrs[] = { | ||
526 | &dev_attr_meltdown.attr, | ||
527 | &dev_attr_spectre_v1.attr, | ||
528 | &dev_attr_spectre_v2.attr, | ||
529 | NULL | ||
530 | }; | ||
531 | |||
532 | static const struct attribute_group cpu_root_vulnerabilities_group = { | ||
533 | .name = "vulnerabilities", | ||
534 | .attrs = cpu_root_vulnerabilities_attrs, | ||
535 | }; | ||
536 | |||
537 | static void __init cpu_register_vulnerabilities(void) | ||
538 | { | ||
539 | if (sysfs_create_group(&cpu_subsys.dev_root->kobj, | ||
540 | &cpu_root_vulnerabilities_group)) | ||
541 | pr_err("Unable to register CPU vulnerabilities\n"); | ||
542 | } | ||
543 | |||
544 | #else | ||
545 | static inline void cpu_register_vulnerabilities(void) { } | ||
546 | #endif | ||
547 | |||
501 | void __init cpu_dev_init(void) | 548 | void __init cpu_dev_init(void) |
502 | { | 549 | { |
503 | if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups)) | 550 | if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups)) |
504 | panic("Failed to register CPU subsystem"); | 551 | panic("Failed to register CPU subsystem"); |
505 | 552 | ||
506 | cpu_dev_register_generic(); | 553 | cpu_dev_register_generic(); |
554 | cpu_register_vulnerabilities(); | ||
507 | } | 555 | } |
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c index a311cfa4c5bd..a6975795e7f3 100644 --- a/drivers/base/power/trace.c +++ b/drivers/base/power/trace.c | |||
@@ -166,14 +166,14 @@ void generate_pm_trace(const void *tracedata, unsigned int user) | |||
166 | } | 166 | } |
167 | EXPORT_SYMBOL(generate_pm_trace); | 167 | EXPORT_SYMBOL(generate_pm_trace); |
168 | 168 | ||
169 | extern char __tracedata_start, __tracedata_end; | 169 | extern char __tracedata_start[], __tracedata_end[]; |
170 | static int show_file_hash(unsigned int value) | 170 | static int show_file_hash(unsigned int value) |
171 | { | 171 | { |
172 | int match; | 172 | int match; |
173 | char *tracedata; | 173 | char *tracedata; |
174 | 174 | ||
175 | match = 0; | 175 | match = 0; |
176 | for (tracedata = &__tracedata_start ; tracedata < &__tracedata_end ; | 176 | for (tracedata = __tracedata_start ; tracedata < __tracedata_end ; |
177 | tracedata += 2 + sizeof(unsigned long)) { | 177 | tracedata += 2 + sizeof(unsigned long)) { |
178 | unsigned short lineno = *(unsigned short *)tracedata; | 178 | unsigned short lineno = *(unsigned short *)tracedata; |
179 | const char *file = *(const char **)(tracedata + 2); | 179 | const char *file = *(const char **)(tracedata + 2); |
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index cec36d5c24f5..1c36de9719e5 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c | |||
@@ -1569,9 +1569,8 @@ out: | |||
1569 | return err; | 1569 | return err; |
1570 | } | 1570 | } |
1571 | 1571 | ||
1572 | static void lo_release(struct gendisk *disk, fmode_t mode) | 1572 | static void __lo_release(struct loop_device *lo) |
1573 | { | 1573 | { |
1574 | struct loop_device *lo = disk->private_data; | ||
1575 | int err; | 1574 | int err; |
1576 | 1575 | ||
1577 | if (atomic_dec_return(&lo->lo_refcnt)) | 1576 | if (atomic_dec_return(&lo->lo_refcnt)) |
@@ -1597,6 +1596,13 @@ static void lo_release(struct gendisk *disk, fmode_t mode) | |||
1597 | mutex_unlock(&lo->lo_ctl_mutex); | 1596 | mutex_unlock(&lo->lo_ctl_mutex); |
1598 | } | 1597 | } |
1599 | 1598 | ||
1599 | static void lo_release(struct gendisk *disk, fmode_t mode) | ||
1600 | { | ||
1601 | mutex_lock(&loop_index_mutex); | ||
1602 | __lo_release(disk->private_data); | ||
1603 | mutex_unlock(&loop_index_mutex); | ||
1604 | } | ||
1605 | |||
1600 | static const struct block_device_operations lo_fops = { | 1606 | static const struct block_device_operations lo_fops = { |
1601 | .owner = THIS_MODULE, | 1607 | .owner = THIS_MODULE, |
1602 | .open = lo_open, | 1608 | .open = lo_open, |
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index d06c62eccdf0..156968a6655d 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c | |||
@@ -2779,7 +2779,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev) | |||
2779 | pd->pkt_dev = MKDEV(pktdev_major, idx); | 2779 | pd->pkt_dev = MKDEV(pktdev_major, idx); |
2780 | ret = pkt_new_dev(pd, dev); | 2780 | ret = pkt_new_dev(pd, dev); |
2781 | if (ret) | 2781 | if (ret) |
2782 | goto out_new_dev; | 2782 | goto out_mem2; |
2783 | 2783 | ||
2784 | /* inherit events of the host device */ | 2784 | /* inherit events of the host device */ |
2785 | disk->events = pd->bdev->bd_disk->events; | 2785 | disk->events = pd->bdev->bd_disk->events; |
@@ -2797,8 +2797,6 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev) | |||
2797 | mutex_unlock(&ctl_mutex); | 2797 | mutex_unlock(&ctl_mutex); |
2798 | return 0; | 2798 | return 0; |
2799 | 2799 | ||
2800 | out_new_dev: | ||
2801 | blk_cleanup_queue(disk->queue); | ||
2802 | out_mem2: | 2800 | out_mem2: |
2803 | put_disk(disk); | 2801 | put_disk(disk); |
2804 | out_mem: | 2802 | out_mem: |
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index ca3bcc81b623..e0699a20859f 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c | |||
@@ -3767,7 +3767,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) | |||
3767 | segment_size = rbd_obj_bytes(&rbd_dev->header); | 3767 | segment_size = rbd_obj_bytes(&rbd_dev->header); |
3768 | blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE); | 3768 | blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE); |
3769 | q->limits.max_sectors = queue_max_hw_sectors(q); | 3769 | q->limits.max_sectors = queue_max_hw_sectors(q); |
3770 | blk_queue_max_segments(q, segment_size / SECTOR_SIZE); | 3770 | blk_queue_max_segments(q, USHRT_MAX); |
3771 | blk_queue_max_segment_size(q, segment_size); | 3771 | blk_queue_max_segment_size(q, segment_size); |
3772 | blk_queue_io_min(q, segment_size); | 3772 | blk_queue_io_min(q, segment_size); |
3773 | blk_queue_io_opt(q, segment_size); | 3773 | blk_queue_io_opt(q, segment_size); |
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c index 7b624423a7e8..89ccb604045c 100644 --- a/drivers/bluetooth/btsdio.c +++ b/drivers/bluetooth/btsdio.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/errno.h> | 31 | #include <linux/errno.h> |
32 | #include <linux/skbuff.h> | 32 | #include <linux/skbuff.h> |
33 | 33 | ||
34 | #include <linux/mmc/host.h> | ||
34 | #include <linux/mmc/sdio_ids.h> | 35 | #include <linux/mmc/sdio_ids.h> |
35 | #include <linux/mmc/sdio_func.h> | 36 | #include <linux/mmc/sdio_func.h> |
36 | 37 | ||
@@ -291,6 +292,14 @@ static int btsdio_probe(struct sdio_func *func, | |||
291 | tuple = tuple->next; | 292 | tuple = tuple->next; |
292 | } | 293 | } |
293 | 294 | ||
295 | /* BCM43341 devices soldered onto the PCB (non-removable) use an | ||
296 | * uart connection for bluetooth, ignore the BT SDIO interface. | ||
297 | */ | ||
298 | if (func->vendor == SDIO_VENDOR_ID_BROADCOM && | ||
299 | func->device == SDIO_DEVICE_ID_BROADCOM_43341 && | ||
300 | !mmc_card_is_removable(func->card->host)) | ||
301 | return -ENODEV; | ||
302 | |||
294 | data = devm_kzalloc(&func->dev, sizeof(*data), GFP_KERNEL); | 303 | data = devm_kzalloc(&func->dev, sizeof(*data), GFP_KERNEL); |
295 | if (!data) | 304 | if (!data) |
296 | return -ENOMEM; | 305 | return -ENOMEM; |
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index ce120fbe229e..54cef3dc0beb 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c | |||
@@ -23,6 +23,7 @@ | |||
23 | 23 | ||
24 | #include <linux/module.h> | 24 | #include <linux/module.h> |
25 | #include <linux/usb.h> | 25 | #include <linux/usb.h> |
26 | #include <linux/usb/quirks.h> | ||
26 | #include <linux/firmware.h> | 27 | #include <linux/firmware.h> |
27 | #include <asm/unaligned.h> | 28 | #include <asm/unaligned.h> |
28 | 29 | ||
@@ -360,8 +361,8 @@ static const struct usb_device_id blacklist_table[] = { | |||
360 | #define BTUSB_FIRMWARE_LOADED 7 | 361 | #define BTUSB_FIRMWARE_LOADED 7 |
361 | #define BTUSB_FIRMWARE_FAILED 8 | 362 | #define BTUSB_FIRMWARE_FAILED 8 |
362 | #define BTUSB_BOOTING 9 | 363 | #define BTUSB_BOOTING 9 |
363 | #define BTUSB_RESET_RESUME 10 | 364 | #define BTUSB_DIAG_RUNNING 10 |
364 | #define BTUSB_DIAG_RUNNING 11 | 365 | #define BTUSB_OOB_WAKE_ENABLED 11 |
365 | 366 | ||
366 | struct btusb_data { | 367 | struct btusb_data { |
367 | struct hci_dev *hdev; | 368 | struct hci_dev *hdev; |
@@ -1050,10 +1051,6 @@ static int btusb_open(struct hci_dev *hdev) | |||
1050 | return err; | 1051 | return err; |
1051 | 1052 | ||
1052 | data->intf->needs_remote_wakeup = 1; | 1053 | data->intf->needs_remote_wakeup = 1; |
1053 | /* device specific wakeup source enabled and required for USB | ||
1054 | * remote wakeup while host is suspended | ||
1055 | */ | ||
1056 | device_wakeup_enable(&data->udev->dev); | ||
1057 | 1054 | ||
1058 | if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags)) | 1055 | if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags)) |
1059 | goto done; | 1056 | goto done; |
@@ -1117,7 +1114,6 @@ static int btusb_close(struct hci_dev *hdev) | |||
1117 | goto failed; | 1114 | goto failed; |
1118 | 1115 | ||
1119 | data->intf->needs_remote_wakeup = 0; | 1116 | data->intf->needs_remote_wakeup = 0; |
1120 | device_wakeup_disable(&data->udev->dev); | ||
1121 | usb_autopm_put_interface(data->intf); | 1117 | usb_autopm_put_interface(data->intf); |
1122 | 1118 | ||
1123 | failed: | 1119 | failed: |
@@ -2977,9 +2973,9 @@ static int btusb_probe(struct usb_interface *intf, | |||
2977 | 2973 | ||
2978 | /* QCA Rome devices lose their updated firmware over suspend, | 2974 | /* QCA Rome devices lose their updated firmware over suspend, |
2979 | * but the USB hub doesn't notice any status change. | 2975 | * but the USB hub doesn't notice any status change. |
2980 | * Explicitly request a device reset on resume. | 2976 | * explicitly request a device reset on resume. |
2981 | */ | 2977 | */ |
2982 | set_bit(BTUSB_RESET_RESUME, &data->flags); | 2978 | interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME; |
2983 | } | 2979 | } |
2984 | 2980 | ||
2985 | #ifdef CONFIG_BT_HCIBTUSB_RTL | 2981 | #ifdef CONFIG_BT_HCIBTUSB_RTL |
@@ -2990,7 +2986,7 @@ static int btusb_probe(struct usb_interface *intf, | |||
2990 | * but the USB hub doesn't notice any status change. | 2986 | * but the USB hub doesn't notice any status change. |
2991 | * Explicitly request a device reset on resume. | 2987 | * Explicitly request a device reset on resume. |
2992 | */ | 2988 | */ |
2993 | set_bit(BTUSB_RESET_RESUME, &data->flags); | 2989 | interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME; |
2994 | } | 2990 | } |
2995 | #endif | 2991 | #endif |
2996 | 2992 | ||
@@ -3147,14 +3143,6 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message) | |||
3147 | btusb_stop_traffic(data); | 3143 | btusb_stop_traffic(data); |
3148 | usb_kill_anchored_urbs(&data->tx_anchor); | 3144 | usb_kill_anchored_urbs(&data->tx_anchor); |
3149 | 3145 | ||
3150 | /* Optionally request a device reset on resume, but only when | ||
3151 | * wakeups are disabled. If wakeups are enabled we assume the | ||
3152 | * device will stay powered up throughout suspend. | ||
3153 | */ | ||
3154 | if (test_bit(BTUSB_RESET_RESUME, &data->flags) && | ||
3155 | !device_may_wakeup(&data->udev->dev)) | ||
3156 | data->udev->reset_resume = 1; | ||
3157 | |||
3158 | return 0; | 3146 | return 0; |
3159 | } | 3147 | } |
3160 | 3148 | ||
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c index 25996e256110..0ffb247b42d6 100644 --- a/drivers/bus/sunxi-rsb.c +++ b/drivers/bus/sunxi-rsb.c | |||
@@ -178,6 +178,7 @@ static struct bus_type sunxi_rsb_bus = { | |||
178 | .match = sunxi_rsb_device_match, | 178 | .match = sunxi_rsb_device_match, |
179 | .probe = sunxi_rsb_device_probe, | 179 | .probe = sunxi_rsb_device_probe, |
180 | .remove = sunxi_rsb_device_remove, | 180 | .remove = sunxi_rsb_device_remove, |
181 | .uevent = of_device_uevent_modalias, | ||
181 | }; | 182 | }; |
182 | 183 | ||
183 | static void sunxi_rsb_dev_release(struct device *dev) | 184 | static void sunxi_rsb_dev_release(struct device *dev) |
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index cf25020576fa..340f96e44642 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c | |||
@@ -238,7 +238,10 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf, | |||
238 | goto out; | 238 | goto out; |
239 | } | 239 | } |
240 | 240 | ||
241 | mutex_lock(&reading_mutex); | 241 | if (mutex_lock_interruptible(&reading_mutex)) { |
242 | err = -ERESTARTSYS; | ||
243 | goto out_put; | ||
244 | } | ||
242 | if (!data_avail) { | 245 | if (!data_avail) { |
243 | bytes_read = rng_get_data(rng, rng_buffer, | 246 | bytes_read = rng_get_data(rng, rng_buffer, |
244 | rng_buffer_size(), | 247 | rng_buffer_size(), |
@@ -288,6 +291,7 @@ out: | |||
288 | 291 | ||
289 | out_unlock_reading: | 292 | out_unlock_reading: |
290 | mutex_unlock(&reading_mutex); | 293 | mutex_unlock(&reading_mutex); |
294 | out_put: | ||
291 | put_rng(rng); | 295 | put_rng(rng); |
292 | goto out; | 296 | goto out; |
293 | } | 297 | } |
diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 2898d19fadf5..23f52a897283 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c | |||
@@ -70,12 +70,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size) | |||
70 | u64 cursor = from; | 70 | u64 cursor = from; |
71 | 71 | ||
72 | while (cursor < to) { | 72 | while (cursor < to) { |
73 | if (!devmem_is_allowed(pfn)) { | 73 | if (!devmem_is_allowed(pfn)) |
74 | printk(KERN_INFO | ||
75 | "Program %s tried to access /dev/mem between %Lx->%Lx.\n", | ||
76 | current->comm, from, to); | ||
77 | return 0; | 74 | return 0; |
78 | } | ||
79 | cursor += PAGE_SIZE; | 75 | cursor += PAGE_SIZE; |
80 | pfn++; | 76 | pfn++; |
81 | } | 77 | } |
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 8c4fae1bccf4..1f62bcd313ad 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig | |||
@@ -158,6 +158,14 @@ config ARM_ARCH_TIMER_EVTSTREAM | |||
158 | This must be disabled for hardware validation purposes to detect any | 158 | This must be disabled for hardware validation purposes to detect any |
159 | hardware anomalies of missing events. | 159 | hardware anomalies of missing events. |
160 | 160 | ||
161 | config ARM_ARCH_TIMER_VCT_ACCESS | ||
162 | bool "Support for ARM architected timer virtual counter access in userspace" | ||
163 | default !ARM64 | ||
164 | depends on ARM_ARCH_TIMER | ||
165 | help | ||
166 | This option enables support for reading the ARM architected timer's | ||
167 | virtual counter in userspace. | ||
168 | |||
161 | config ARM_GLOBAL_TIMER | 169 | config ARM_GLOBAL_TIMER |
162 | bool "Support for the ARM global timer unit" | 170 | bool "Support for the ARM global timer unit" |
163 | select CLKSRC_OF if OF | 171 | select CLKSRC_OF if OF |
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index c64d543d64bf..1c029b49f96d 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c | |||
@@ -333,7 +333,10 @@ static void arch_counter_set_user_access(void) | |||
333 | | ARCH_TIMER_USR_PCT_ACCESS_EN); | 333 | | ARCH_TIMER_USR_PCT_ACCESS_EN); |
334 | 334 | ||
335 | /* Enable user access to the virtual counter */ | 335 | /* Enable user access to the virtual counter */ |
336 | cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN; | 336 | if (IS_ENABLED(CONFIG_ARM_ARCH_TIMER_VCT_ACCESS)) |
337 | cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN; | ||
338 | else | ||
339 | cntkctl &= ~ARCH_TIMER_USR_VCT_ACCESS_EN; | ||
337 | 340 | ||
338 | arch_timer_set_cntkctl(cntkctl); | 341 | arch_timer_set_cntkctl(cntkctl); |
339 | } | 342 | } |
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 91b05e2b8799..f193f5309ce9 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig | |||
@@ -307,6 +307,7 @@ endif | |||
307 | if MIPS | 307 | if MIPS |
308 | config LOONGSON2_CPUFREQ | 308 | config LOONGSON2_CPUFREQ |
309 | tristate "Loongson2 CPUFreq Driver" | 309 | tristate "Loongson2 CPUFreq Driver" |
310 | depends on LEMOTE_MACH2F | ||
310 | help | 311 | help |
311 | This option adds a CPUFreq driver for loongson processors which | 312 | This option adds a CPUFreq driver for loongson processors which |
312 | support software configurable cpu frequency. | 313 | support software configurable cpu frequency. |
@@ -319,6 +320,7 @@ config LOONGSON2_CPUFREQ | |||
319 | 320 | ||
320 | config LOONGSON1_CPUFREQ | 321 | config LOONGSON1_CPUFREQ |
321 | tristate "Loongson1 CPUFreq Driver" | 322 | tristate "Loongson1 CPUFreq Driver" |
323 | depends on LOONGSON1_LS1B | ||
322 | help | 324 | help |
323 | This option adds a CPUFreq driver for loongson1 processors which | 325 | This option adds a CPUFreq driver for loongson1 processors which |
324 | support software configurable cpu frequency. | 326 | support software configurable cpu frequency. |
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c index 845bafcfa792..d5c5a476360f 100644 --- a/drivers/cpuidle/cpuidle-powernv.c +++ b/drivers/cpuidle/cpuidle-powernv.c | |||
@@ -160,6 +160,24 @@ static int powernv_cpuidle_driver_init(void) | |||
160 | drv->state_count += 1; | 160 | drv->state_count += 1; |
161 | } | 161 | } |
162 | 162 | ||
163 | /* | ||
164 | * On the PowerNV platform cpu_present may be less than cpu_possible in | ||
165 | * cases when firmware detects the CPU, but it is not available to the | ||
166 | * OS. If CONFIG_HOTPLUG_CPU=n, then such CPUs are not hotplugable at | ||
167 | * run time and hence cpu_devices are not created for those CPUs by the | ||
168 | * generic topology_init(). | ||
169 | * | ||
170 | * drv->cpumask defaults to cpu_possible_mask in | ||
171 | * __cpuidle_driver_init(). This breaks cpuidle on PowerNV where | ||
172 | * cpu_devices are not created for CPUs in cpu_possible_mask that | ||
173 | * cannot be hot-added later at run time. | ||
174 | * | ||
175 | * Trying cpuidle_register_device() on a CPU without a cpu_device is | ||
176 | * incorrect, so pass a correct CPU mask to the generic cpuidle driver. | ||
177 | */ | ||
178 | |||
179 | drv->cpumask = (struct cpumask *)cpu_present_mask; | ||
180 | |||
163 | return 0; | 181 | return 0; |
164 | } | 182 | } |
165 | 183 | ||
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 151971627757..6b68416cf9df 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c | |||
@@ -189,6 +189,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, | |||
189 | return -EBUSY; | 189 | return -EBUSY; |
190 | } | 190 | } |
191 | target_state = &drv->states[index]; | 191 | target_state = &drv->states[index]; |
192 | broadcast = false; | ||
192 | } | 193 | } |
193 | 194 | ||
194 | /* Take note of the planned idle state. */ | 195 | /* Take note of the planned idle state. */ |
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c index 832a2c3f01ff..9e98a5fbbc1d 100644 --- a/drivers/cpuidle/sysfs.c +++ b/drivers/cpuidle/sysfs.c | |||
@@ -613,6 +613,18 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev) | |||
613 | struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu); | 613 | struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu); |
614 | int error; | 614 | int error; |
615 | 615 | ||
616 | /* | ||
617 | * Return if cpu_device is not setup for this CPU. | ||
618 | * | ||
619 | * This could happen if the arch did not set up cpu_device | ||
620 | * since this CPU is not in cpu_present mask and the | ||
621 | * driver did not send a correct CPU mask during registration. | ||
622 | * Without this check we would end up passing bogus | ||
623 | * value for &cpu_dev->kobj in kobject_init_and_add() | ||
624 | */ | ||
625 | if (!cpu_dev) | ||
626 | return -ENODEV; | ||
627 | |||
616 | kdev = kzalloc(sizeof(*kdev), GFP_KERNEL); | 628 | kdev = kzalloc(sizeof(*kdev), GFP_KERNEL); |
617 | if (!kdev) | 629 | if (!kdev) |
618 | return -ENOMEM; | 630 | return -ENOMEM; |
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h index bac0bdeb4b5f..b6529b9fcbe2 100644 --- a/drivers/crypto/amcc/crypto4xx_core.h +++ b/drivers/crypto/amcc/crypto4xx_core.h | |||
@@ -32,12 +32,12 @@ | |||
32 | #define PPC405EX_CE_RESET 0x00000008 | 32 | #define PPC405EX_CE_RESET 0x00000008 |
33 | 33 | ||
34 | #define CRYPTO4XX_CRYPTO_PRIORITY 300 | 34 | #define CRYPTO4XX_CRYPTO_PRIORITY 300 |
35 | #define PPC4XX_LAST_PD 63 | 35 | #define PPC4XX_NUM_PD 256 |
36 | #define PPC4XX_NUM_PD 64 | 36 | #define PPC4XX_LAST_PD (PPC4XX_NUM_PD - 1) |
37 | #define PPC4XX_LAST_GD 1023 | ||
38 | #define PPC4XX_NUM_GD 1024 | 37 | #define PPC4XX_NUM_GD 1024 |
39 | #define PPC4XX_LAST_SD 63 | 38 | #define PPC4XX_LAST_GD (PPC4XX_NUM_GD - 1) |
40 | #define PPC4XX_NUM_SD 64 | 39 | #define PPC4XX_NUM_SD 256 |
40 | #define PPC4XX_LAST_SD (PPC4XX_NUM_SD - 1) | ||
41 | #define PPC4XX_SD_BUFFER_SIZE 2048 | 41 | #define PPC4XX_SD_BUFFER_SIZE 2048 |
42 | 42 | ||
43 | #define PD_ENTRY_INUSE 1 | 43 | #define PD_ENTRY_INUSE 1 |
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 53e61459c69f..ee87eb77095c 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c | |||
@@ -224,12 +224,16 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask, | |||
224 | * without any error (HW optimizations for later | 224 | * without any error (HW optimizations for later |
225 | * CAAM eras), then try again. | 225 | * CAAM eras), then try again. |
226 | */ | 226 | */ |
227 | if (ret) | ||
228 | break; | ||
229 | |||
227 | rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK; | 230 | rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK; |
228 | if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) || | 231 | if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) || |
229 | !(rdsta_val & (1 << sh_idx))) | 232 | !(rdsta_val & (1 << sh_idx))) { |
230 | ret = -EAGAIN; | 233 | ret = -EAGAIN; |
231 | if (ret) | ||
232 | break; | 234 | break; |
235 | } | ||
236 | |||
233 | dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx); | 237 | dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx); |
234 | /* Clear the contents before recreating the descriptor */ | 238 | /* Clear the contents before recreating the descriptor */ |
235 | memset(desc, 0x00, CAAM_CMD_SZ * 7); | 239 | memset(desc, 0x00, CAAM_CMD_SZ * 7); |
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index 5450880abb7b..5a9083021fa0 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c | |||
@@ -1641,6 +1641,7 @@ static int queue_cache_init(void) | |||
1641 | CWQ_ENTRY_SIZE, 0, NULL); | 1641 | CWQ_ENTRY_SIZE, 0, NULL); |
1642 | if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) { | 1642 | if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) { |
1643 | kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]); | 1643 | kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]); |
1644 | queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL; | ||
1644 | return -ENOMEM; | 1645 | return -ENOMEM; |
1645 | } | 1646 | } |
1646 | return 0; | 1647 | return 0; |
@@ -1650,6 +1651,8 @@ static void queue_cache_destroy(void) | |||
1650 | { | 1651 | { |
1651 | kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]); | 1652 | kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]); |
1652 | kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]); | 1653 | kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]); |
1654 | queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL; | ||
1655 | queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL; | ||
1653 | } | 1656 | } |
1654 | 1657 | ||
1655 | static int spu_queue_register(struct spu_queue *p, unsigned long q_type) | 1658 | static int spu_queue_register(struct spu_queue *p, unsigned long q_type) |
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index ca848cc6a8fd..4f6fc1cfd7da 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c | |||
@@ -583,7 +583,7 @@ struct devfreq *devm_devfreq_add_device(struct device *dev, | |||
583 | devfreq = devfreq_add_device(dev, profile, governor_name, data); | 583 | devfreq = devfreq_add_device(dev, profile, governor_name, data); |
584 | if (IS_ERR(devfreq)) { | 584 | if (IS_ERR(devfreq)) { |
585 | devres_free(ptr); | 585 | devres_free(ptr); |
586 | return ERR_PTR(-ENOMEM); | 586 | return devfreq; |
587 | } | 587 | } |
588 | 588 | ||
589 | *ptr = devfreq; | 589 | *ptr = devfreq; |
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 7254c20007f8..6796eb1a8a4c 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c | |||
@@ -329,7 +329,7 @@ static void dmatest_callback(void *arg) | |||
329 | { | 329 | { |
330 | struct dmatest_done *done = arg; | 330 | struct dmatest_done *done = arg; |
331 | struct dmatest_thread *thread = | 331 | struct dmatest_thread *thread = |
332 | container_of(arg, struct dmatest_thread, done_wait); | 332 | container_of(done, struct dmatest_thread, test_done); |
333 | if (!thread->done) { | 333 | if (!thread->done) { |
334 | done->done = true; | 334 | done->done = true; |
335 | wake_up_all(done->wait); | 335 | wake_up_all(done->wait); |
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 5f50f0934426..ea2aeee3ed00 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c | |||
@@ -854,11 +854,8 @@ static int edma_terminate_all(struct dma_chan *chan) | |||
854 | /* Move the cyclic channel back to default queue */ | 854 | /* Move the cyclic channel back to default queue */ |
855 | if (!echan->tc && echan->edesc->cyclic) | 855 | if (!echan->tc && echan->edesc->cyclic) |
856 | edma_assign_channel_eventq(echan, EVENTQ_DEFAULT); | 856 | edma_assign_channel_eventq(echan, EVENTQ_DEFAULT); |
857 | /* | 857 | |
858 | * free the running request descriptor | 858 | vchan_terminate_vdesc(&echan->edesc->vdesc); |
859 | * since it is not in any of the vdesc lists | ||
860 | */ | ||
861 | edma_desc_free(&echan->edesc->vdesc); | ||
862 | echan->edesc = NULL; | 859 | echan->edesc = NULL; |
863 | } | 860 | } |
864 | 861 | ||
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index 744c8ae8434a..cf721899a826 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c | |||
@@ -1240,7 +1240,7 @@ static int omap_dma_terminate_all(struct dma_chan *chan) | |||
1240 | * c->desc is NULL and exit.) | 1240 | * c->desc is NULL and exit.) |
1241 | */ | 1241 | */ |
1242 | if (c->desc) { | 1242 | if (c->desc) { |
1243 | omap_dma_desc_free(&c->desc->vd); | 1243 | vchan_terminate_vdesc(&c->desc->vd); |
1244 | c->desc = NULL; | 1244 | c->desc = NULL; |
1245 | /* Avoid stopping the dma twice */ | 1245 | /* Avoid stopping the dma twice */ |
1246 | if (!c->paused) | 1246 | if (!c->paused) |
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c index a35c211857dd..f1f637cf098a 100644 --- a/drivers/dma/virt-dma.c +++ b/drivers/dma/virt-dma.c | |||
@@ -110,10 +110,7 @@ static void vchan_complete(unsigned long arg) | |||
110 | cb_data = vd->tx.callback_param; | 110 | cb_data = vd->tx.callback_param; |
111 | 111 | ||
112 | list_del(&vd->node); | 112 | list_del(&vd->node); |
113 | if (dmaengine_desc_test_reuse(&vd->tx)) | 113 | vchan_vdesc_fini(vd); |
114 | list_add(&vd->node, &vc->desc_allocated); | ||
115 | else | ||
116 | vc->desc_free(vd); | ||
117 | 114 | ||
118 | if (cb) | 115 | if (cb) |
119 | cb(cb_data); | 116 | cb(cb_data); |
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h index d9731ca5e262..12eb10a41db3 100644 --- a/drivers/dma/virt-dma.h +++ b/drivers/dma/virt-dma.h | |||
@@ -35,6 +35,7 @@ struct virt_dma_chan { | |||
35 | struct list_head desc_completed; | 35 | struct list_head desc_completed; |
36 | 36 | ||
37 | struct virt_dma_desc *cyclic; | 37 | struct virt_dma_desc *cyclic; |
38 | struct virt_dma_desc *vd_terminated; | ||
38 | }; | 39 | }; |
39 | 40 | ||
40 | static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan) | 41 | static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan) |
@@ -104,6 +105,20 @@ static inline void vchan_cookie_complete(struct virt_dma_desc *vd) | |||
104 | } | 105 | } |
105 | 106 | ||
106 | /** | 107 | /** |
108 | * vchan_vdesc_fini - Free or reuse a descriptor | ||
109 | * @vd: virtual descriptor to free/reuse | ||
110 | */ | ||
111 | static inline void vchan_vdesc_fini(struct virt_dma_desc *vd) | ||
112 | { | ||
113 | struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); | ||
114 | |||
115 | if (dmaengine_desc_test_reuse(&vd->tx)) | ||
116 | list_add(&vd->node, &vc->desc_allocated); | ||
117 | else | ||
118 | vc->desc_free(vd); | ||
119 | } | ||
120 | |||
121 | /** | ||
107 | * vchan_cyclic_callback - report the completion of a period | 122 | * vchan_cyclic_callback - report the completion of a period |
108 | * @vd: virtual descriptor | 123 | * @vd: virtual descriptor |
109 | */ | 124 | */ |
@@ -116,6 +131,25 @@ static inline void vchan_cyclic_callback(struct virt_dma_desc *vd) | |||
116 | } | 131 | } |
117 | 132 | ||
118 | /** | 133 | /** |
134 | * vchan_terminate_vdesc - Disable pending cyclic callback | ||
135 | * @vd: virtual descriptor to be terminated | ||
136 | * | ||
137 | * vc.lock must be held by caller | ||
138 | */ | ||
139 | static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd) | ||
140 | { | ||
141 | struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); | ||
142 | |||
143 | /* free up stuck descriptor */ | ||
144 | if (vc->vd_terminated) | ||
145 | vchan_vdesc_fini(vc->vd_terminated); | ||
146 | |||
147 | vc->vd_terminated = vd; | ||
148 | if (vc->cyclic == vd) | ||
149 | vc->cyclic = NULL; | ||
150 | } | ||
151 | |||
152 | /** | ||
119 | * vchan_next_desc - peek at the next descriptor to be processed | 153 | * vchan_next_desc - peek at the next descriptor to be processed |
120 | * @vc: virtual channel to obtain descriptor from | 154 | * @vc: virtual channel to obtain descriptor from |
121 | * | 155 | * |
@@ -170,10 +204,20 @@ static inline void vchan_free_chan_resources(struct virt_dma_chan *vc) | |||
170 | * Makes sure that all scheduled or active callbacks have finished running. For | 204 | * Makes sure that all scheduled or active callbacks have finished running. For |
171 | * proper operation the caller has to ensure that no new callbacks are scheduled | 205 | * proper operation the caller has to ensure that no new callbacks are scheduled |
172 | * after the invocation of this function started. | 206 | * after the invocation of this function started. |
207 | * Free up the terminated cyclic descriptor to prevent memory leakage. | ||
173 | */ | 208 | */ |
174 | static inline void vchan_synchronize(struct virt_dma_chan *vc) | 209 | static inline void vchan_synchronize(struct virt_dma_chan *vc) |
175 | { | 210 | { |
211 | unsigned long flags; | ||
212 | |||
176 | tasklet_kill(&vc->task); | 213 | tasklet_kill(&vc->task); |
214 | |||
215 | spin_lock_irqsave(&vc->lock, flags); | ||
216 | if (vc->vd_terminated) { | ||
217 | vchan_vdesc_fini(vc->vd_terminated); | ||
218 | vc->vd_terminated = NULL; | ||
219 | } | ||
220 | spin_unlock_irqrestore(&vc->lock, flags); | ||
177 | } | 221 | } |
178 | 222 | ||
179 | #endif | 223 | #endif |
diff --git a/drivers/edac/octeon_edac-lmc.c b/drivers/edac/octeon_edac-lmc.c index cda6dab5067a..6b65a102b49d 100644 --- a/drivers/edac/octeon_edac-lmc.c +++ b/drivers/edac/octeon_edac-lmc.c | |||
@@ -79,6 +79,7 @@ static void octeon_lmc_edac_poll_o2(struct mem_ctl_info *mci) | |||
79 | if (!pvt->inject) | 79 | if (!pvt->inject) |
80 | int_reg.u64 = cvmx_read_csr(CVMX_LMCX_INT(mci->mc_idx)); | 80 | int_reg.u64 = cvmx_read_csr(CVMX_LMCX_INT(mci->mc_idx)); |
81 | else { | 81 | else { |
82 | int_reg.u64 = 0; | ||
82 | if (pvt->error_type == 1) | 83 | if (pvt->error_type == 1) |
83 | int_reg.s.sec_err = 1; | 84 | int_reg.s.sec_err = 1; |
84 | if (pvt->error_type == 2) | 85 | if (pvt->error_type == 2) |
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c index 5eaea8b812cf..089a78983b39 100644 --- a/drivers/gpio/gpio-ath79.c +++ b/drivers/gpio/gpio-ath79.c | |||
@@ -203,3 +203,6 @@ static struct platform_driver ath79_gpio_driver = { | |||
203 | }; | 203 | }; |
204 | 204 | ||
205 | module_platform_driver(ath79_gpio_driver); | 205 | module_platform_driver(ath79_gpio_driver); |
206 | |||
207 | MODULE_DESCRIPTION("Atheros AR71XX/AR724X/AR913X GPIO API support"); | ||
208 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/gpio/gpio-iop.c b/drivers/gpio/gpio-iop.c index 2ed0237a8baf..304e68633d29 100644 --- a/drivers/gpio/gpio-iop.c +++ b/drivers/gpio/gpio-iop.c | |||
@@ -129,3 +129,7 @@ static int __init iop3xx_gpio_init(void) | |||
129 | return platform_driver_register(&iop3xx_gpio_driver); | 129 | return platform_driver_register(&iop3xx_gpio_driver); |
130 | } | 130 | } |
131 | arch_initcall(iop3xx_gpio_init); | 131 | arch_initcall(iop3xx_gpio_init); |
132 | |||
133 | MODULE_DESCRIPTION("GPIO handling for Intel IOP3xx processors"); | ||
134 | MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>"); | ||
135 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c index 0e1376317683..b233cf8436b0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | |||
@@ -367,29 +367,50 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd) | |||
367 | { | 367 | { |
368 | struct amdgpu_device *adev = get_amdgpu_device(kgd); | 368 | struct amdgpu_device *adev = get_amdgpu_device(kgd); |
369 | struct cik_sdma_rlc_registers *m; | 369 | struct cik_sdma_rlc_registers *m; |
370 | unsigned long end_jiffies; | ||
370 | uint32_t sdma_base_addr; | 371 | uint32_t sdma_base_addr; |
372 | uint32_t data; | ||
371 | 373 | ||
372 | m = get_sdma_mqd(mqd); | 374 | m = get_sdma_mqd(mqd); |
373 | sdma_base_addr = get_sdma_base_addr(m); | 375 | sdma_base_addr = get_sdma_base_addr(m); |
374 | 376 | ||
375 | WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR, | 377 | WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, |
376 | m->sdma_rlc_virtual_addr); | 378 | m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)); |
377 | 379 | ||
378 | WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, | 380 | end_jiffies = msecs_to_jiffies(2000) + jiffies; |
379 | m->sdma_rlc_rb_base); | 381 | while (true) { |
382 | data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); | ||
383 | if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) | ||
384 | break; | ||
385 | if (time_after(jiffies, end_jiffies)) | ||
386 | return -ETIME; | ||
387 | usleep_range(500, 1000); | ||
388 | } | ||
389 | if (m->sdma_engine_id) { | ||
390 | data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL); | ||
391 | data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL, | ||
392 | RESUME_CTX, 0); | ||
393 | WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data); | ||
394 | } else { | ||
395 | data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL); | ||
396 | data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL, | ||
397 | RESUME_CTX, 0); | ||
398 | WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data); | ||
399 | } | ||
380 | 400 | ||
401 | WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, | ||
402 | m->sdma_rlc_doorbell); | ||
403 | WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0); | ||
404 | WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0); | ||
405 | WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR, | ||
406 | m->sdma_rlc_virtual_addr); | ||
407 | WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base); | ||
381 | WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI, | 408 | WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI, |
382 | m->sdma_rlc_rb_base_hi); | 409 | m->sdma_rlc_rb_base_hi); |
383 | |||
384 | WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, | 410 | WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, |
385 | m->sdma_rlc_rb_rptr_addr_lo); | 411 | m->sdma_rlc_rb_rptr_addr_lo); |
386 | |||
387 | WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, | 412 | WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, |
388 | m->sdma_rlc_rb_rptr_addr_hi); | 413 | m->sdma_rlc_rb_rptr_addr_hi); |
389 | |||
390 | WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, | ||
391 | m->sdma_rlc_doorbell); | ||
392 | |||
393 | WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, | 414 | WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, |
394 | m->sdma_rlc_rb_cntl); | 415 | m->sdma_rlc_rb_cntl); |
395 | 416 | ||
@@ -492,9 +513,9 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, | |||
492 | } | 513 | } |
493 | 514 | ||
494 | WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0); | 515 | WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0); |
495 | WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0); | 516 | WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, |
496 | WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0); | 517 | RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) | |
497 | WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0); | 518 | SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK); |
498 | 519 | ||
499 | return 0; | 520 | return 0; |
500 | } | 521 | } |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c index d83de985e88c..8577a563600f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | |||
@@ -215,8 +215,8 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd, | |||
215 | BUG_ON(!mm || !mqd || !q); | 215 | BUG_ON(!mm || !mqd || !q); |
216 | 216 | ||
217 | m = get_sdma_mqd(mqd); | 217 | m = get_sdma_mqd(mqd); |
218 | m->sdma_rlc_rb_cntl = ffs(q->queue_size / sizeof(unsigned int)) << | 218 | m->sdma_rlc_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1) |
219 | SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT | | 219 | << SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT | |
220 | q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT | | 220 | q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT | |
221 | 1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT | | 221 | 1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT | |
222 | 6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT; | 222 | 6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT; |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 7b69070f7ecc..aa41b840048f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | |||
@@ -205,6 +205,24 @@ int pqm_create_queue(struct process_queue_manager *pqm, | |||
205 | 205 | ||
206 | switch (type) { | 206 | switch (type) { |
207 | case KFD_QUEUE_TYPE_SDMA: | 207 | case KFD_QUEUE_TYPE_SDMA: |
208 | if (dev->dqm->queue_count >= | ||
209 | CIK_SDMA_QUEUES_PER_ENGINE * CIK_SDMA_ENGINE_NUM) { | ||
210 | pr_err("Over-subscription is not allowed for SDMA.\n"); | ||
211 | retval = -EPERM; | ||
212 | goto err_create_queue; | ||
213 | } | ||
214 | |||
215 | retval = create_cp_queue(pqm, dev, &q, properties, f, *qid); | ||
216 | if (retval != 0) | ||
217 | goto err_create_queue; | ||
218 | pqn->q = q; | ||
219 | pqn->kq = NULL; | ||
220 | retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, | ||
221 | &q->properties.vmid); | ||
222 | pr_debug("DQM returned %d for create_queue\n", retval); | ||
223 | print_queue(q); | ||
224 | break; | ||
225 | |||
208 | case KFD_QUEUE_TYPE_COMPUTE: | 226 | case KFD_QUEUE_TYPE_COMPUTE: |
209 | /* check if there is over subscription */ | 227 | /* check if there is over subscription */ |
210 | if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) && | 228 | if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) && |
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c index 0be137b5e958..d14c4fcfc9d5 100644 --- a/drivers/gpu/drm/omapdrm/dss/dispc.c +++ b/drivers/gpu/drm/omapdrm/dss/dispc.c | |||
@@ -294,11 +294,6 @@ static const struct { | |||
294 | }, | 294 | }, |
295 | }; | 295 | }; |
296 | 296 | ||
297 | struct color_conv_coef { | ||
298 | int ry, rcr, rcb, gy, gcr, gcb, by, bcr, bcb; | ||
299 | int full_range; | ||
300 | }; | ||
301 | |||
302 | static unsigned long dispc_fclk_rate(void); | 297 | static unsigned long dispc_fclk_rate(void); |
303 | static unsigned long dispc_core_clk_rate(void); | 298 | static unsigned long dispc_core_clk_rate(void); |
304 | static unsigned long dispc_mgr_lclk_rate(enum omap_channel channel); | 299 | static unsigned long dispc_mgr_lclk_rate(enum omap_channel channel); |
@@ -741,9 +736,18 @@ static void dispc_ovl_set_scale_coef(enum omap_plane plane, int fir_hinc, | |||
741 | } | 736 | } |
742 | } | 737 | } |
743 | 738 | ||
739 | struct csc_coef_yuv2rgb { | ||
740 | int ry, rcb, rcr, gy, gcb, gcr, by, bcb, bcr; | ||
741 | bool full_range; | ||
742 | }; | ||
743 | |||
744 | struct csc_coef_rgb2yuv { | ||
745 | int yr, yg, yb, cbr, cbg, cbb, crr, crg, crb; | ||
746 | bool full_range; | ||
747 | }; | ||
744 | 748 | ||
745 | static void dispc_ovl_write_color_conv_coef(enum omap_plane plane, | 749 | static void dispc_ovl_write_color_conv_coef(enum omap_plane plane, |
746 | const struct color_conv_coef *ct) | 750 | const struct csc_coef_yuv2rgb *ct) |
747 | { | 751 | { |
748 | #define CVAL(x, y) (FLD_VAL(x, 26, 16) | FLD_VAL(y, 10, 0)) | 752 | #define CVAL(x, y) (FLD_VAL(x, 26, 16) | FLD_VAL(y, 10, 0)) |
749 | 753 | ||
@@ -753,7 +757,24 @@ static void dispc_ovl_write_color_conv_coef(enum omap_plane plane, | |||
753 | dispc_write_reg(DISPC_OVL_CONV_COEF(plane, 3), CVAL(ct->bcr, ct->by)); | 757 | dispc_write_reg(DISPC_OVL_CONV_COEF(plane, 3), CVAL(ct->bcr, ct->by)); |
754 | dispc_write_reg(DISPC_OVL_CONV_COEF(plane, 4), CVAL(0, ct->bcb)); | 758 | dispc_write_reg(DISPC_OVL_CONV_COEF(plane, 4), CVAL(0, ct->bcb)); |
755 | 759 | ||
756 | REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), ct->full_range, 11, 11); | 760 | REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), !!ct->full_range, 11, 11); |
761 | |||
762 | #undef CVAL | ||
763 | } | ||
764 | |||
765 | static void dispc_wb_write_color_conv_coef(const struct csc_coef_rgb2yuv *ct) | ||
766 | { | ||
767 | const enum omap_plane plane = OMAP_DSS_WB; | ||
768 | |||
769 | #define CVAL(x, y) (FLD_VAL(x, 26, 16) | FLD_VAL(y, 10, 0)) | ||
770 | |||
771 | dispc_write_reg(DISPC_OVL_CONV_COEF(plane, 0), CVAL(ct->yg, ct->yr)); | ||
772 | dispc_write_reg(DISPC_OVL_CONV_COEF(plane, 1), CVAL(ct->crr, ct->yb)); | ||
773 | dispc_write_reg(DISPC_OVL_CONV_COEF(plane, 2), CVAL(ct->crb, ct->crg)); | ||
774 | dispc_write_reg(DISPC_OVL_CONV_COEF(plane, 3), CVAL(ct->cbg, ct->cbr)); | ||
775 | dispc_write_reg(DISPC_OVL_CONV_COEF(plane, 4), CVAL(0, ct->cbb)); | ||
776 | |||
777 | REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), !!ct->full_range, 11, 11); | ||
757 | 778 | ||
758 | #undef CVAL | 779 | #undef CVAL |
759 | } | 780 | } |
@@ -762,20 +783,28 @@ static void dispc_setup_color_conv_coef(void) | |||
762 | { | 783 | { |
763 | int i; | 784 | int i; |
764 | int num_ovl = dss_feat_get_num_ovls(); | 785 | int num_ovl = dss_feat_get_num_ovls(); |
765 | const struct color_conv_coef ctbl_bt601_5_ovl = { | 786 | |
766 | /* YUV -> RGB */ | 787 | /* YUV -> RGB, ITU-R BT.601, limited range */ |
767 | 298, 409, 0, 298, -208, -100, 298, 0, 517, 0, | 788 | const struct csc_coef_yuv2rgb coefs_yuv2rgb_bt601_lim = { |
789 | 298, 0, 409, /* ry, rcb, rcr */ | ||
790 | 298, -100, -208, /* gy, gcb, gcr */ | ||
791 | 298, 516, 0, /* by, bcb, bcr */ | ||
792 | false, /* limited range */ | ||
768 | }; | 793 | }; |
769 | const struct color_conv_coef ctbl_bt601_5_wb = { | 794 | |
770 | /* RGB -> YUV */ | 795 | /* RGB -> YUV, ITU-R BT.601, full range */ |
771 | 66, 129, 25, 112, -94, -18, -38, -74, 112, 0, | 796 | const struct csc_coef_rgb2yuv coefs_rgb2yuv_bt601_full = { |
797 | 77, 150, 29, /* yr, yg, yb | 0.299 0.587 0.114|*/ | ||
798 | -43, -85, 128, /* cbr, cbg, cbb |-0.173 -0.339 0.511|*/ | ||
799 | 128, -107, -21, /* crr, crg, crb | 0.511 -0.428 -0.083|*/ | ||
800 | true, /* full range */ | ||
772 | }; | 801 | }; |
773 | 802 | ||
774 | for (i = 1; i < num_ovl; i++) | 803 | for (i = 1; i < num_ovl; i++) |
775 | dispc_ovl_write_color_conv_coef(i, &ctbl_bt601_5_ovl); | 804 | dispc_ovl_write_color_conv_coef(i, &coefs_yuv2rgb_bt601_lim); |
776 | 805 | ||
777 | if (dispc.feat->has_writeback) | 806 | if (dispc.feat->has_writeback) |
778 | dispc_ovl_write_color_conv_coef(OMAP_DSS_WB, &ctbl_bt601_5_wb); | 807 | dispc_wb_write_color_conv_coef(&coefs_rgb2yuv_bt601_full); |
779 | } | 808 | } |
780 | 809 | ||
781 | static void dispc_ovl_set_ba0(enum omap_plane plane, u32 paddr) | 810 | static void dispc_ovl_set_ba0(enum omap_plane plane, u32 paddr) |
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c index 652c5651d327..26d6ff06cd99 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c | |||
@@ -794,7 +794,8 @@ static int omap_dmm_probe(struct platform_device *dev) | |||
794 | match = of_match_node(dmm_of_match, dev->dev.of_node); | 794 | match = of_match_node(dmm_of_match, dev->dev.of_node); |
795 | if (!match) { | 795 | if (!match) { |
796 | dev_err(&dev->dev, "failed to find matching device node\n"); | 796 | dev_err(&dev->dev, "failed to find matching device node\n"); |
797 | return -ENODEV; | 797 | ret = -ENODEV; |
798 | goto fail; | ||
798 | } | 799 | } |
799 | 800 | ||
800 | omap_dmm->plat_data = match->data; | 801 | omap_dmm->plat_data = match->data; |
diff --git a/drivers/gpu/drm/omapdrm/omap_wb.c b/drivers/gpu/drm/omapdrm/omap_wb.c index 73b76fca2387..8c82828dd52a 100644 --- a/drivers/gpu/drm/omapdrm/omap_wb.c +++ b/drivers/gpu/drm/omapdrm/omap_wb.c | |||
@@ -26,6 +26,11 @@ MODULE_PARM_DESC(wbdebug, "activates debug info"); | |||
26 | struct wb_fmt wb_formats[] = { | 26 | struct wb_fmt wb_formats[] = { |
27 | { | 27 | { |
28 | .fourcc = V4L2_PIX_FMT_NV12, | 28 | .fourcc = V4L2_PIX_FMT_NV12, |
29 | .coplanar = 0, | ||
30 | .depth = {8, 4}, | ||
31 | }, | ||
32 | { | ||
33 | .fourcc = V4L2_PIX_FMT_NV12M, | ||
29 | .coplanar = 1, | 34 | .coplanar = 1, |
30 | .depth = {8, 4}, | 35 | .depth = {8, 4}, |
31 | }, | 36 | }, |
@@ -64,6 +69,24 @@ struct wb_fmt *find_format(struct v4l2_format *f) | |||
64 | return NULL; | 69 | return NULL; |
65 | } | 70 | } |
66 | 71 | ||
72 | int omap_wb_fourcc_v4l2_to_drm(u32 fourcc) | ||
73 | { | ||
74 | switch (fourcc) { | ||
75 | case V4L2_PIX_FMT_NV12: | ||
76 | case V4L2_PIX_FMT_NV12M: | ||
77 | return DRM_FORMAT_NV12; | ||
78 | case V4L2_PIX_FMT_YUYV: | ||
79 | return DRM_FORMAT_YUYV; | ||
80 | case V4L2_PIX_FMT_UYVY: | ||
81 | return DRM_FORMAT_UYVY; | ||
82 | case V4L2_PIX_FMT_XBGR32: | ||
83 | return DRM_FORMAT_XRGB8888; | ||
84 | default: | ||
85 | BUG(); | ||
86 | return 0; | ||
87 | } | ||
88 | } | ||
89 | |||
67 | static void wb_irq(struct omap_drm_irq *irq, uint32_t irqstatus) | 90 | static void wb_irq(struct omap_drm_irq *irq, uint32_t irqstatus) |
68 | { | 91 | { |
69 | struct wb_dev *dev = container_of(irq, struct wb_dev, wb_irq); | 92 | struct wb_dev *dev = container_of(irq, struct wb_dev, wb_irq); |
diff --git a/drivers/gpu/drm/omapdrm/omap_wb.h b/drivers/gpu/drm/omapdrm/omap_wb.h index 08f722572fc2..c03001a3cfee 100644 --- a/drivers/gpu/drm/omapdrm/omap_wb.h +++ b/drivers/gpu/drm/omapdrm/omap_wb.h | |||
@@ -93,9 +93,6 @@ struct wb_fmt { | |||
93 | extern struct wb_fmt wb_formats[]; | 93 | extern struct wb_fmt wb_formats[]; |
94 | extern unsigned int num_wb_formats; | 94 | extern unsigned int num_wb_formats; |
95 | 95 | ||
96 | /* Return a specific unsigned byte from an unsigned int */ | ||
97 | #define GET_BYTE(c, b) ((c >> (b * 8)) & 0xff) | ||
98 | |||
99 | struct wb_buffer { | 96 | struct wb_buffer { |
100 | struct vb2_v4l2_buffer vb; | 97 | struct vb2_v4l2_buffer vb; |
101 | struct list_head list; | 98 | struct list_head list; |
@@ -222,6 +219,8 @@ static inline dma_addr_t vb2_dma_addr_plus_data_offset(struct vb2_buffer *vb, | |||
222 | vb->planes[plane_no].data_offset; | 219 | vb->planes[plane_no].data_offset; |
223 | } | 220 | } |
224 | 221 | ||
222 | int omap_wb_fourcc_v4l2_to_drm(u32 fourcc); | ||
223 | |||
225 | void wbm2m_irq(struct wbm2m_dev *dev, uint32_t irqstatus); | 224 | void wbm2m_irq(struct wbm2m_dev *dev, uint32_t irqstatus); |
226 | int wbm2m_init(struct wb_dev *dev); | 225 | int wbm2m_init(struct wb_dev *dev); |
227 | void wbm2m_cleanup(struct wb_dev *dev); | 226 | void wbm2m_cleanup(struct wb_dev *dev); |
diff --git a/drivers/gpu/drm/omapdrm/omap_wb_cap.c b/drivers/gpu/drm/omapdrm/omap_wb_cap.c index aa9217033e21..834138a72e53 100644 --- a/drivers/gpu/drm/omapdrm/omap_wb_cap.c +++ b/drivers/gpu/drm/omapdrm/omap_wb_cap.c | |||
@@ -196,16 +196,19 @@ static int wbcap_schedule_next_buffer(struct wbcap_dev *dev) | |||
196 | addr_y = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0); | 196 | addr_y = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0); |
197 | if (num_planes == 2) | 197 | if (num_planes == 2) |
198 | addr_uv = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 1); | 198 | addr_uv = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 1); |
199 | else if (pix->pixelformat == V4L2_PIX_FMT_NV12) | ||
200 | addr_uv = addr_y + (pix->plane_fmt[0].bytesperline * | ||
201 | pix->height); | ||
199 | 202 | ||
200 | /* fill WB DSS info */ | 203 | /* fill WB DSS info */ |
201 | wb_info.paddr = (u32)addr_y; | 204 | wb_info.paddr = (u32)addr_y; |
202 | wb_info.p_uv_addr = (u32)addr_uv; | 205 | wb_info.p_uv_addr = (u32)addr_uv; |
203 | wb_info.buf_width = pix->plane_fmt[0].bytesperline / | 206 | wb_info.buf_width = pix->plane_fmt[0].bytesperline / |
204 | (q_data->fmt->depth[0] / 8); | 207 | (q_data->fmt->depth[LUMA_PLANE] / 8); |
205 | 208 | ||
206 | wb_info.width = pix->width; | 209 | wb_info.width = pix->width; |
207 | wb_info.height = pix->height; | 210 | wb_info.height = pix->height; |
208 | wb_info.color_mode = pix->pixelformat; | 211 | wb_info.color_mode = omap_wb_fourcc_v4l2_to_drm(pix->pixelformat); |
209 | wb_info.pre_mult_alpha = 1; | 212 | wb_info.pre_mult_alpha = 1; |
210 | 213 | ||
211 | wb_info.rotation = BIT(DRM_ROTATE_0); | 214 | wb_info.rotation = BIT(DRM_ROTATE_0); |
@@ -536,7 +539,7 @@ static int wbcap_querycap(struct file *file, void *priv, | |||
536 | struct wbcap_dev *wbcap = video_drvdata(file); | 539 | struct wbcap_dev *wbcap = video_drvdata(file); |
537 | 540 | ||
538 | strlcpy(cap->driver, WBCAP_MODULE_NAME, sizeof(cap->driver)); | 541 | strlcpy(cap->driver, WBCAP_MODULE_NAME, sizeof(cap->driver)); |
539 | strlcpy(cap->card, "wbcap", sizeof(cap->card)); | 542 | strlcpy(cap->card, WBCAP_MODULE_NAME, sizeof(cap->card)); |
540 | snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s", | 543 | snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s", |
541 | wbcap->v4l2_dev.name); | 544 | wbcap->v4l2_dev.name); |
542 | cap->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_READWRITE | | 545 | cap->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_READWRITE | |
@@ -604,12 +607,22 @@ static int wbcap_fill_pix_format(struct wbcap_dev *wbcap, | |||
604 | depth = fmt->depth[i]; | 607 | depth = fmt->depth[i]; |
605 | 608 | ||
606 | if (i == LUMA_PLANE) | 609 | if (i == LUMA_PLANE) |
607 | plane_fmt->bytesperline = (pix->width * depth) >> 3; | 610 | plane_fmt->bytesperline = pix->width * depth / 8; |
608 | else | 611 | else |
609 | plane_fmt->bytesperline = pix->width; | 612 | plane_fmt->bytesperline = pix->width; |
610 | 613 | ||
611 | plane_fmt->sizeimage = | 614 | plane_fmt->sizeimage = (pix->height * pix->width * |
612 | (pix->height * pix->width * depth) >> 3; | 615 | depth) / 8; |
616 | |||
617 | if (fmt->fourcc == V4L2_PIX_FMT_NV12) { | ||
618 | /* | ||
619 | * Since we are using a single plane buffer | ||
620 | * we need to adjust the reported sizeimage | ||
621 | * to include the colocated UV part. | ||
622 | */ | ||
623 | plane_fmt->sizeimage += (pix->height / 2 * | ||
624 | plane_fmt->bytesperline); | ||
625 | } | ||
613 | 626 | ||
614 | memset(plane_fmt->reserved, 0, sizeof(plane_fmt->reserved)); | 627 | memset(plane_fmt->reserved, 0, sizeof(plane_fmt->reserved)); |
615 | } | 628 | } |
@@ -625,11 +638,8 @@ static int wbcap_try_fmt_vid_cap(struct file *file, void *priv, | |||
625 | struct drm_crtc *crtc; | 638 | struct drm_crtc *crtc; |
626 | struct omap_video_timings *ct; | 639 | struct omap_video_timings *ct; |
627 | 640 | ||
628 | log_dbg(wbcap, "requested fourcc:%c%c%c%c size: %dx%d\n", | 641 | log_dbg(wbcap, "requested fourcc:%4.4s size: %dx%d\n", |
629 | GET_BYTE(f->fmt.pix_mp.pixelformat, 0), | 642 | (char *)&f->fmt.pix_mp.pixelformat, |
630 | GET_BYTE(f->fmt.pix_mp.pixelformat, 1), | ||
631 | GET_BYTE(f->fmt.pix_mp.pixelformat, 2), | ||
632 | GET_BYTE(f->fmt.pix_mp.pixelformat, 3), | ||
633 | f->fmt.pix_mp.width, f->fmt.pix_mp.height); | 643 | f->fmt.pix_mp.width, f->fmt.pix_mp.height); |
634 | 644 | ||
635 | /* | 645 | /* |
@@ -643,11 +653,8 @@ static int wbcap_try_fmt_vid_cap(struct file *file, void *priv, | |||
643 | f->fmt.pix.width = ct->x_res; | 653 | f->fmt.pix.width = ct->x_res; |
644 | f->fmt.pix.height = ct->y_res; | 654 | f->fmt.pix.height = ct->y_res; |
645 | 655 | ||
646 | log_dbg(wbcap, "replied fourcc:%c%c%c%c size: %dx%d\n", | 656 | log_dbg(wbcap, "replied fourcc:%4.4s size: %dx%d\n", |
647 | GET_BYTE(f->fmt.pix_mp.pixelformat, 0), | 657 | (char *)&f->fmt.pix_mp.pixelformat, |
648 | GET_BYTE(f->fmt.pix_mp.pixelformat, 1), | ||
649 | GET_BYTE(f->fmt.pix_mp.pixelformat, 2), | ||
650 | GET_BYTE(f->fmt.pix_mp.pixelformat, 3), | ||
651 | f->fmt.pix_mp.width, f->fmt.pix_mp.height); | 658 | f->fmt.pix_mp.width, f->fmt.pix_mp.height); |
652 | 659 | ||
653 | return wbcap_fill_pix_format(wbcap, f); | 660 | return wbcap_fill_pix_format(wbcap, f); |
@@ -678,12 +685,9 @@ static int wbcap_s_fmt_vid_cap(struct file *file, void *priv, | |||
678 | q_data->format = *f; | 685 | q_data->format = *f; |
679 | q_data->fmt = find_format(f); | 686 | q_data->fmt = find_format(f); |
680 | 687 | ||
681 | log_dbg(wbcap, "Setting format for type %d, %dx%d, fmt: %c%c%c%c bpl_y %d", | 688 | log_dbg(wbcap, "Setting format for type %d, %dx%d, fmt: %4.4s bpl_y %d", |
682 | f->type, f->fmt.pix_mp.width, f->fmt.pix_mp.height, | 689 | f->type, f->fmt.pix_mp.width, f->fmt.pix_mp.height, |
683 | GET_BYTE(f->fmt.pix_mp.pixelformat, 0), | 690 | (char *)&f->fmt.pix_mp.pixelformat, |
684 | GET_BYTE(f->fmt.pix_mp.pixelformat, 1), | ||
685 | GET_BYTE(f->fmt.pix_mp.pixelformat, 2), | ||
686 | GET_BYTE(f->fmt.pix_mp.pixelformat, 3), | ||
687 | f->fmt.pix_mp.plane_fmt[LUMA_PLANE].bytesperline); | 691 | f->fmt.pix_mp.plane_fmt[LUMA_PLANE].bytesperline); |
688 | if (f->fmt.pix_mp.num_planes == 2) | 692 | if (f->fmt.pix_mp.num_planes == 2) |
689 | log_dbg(wbcap, " bpl_uv %d\n", | 693 | log_dbg(wbcap, " bpl_uv %d\n", |
@@ -712,6 +716,7 @@ static int wbcap_enum_fmt_vid_cap(struct file *file, void *priv, | |||
712 | if (f->index >= num_wb_formats) | 716 | if (f->index >= num_wb_formats) |
713 | return -EINVAL; | 717 | return -EINVAL; |
714 | 718 | ||
719 | f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; | ||
715 | f->pixelformat = wb_formats[f->index].fourcc; | 720 | f->pixelformat = wb_formats[f->index].fourcc; |
716 | return 0; | 721 | return 0; |
717 | } | 722 | } |
diff --git a/drivers/gpu/drm/omapdrm/omap_wb_m2m.c b/drivers/gpu/drm/omapdrm/omap_wb_m2m.c index 4a3291f699e6..90063f9cb0f7 100644 --- a/drivers/gpu/drm/omapdrm/omap_wb_m2m.c +++ b/drivers/gpu/drm/omapdrm/omap_wb_m2m.c | |||
@@ -169,6 +169,9 @@ static void device_run(void *priv) | |||
169 | src_dma_addr[0] = vb2_dma_addr_plus_data_offset(s_vb, 0); | 169 | src_dma_addr[0] = vb2_dma_addr_plus_data_offset(s_vb, 0); |
170 | if (spix->num_planes == 2) | 170 | if (spix->num_planes == 2) |
171 | src_dma_addr[1] = vb2_dma_addr_plus_data_offset(s_vb, 1); | 171 | src_dma_addr[1] = vb2_dma_addr_plus_data_offset(s_vb, 1); |
172 | else if (spix->pixelformat == V4L2_PIX_FMT_NV12) | ||
173 | src_dma_addr[1] = src_dma_addr[0] + | ||
174 | (spix->plane_fmt[0].bytesperline * spix->height); | ||
172 | if (!src_dma_addr[0]) { | 175 | if (!src_dma_addr[0]) { |
173 | log_err(dev, | 176 | log_err(dev, |
174 | "acquiring source buffer(%d) dma_addr failed\n", | 177 | "acquiring source buffer(%d) dma_addr failed\n", |
@@ -180,6 +183,9 @@ static void device_run(void *priv) | |||
180 | dst_dma_addr[0] = vb2_dma_addr_plus_data_offset(d_vb, 0); | 183 | dst_dma_addr[0] = vb2_dma_addr_plus_data_offset(d_vb, 0); |
181 | if (dpix->num_planes == 2) | 184 | if (dpix->num_planes == 2) |
182 | dst_dma_addr[1] = vb2_dma_addr_plus_data_offset(d_vb, 1); | 185 | dst_dma_addr[1] = vb2_dma_addr_plus_data_offset(d_vb, 1); |
186 | else if (dpix->pixelformat == V4L2_PIX_FMT_NV12) | ||
187 | dst_dma_addr[1] = dst_dma_addr[0] + | ||
188 | (dpix->plane_fmt[0].bytesperline * dpix->height); | ||
183 | if (!dst_dma_addr[0]) { | 189 | if (!dst_dma_addr[0]) { |
184 | log_err(dev, | 190 | log_err(dev, |
185 | "acquiring destination buffer(%d) dma_addr failed\n", | 191 | "acquiring destination buffer(%d) dma_addr failed\n", |
@@ -202,7 +208,7 @@ static void device_run(void *priv) | |||
202 | src_info.out_width = spix->width; | 208 | src_info.out_width = spix->width; |
203 | src_info.out_height = spix->height; | 209 | src_info.out_height = spix->height; |
204 | 210 | ||
205 | src_info.color_mode = spix->pixelformat; | 211 | src_info.color_mode = omap_wb_fourcc_v4l2_to_drm(spix->pixelformat); |
206 | src_info.global_alpha = 0xff; | 212 | src_info.global_alpha = 0xff; |
207 | 213 | ||
208 | src_info.rotation = BIT(DRM_ROTATE_0); | 214 | src_info.rotation = BIT(DRM_ROTATE_0); |
@@ -221,7 +227,7 @@ static void device_run(void *priv) | |||
221 | 227 | ||
222 | wb_info.width = dpix->width; | 228 | wb_info.width = dpix->width; |
223 | wb_info.height = dpix->height; | 229 | wb_info.height = dpix->height; |
224 | wb_info.color_mode = dpix->pixelformat; | 230 | wb_info.color_mode = omap_wb_fourcc_v4l2_to_drm(dpix->pixelformat); |
225 | wb_info.pre_mult_alpha = 1; | 231 | wb_info.pre_mult_alpha = 1; |
226 | 232 | ||
227 | wb_info.rotation = DRM_ROTATE_0; | 233 | wb_info.rotation = DRM_ROTATE_0; |
@@ -236,19 +242,13 @@ static void device_run(void *priv) | |||
236 | log_err(dev, | 242 | log_err(dev, |
237 | "Conversion setup failed, check source and destination parameters\n" | 243 | "Conversion setup failed, check source and destination parameters\n" |
238 | ); | 244 | ); |
239 | log_err(dev, "\tSRC: %dx%d, fmt: %c%c%c%c sw %d\n", | 245 | log_err(dev, "\tSRC: %dx%d, fmt: %4.4s sw %d\n", |
240 | src_info.width, src_info.height, | 246 | src_info.width, src_info.height, |
241 | GET_BYTE(spix->pixelformat, 0), | 247 | (char *)&spix->pixelformat, |
242 | GET_BYTE(spix->pixelformat, 1), | ||
243 | GET_BYTE(spix->pixelformat, 2), | ||
244 | GET_BYTE(spix->pixelformat, 3), | ||
245 | src_info.screen_width); | 248 | src_info.screen_width); |
246 | log_err(dev, "\tDST: %dx%d, fmt: %c%c%c%c sw %d\n", | 249 | log_err(dev, "\tDST: %dx%d, fmt: %4.4s sw %d\n", |
247 | wb_info.width, wb_info.height, | 250 | wb_info.width, wb_info.height, |
248 | GET_BYTE(dpix->pixelformat, 0), | 251 | (char *)&dpix->pixelformat, |
249 | GET_BYTE(dpix->pixelformat, 1), | ||
250 | GET_BYTE(dpix->pixelformat, 2), | ||
251 | GET_BYTE(dpix->pixelformat, 3), | ||
252 | wb_info.buf_width); | 252 | wb_info.buf_width); |
253 | return; | 253 | return; |
254 | } | 254 | } |
@@ -337,6 +337,7 @@ static int wbm2m_enum_fmt(struct file *file, void *priv, | |||
337 | if (f->index >= num_wb_formats) | 337 | if (f->index >= num_wb_formats) |
338 | return -EINVAL; | 338 | return -EINVAL; |
339 | 339 | ||
340 | f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; | ||
340 | f->pixelformat = wb_formats[f->index].fourcc; | 341 | f->pixelformat = wb_formats[f->index].fourcc; |
341 | return 0; | 342 | return 0; |
342 | } | 343 | } |
@@ -366,12 +367,9 @@ static int wbm2m_g_fmt(struct file *file, void *priv, struct v4l2_format *f) | |||
366 | s_q_data->format.fmt.pix_mp.colorspace; | 367 | s_q_data->format.fmt.pix_mp.colorspace; |
367 | } | 368 | } |
368 | 369 | ||
369 | log_dbg(ctx->dev, "ctx %pa type %d, %dx%d, fmt: %c%c%c%c bpl_y %d", | 370 | log_dbg(ctx->dev, "ctx %pa type %d, %dx%d, fmt: %4.4s bpl_y %d", |
370 | &ctx, f->type, pix->width, pix->height, | 371 | &ctx, f->type, pix->width, pix->height, |
371 | GET_BYTE(pix->pixelformat, 0), | 372 | (char *)&pix->pixelformat, |
372 | GET_BYTE(pix->pixelformat, 1), | ||
373 | GET_BYTE(pix->pixelformat, 2), | ||
374 | GET_BYTE(pix->pixelformat, 3), | ||
375 | pix->plane_fmt[LUMA_PLANE].bytesperline); | 373 | pix->plane_fmt[LUMA_PLANE].bytesperline); |
376 | if (pix->num_planes == 2) | 374 | if (pix->num_planes == 2) |
377 | log_dbg(ctx->dev, " bpl_uv %d\n", | 375 | log_dbg(ctx->dev, " bpl_uv %d\n", |
@@ -443,12 +441,22 @@ static int wbm2m_try_fmt(struct file *file, void *priv, struct v4l2_format *f) | |||
443 | depth = fmt->depth[i]; | 441 | depth = fmt->depth[i]; |
444 | 442 | ||
445 | if (i == LUMA_PLANE) | 443 | if (i == LUMA_PLANE) |
446 | plane_fmt->bytesperline = (pix->width * depth) >> 3; | 444 | plane_fmt->bytesperline = pix->width * depth / 8; |
447 | else | 445 | else |
448 | plane_fmt->bytesperline = pix->width; | 446 | plane_fmt->bytesperline = pix->width; |
449 | 447 | ||
450 | plane_fmt->sizeimage = | 448 | plane_fmt->sizeimage = (pix->height * pix->width * |
451 | (pix->height * pix->width * depth) >> 3; | 449 | depth) / 8; |
450 | |||
451 | if (fmt->fourcc == V4L2_PIX_FMT_NV12) { | ||
452 | /* | ||
453 | * Since we are using a single plane buffer | ||
454 | * we need to adjust the reported sizeimage | ||
455 | * to include the colocated UV part. | ||
456 | */ | ||
457 | plane_fmt->sizeimage += (pix->height / 2 * | ||
458 | plane_fmt->bytesperline); | ||
459 | } | ||
452 | 460 | ||
453 | memset(plane_fmt->reserved, 0, sizeof(plane_fmt->reserved)); | 461 | memset(plane_fmt->reserved, 0, sizeof(plane_fmt->reserved)); |
454 | } | 462 | } |
@@ -483,12 +491,9 @@ static int __wbm2m_s_fmt(struct wbm2m_ctx *ctx, struct v4l2_format *f) | |||
483 | q_data->c_rect.width = pix->width; | 491 | q_data->c_rect.width = pix->width; |
484 | q_data->c_rect.height = pix->height; | 492 | q_data->c_rect.height = pix->height; |
485 | 493 | ||
486 | log_dbg(ctx->dev, "ctx %pa type %d, %dx%d, fmt: %c%c%c%c bpl_y %d", | 494 | log_dbg(ctx->dev, "ctx %pa type %d, %dx%d, fmt: %4.4s bpl_y %d", |
487 | &ctx, f->type, pix->width, pix->height, | 495 | &ctx, f->type, pix->width, pix->height, |
488 | GET_BYTE(pix->pixelformat, 0), | 496 | (char *)&pix->pixelformat, |
489 | GET_BYTE(pix->pixelformat, 1), | ||
490 | GET_BYTE(pix->pixelformat, 2), | ||
491 | GET_BYTE(pix->pixelformat, 3), | ||
492 | pix->plane_fmt[LUMA_PLANE].bytesperline); | 497 | pix->plane_fmt[LUMA_PLANE].bytesperline); |
493 | if (pix->num_planes == 2) | 498 | if (pix->num_planes == 2) |
494 | log_dbg(ctx->dev, " bpl_uv %d\n", | 499 | log_dbg(ctx->dev, " bpl_uv %d\n", |
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 6edcb5485092..b35ebabd6a9f 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c | |||
@@ -946,7 +946,7 @@ int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev, | |||
946 | /* calc dclk divider with current vco freq */ | 946 | /* calc dclk divider with current vco freq */ |
947 | dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk, | 947 | dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk, |
948 | pd_min, pd_even); | 948 | pd_min, pd_even); |
949 | if (vclk_div > pd_max) | 949 | if (dclk_div > pd_max) |
950 | break; /* vco is too big, it has to stop */ | 950 | break; /* vco is too big, it has to stop */ |
951 | 951 | ||
952 | /* calc score with current vco freq */ | 952 | /* calc score with current vco freq */ |
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index 9befd624a5f0..6fab07935d16 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c | |||
@@ -371,6 +371,31 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc) | |||
371 | rcrtc->started = true; | 371 | rcrtc->started = true; |
372 | } | 372 | } |
373 | 373 | ||
374 | static void rcar_du_crtc_disable_planes(struct rcar_du_crtc *rcrtc) | ||
375 | { | ||
376 | struct rcar_du_device *rcdu = rcrtc->group->dev; | ||
377 | struct drm_crtc *crtc = &rcrtc->crtc; | ||
378 | u32 status; | ||
379 | /* Make sure vblank interrupts are enabled. */ | ||
380 | drm_crtc_vblank_get(crtc); | ||
381 | /* | ||
382 | * Disable planes and calculate how many vertical blanking interrupts we | ||
383 | * have to wait for. If a vertical blanking interrupt has been triggered | ||
384 | * but not processed yet, we don't know whether it occurred before or | ||
385 | * after the planes got disabled. We thus have to wait for two vblank | ||
386 | * interrupts in that case. | ||
387 | */ | ||
388 | spin_lock_irq(&rcrtc->vblank_lock); | ||
389 | rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0); | ||
390 | status = rcar_du_crtc_read(rcrtc, DSSR); | ||
391 | rcrtc->vblank_count = status & DSSR_VBK ? 2 : 1; | ||
392 | spin_unlock_irq(&rcrtc->vblank_lock); | ||
393 | if (!wait_event_timeout(rcrtc->vblank_wait, rcrtc->vblank_count == 0, | ||
394 | msecs_to_jiffies(100))) | ||
395 | dev_warn(rcdu->dev, "vertical blanking timeout\n"); | ||
396 | drm_crtc_vblank_put(crtc); | ||
397 | } | ||
398 | |||
374 | static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc) | 399 | static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc) |
375 | { | 400 | { |
376 | struct drm_crtc *crtc = &rcrtc->crtc; | 401 | struct drm_crtc *crtc = &rcrtc->crtc; |
@@ -379,17 +404,16 @@ static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc) | |||
379 | return; | 404 | return; |
380 | 405 | ||
381 | /* Disable all planes and wait for the change to take effect. This is | 406 | /* Disable all planes and wait for the change to take effect. This is |
382 | * required as the DSnPR registers are updated on vblank, and no vblank | 407 | * required as the plane enable registers are updated on vblank, and no |
383 | * will occur once the CRTC is stopped. Disabling planes when starting | 408 | * vblank will occur once the CRTC is stopped. Disabling planes when |
384 | * the CRTC thus wouldn't be enough as it would start scanning out | 409 | * starting the CRTC thus wouldn't be enough as it would start scanning |
385 | * immediately from old frame buffers until the next vblank. | 410 | * out immediately from old frame buffers until the next vblank. |
386 | * | 411 | * |
387 | * This increases the CRTC stop delay, especially when multiple CRTCs | 412 | * This increases the CRTC stop delay, especially when multiple CRTCs |
388 | * are stopped in one operation as we now wait for one vblank per CRTC. | 413 | * are stopped in one operation as we now wait for one vblank per CRTC. |
389 | * Whether this can be improved needs to be researched. | 414 | * Whether this can be improved needs to be researched. |
390 | */ | 415 | */ |
391 | rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0); | 416 | rcar_du_crtc_disable_planes(rcrtc); |
392 | drm_crtc_wait_one_vblank(crtc); | ||
393 | 417 | ||
394 | /* Disable vertical blanking interrupt reporting. We first need to wait | 418 | /* Disable vertical blanking interrupt reporting. We first need to wait |
395 | * for page flip completion before stopping the CRTC as userspace | 419 | * for page flip completion before stopping the CRTC as userspace |
@@ -528,10 +552,26 @@ static irqreturn_t rcar_du_crtc_irq(int irq, void *arg) | |||
528 | irqreturn_t ret = IRQ_NONE; | 552 | irqreturn_t ret = IRQ_NONE; |
529 | u32 status; | 553 | u32 status; |
530 | 554 | ||
555 | spin_lock(&rcrtc->vblank_lock); | ||
556 | |||
531 | status = rcar_du_crtc_read(rcrtc, DSSR); | 557 | status = rcar_du_crtc_read(rcrtc, DSSR); |
532 | rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK); | 558 | rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK); |
533 | 559 | ||
534 | if (status & DSSR_FRM) { | 560 | if (status & DSSR_VBK) { |
561 | /* | ||
562 | * Wake up the vblank wait if the counter reaches 0. This must | ||
563 | * be protected by the vblank_lock to avoid races in | ||
564 | * rcar_du_crtc_disable_planes(). | ||
565 | */ | ||
566 | if (rcrtc->vblank_count) { | ||
567 | if (--rcrtc->vblank_count == 0) | ||
568 | wake_up(&rcrtc->vblank_wait); | ||
569 | } | ||
570 | } | ||
571 | |||
572 | spin_unlock(&rcrtc->vblank_lock); | ||
573 | |||
574 | if (status & DSSR_VBK) { | ||
535 | drm_handle_vblank(rcrtc->crtc.dev, rcrtc->index); | 575 | drm_handle_vblank(rcrtc->crtc.dev, rcrtc->index); |
536 | rcar_du_crtc_finish_page_flip(rcrtc); | 576 | rcar_du_crtc_finish_page_flip(rcrtc); |
537 | ret = IRQ_HANDLED; | 577 | ret = IRQ_HANDLED; |
@@ -585,6 +625,8 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index) | |||
585 | } | 625 | } |
586 | 626 | ||
587 | init_waitqueue_head(&rcrtc->flip_wait); | 627 | init_waitqueue_head(&rcrtc->flip_wait); |
628 | init_waitqueue_head(&rcrtc->vblank_wait); | ||
629 | spin_lock_init(&rcrtc->vblank_lock); | ||
588 | 630 | ||
589 | rcrtc->group = rgrp; | 631 | rcrtc->group = rgrp; |
590 | rcrtc->mmio_offset = mmio_offsets[index]; | 632 | rcrtc->mmio_offset = mmio_offsets[index]; |
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h index 2bbe3f5aab65..be22ce33b70a 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h | |||
@@ -15,6 +15,7 @@ | |||
15 | #define __RCAR_DU_CRTC_H__ | 15 | #define __RCAR_DU_CRTC_H__ |
16 | 16 | ||
17 | #include <linux/mutex.h> | 17 | #include <linux/mutex.h> |
18 | #include <linux/spinlock.h> | ||
18 | #include <linux/wait.h> | 19 | #include <linux/wait.h> |
19 | 20 | ||
20 | #include <drm/drmP.h> | 21 | #include <drm/drmP.h> |
@@ -32,6 +33,9 @@ struct rcar_du_group; | |||
32 | * @started: whether the CRTC has been started and is running | 33 | * @started: whether the CRTC has been started and is running |
33 | * @event: event to post when the pending page flip completes | 34 | * @event: event to post when the pending page flip completes |
34 | * @flip_wait: wait queue used to signal page flip completion | 35 | * @flip_wait: wait queue used to signal page flip completion |
36 | * @vblank_lock: protects vblank_wait and vblank_count | ||
37 | * @vblank_wait: wait queue used to signal vertical blanking | ||
38 | * @vblank_count: number of vertical blanking interrupts to wait for | ||
35 | * @outputs: bitmask of the outputs (enum rcar_du_output) driven by this CRTC | 39 | * @outputs: bitmask of the outputs (enum rcar_du_output) driven by this CRTC |
36 | * @enabled: whether the CRTC is enabled, used to control system resume | 40 | * @enabled: whether the CRTC is enabled, used to control system resume |
37 | * @group: CRTC group this CRTC belongs to | 41 | * @group: CRTC group this CRTC belongs to |
@@ -48,6 +52,10 @@ struct rcar_du_crtc { | |||
48 | struct drm_pending_vblank_event *event; | 52 | struct drm_pending_vblank_event *event; |
49 | wait_queue_head_t flip_wait; | 53 | wait_queue_head_t flip_wait; |
50 | 54 | ||
55 | spinlock_t vblank_lock; | ||
56 | wait_queue_head_t vblank_wait; | ||
57 | unsigned int vblank_count; | ||
58 | |||
51 | unsigned int outputs; | 59 | unsigned int outputs; |
52 | bool enabled; | 60 | bool enabled; |
53 | 61 | ||
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c index 0d20488eaa37..a355b31d91a0 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c | |||
@@ -396,7 +396,7 @@ struct platform_driver tfp410_driver = { | |||
396 | .remove = tfp410_remove, | 396 | .remove = tfp410_remove, |
397 | .driver = { | 397 | .driver = { |
398 | .owner = THIS_MODULE, | 398 | .owner = THIS_MODULE, |
399 | .name = "tfp410", | 399 | .name = "tilcdc-tfp410", |
400 | .of_match_table = tfp410_of_match, | 400 | .of_match_table = tfp410_of_match, |
401 | }, | 401 | }, |
402 | }; | 402 | }; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 04fd0f2b6af0..fda8e85dd5a2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
@@ -2678,6 +2678,8 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv, | |||
2678 | } | 2678 | } |
2679 | 2679 | ||
2680 | view_type = vmw_view_cmd_to_type(header->id); | 2680 | view_type = vmw_view_cmd_to_type(header->id); |
2681 | if (view_type == vmw_view_max) | ||
2682 | return -EINVAL; | ||
2681 | cmd = container_of(header, typeof(*cmd), header); | 2683 | cmd = container_of(header, typeof(*cmd), header); |
2682 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | 2684 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
2683 | user_surface_converter, | 2685 | user_surface_converter, |
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 1a1fc8351289..6861b74e2b61 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c | |||
@@ -2053,6 +2053,7 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
2053 | { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) }, | 2053 | { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) }, |
2054 | { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) }, | 2054 | { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) }, |
2055 | { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) }, | 2055 | { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) }, |
2056 | { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) }, | ||
2056 | { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) }, | 2057 | { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) }, |
2057 | { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) }, | 2058 | { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) }, |
2058 | { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) }, | 2059 | { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) }, |
@@ -2307,7 +2308,6 @@ static const struct hid_device_id hid_ignore_list[] = { | |||
2307 | { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x0004) }, | 2308 | { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x0004) }, |
2308 | { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x000a) }, | 2309 | { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x000a) }, |
2309 | { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) }, | 2310 | { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) }, |
2310 | { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0401) }, | ||
2311 | { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) }, | 2311 | { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) }, |
2312 | { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) }, | 2312 | { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) }, |
2313 | { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) }, | 2313 | { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) }, |
@@ -2577,6 +2577,17 @@ bool hid_ignore(struct hid_device *hdev) | |||
2577 | strncmp(hdev->name, "www.masterkit.ru MA901", 22) == 0) | 2577 | strncmp(hdev->name, "www.masterkit.ru MA901", 22) == 0) |
2578 | return true; | 2578 | return true; |
2579 | break; | 2579 | break; |
2580 | case USB_VENDOR_ID_ELAN: | ||
2581 | /* | ||
2582 | * Many Elan devices have a product id of 0x0401 and are handled | ||
2583 | * by the elan_i2c input driver. But the ACPI HID ELAN0800 dev | ||
2584 | * is not (and cannot be) handled by that driver -> | ||
2585 | * Ignore all 0x0401 devs except for the ELAN0800 dev. | ||
2586 | */ | ||
2587 | if (hdev->product == 0x0401 && | ||
2588 | strncmp(hdev->name, "ELAN0800", 8) != 0) | ||
2589 | return true; | ||
2590 | break; | ||
2580 | } | 2591 | } |
2581 | 2592 | ||
2582 | if (hdev->type == HID_TYPE_USBMOUSE && | 2593 | if (hdev->type == HID_TYPE_USBMOUSE && |
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 6937086060a6..b554d17c9156 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
@@ -1021,6 +1021,7 @@ | |||
1021 | 1021 | ||
1022 | #define USB_VENDOR_ID_XIN_MO 0x16c0 | 1022 | #define USB_VENDOR_ID_XIN_MO 0x16c0 |
1023 | #define USB_DEVICE_ID_XIN_MO_DUAL_ARCADE 0x05e1 | 1023 | #define USB_DEVICE_ID_XIN_MO_DUAL_ARCADE 0x05e1 |
1024 | #define USB_DEVICE_ID_THT_2P_ARCADE 0x75e1 | ||
1024 | 1025 | ||
1025 | #define USB_VENDOR_ID_XIROKU 0x1477 | 1026 | #define USB_VENDOR_ID_XIROKU 0x1477 |
1026 | #define USB_DEVICE_ID_XIROKU_SPX 0x1006 | 1027 | #define USB_DEVICE_ID_XIROKU_SPX 0x1006 |
diff --git a/drivers/hid/hid-xinmo.c b/drivers/hid/hid-xinmo.c index 7df5227a7e61..9ad7731d2e10 100644 --- a/drivers/hid/hid-xinmo.c +++ b/drivers/hid/hid-xinmo.c | |||
@@ -46,6 +46,7 @@ static int xinmo_event(struct hid_device *hdev, struct hid_field *field, | |||
46 | 46 | ||
47 | static const struct hid_device_id xinmo_devices[] = { | 47 | static const struct hid_device_id xinmo_devices[] = { |
48 | { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) }, | 48 | { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) }, |
49 | { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) }, | ||
49 | { } | 50 | { } |
50 | }; | 51 | }; |
51 | 52 | ||
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c index 8ce1f2e22912..d415a804fd26 100644 --- a/drivers/hv/hv.c +++ b/drivers/hv/hv.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/clockchips.h> | 31 | #include <linux/clockchips.h> |
32 | #include <asm/hyperv.h> | 32 | #include <asm/hyperv.h> |
33 | #include <asm/mshyperv.h> | 33 | #include <asm/mshyperv.h> |
34 | #include <asm/nospec-branch.h> | ||
34 | #include "hyperv_vmbus.h" | 35 | #include "hyperv_vmbus.h" |
35 | 36 | ||
36 | /* The one and only */ | 37 | /* The one and only */ |
@@ -103,9 +104,10 @@ static u64 do_hypercall(u64 control, void *input, void *output) | |||
103 | return (u64)ULLONG_MAX; | 104 | return (u64)ULLONG_MAX; |
104 | 105 | ||
105 | __asm__ __volatile__("mov %0, %%r8" : : "r" (output_address) : "r8"); | 106 | __asm__ __volatile__("mov %0, %%r8" : : "r" (output_address) : "r8"); |
106 | __asm__ __volatile__("call *%3" : "=a" (hv_status) : | 107 | __asm__ __volatile__(CALL_NOSPEC : |
108 | "=a" (hv_status) : | ||
107 | "c" (control), "d" (input_address), | 109 | "c" (control), "d" (input_address), |
108 | "m" (hypercall_page)); | 110 | THUNK_TARGET(hypercall_page)); |
109 | 111 | ||
110 | return hv_status; | 112 | return hv_status; |
111 | 113 | ||
@@ -123,11 +125,12 @@ static u64 do_hypercall(u64 control, void *input, void *output) | |||
123 | if (!hypercall_page) | 125 | if (!hypercall_page) |
124 | return (u64)ULLONG_MAX; | 126 | return (u64)ULLONG_MAX; |
125 | 127 | ||
126 | __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi), | 128 | __asm__ __volatile__ (CALL_NOSPEC : "=d"(hv_status_hi), |
127 | "=a"(hv_status_lo) : "d" (control_hi), | 129 | "=a"(hv_status_lo) : "d" (control_hi), |
128 | "a" (control_lo), "b" (input_address_hi), | 130 | "a" (control_lo), "b" (input_address_hi), |
129 | "c" (input_address_lo), "D"(output_address_hi), | 131 | "c" (input_address_lo), "D"(output_address_hi), |
130 | "S"(output_address_lo), "m" (hypercall_page)); | 132 | "S"(output_address_lo), |
133 | THUNK_TARGET(hypercall_page)); | ||
131 | 134 | ||
132 | return hv_status_lo | ((u64)hv_status_hi << 32); | 135 | return hv_status_lo | ((u64)hv_status_hi << 32); |
133 | #endif /* !x86_64 */ | 136 | #endif /* !x86_64 */ |
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c index cccef87963e0..975c43d446f8 100644 --- a/drivers/hwmon/asus_atk0110.c +++ b/drivers/hwmon/asus_atk0110.c | |||
@@ -646,6 +646,9 @@ static int atk_read_value(struct atk_sensor_data *sensor, u64 *value) | |||
646 | else | 646 | else |
647 | err = atk_read_value_new(sensor, value); | 647 | err = atk_read_value_new(sensor, value); |
648 | 648 | ||
649 | if (err) | ||
650 | return err; | ||
651 | |||
649 | sensor->is_valid = true; | 652 | sensor->is_valid = true; |
650 | sensor->last_updated = jiffies; | 653 | sensor->last_updated = jiffies; |
651 | sensor->cached_value = *value; | 654 | sensor->cached_value = *value; |
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c index ba59eaef2e07..d013acf3f83a 100644 --- a/drivers/hwmon/pmbus/pmbus_core.c +++ b/drivers/hwmon/pmbus/pmbus_core.c | |||
@@ -20,6 +20,7 @@ | |||
20 | */ | 20 | */ |
21 | 21 | ||
22 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
23 | #include <linux/math64.h> | ||
23 | #include <linux/module.h> | 24 | #include <linux/module.h> |
24 | #include <linux/init.h> | 25 | #include <linux/init.h> |
25 | #include <linux/err.h> | 26 | #include <linux/err.h> |
@@ -476,8 +477,8 @@ static long pmbus_reg2data_linear(struct pmbus_data *data, | |||
476 | static long pmbus_reg2data_direct(struct pmbus_data *data, | 477 | static long pmbus_reg2data_direct(struct pmbus_data *data, |
477 | struct pmbus_sensor *sensor) | 478 | struct pmbus_sensor *sensor) |
478 | { | 479 | { |
479 | long val = (s16) sensor->data; | 480 | s64 b, val = (s16)sensor->data; |
480 | long m, b, R; | 481 | s32 m, R; |
481 | 482 | ||
482 | m = data->info->m[sensor->class]; | 483 | m = data->info->m[sensor->class]; |
483 | b = data->info->b[sensor->class]; | 484 | b = data->info->b[sensor->class]; |
@@ -505,11 +506,12 @@ static long pmbus_reg2data_direct(struct pmbus_data *data, | |||
505 | R--; | 506 | R--; |
506 | } | 507 | } |
507 | while (R < 0) { | 508 | while (R < 0) { |
508 | val = DIV_ROUND_CLOSEST(val, 10); | 509 | val = div_s64(val + 5LL, 10L); /* round closest */ |
509 | R++; | 510 | R++; |
510 | } | 511 | } |
511 | 512 | ||
512 | return (val - b) / m; | 513 | val = div_s64(val - b, m); |
514 | return clamp_val(val, LONG_MIN, LONG_MAX); | ||
513 | } | 515 | } |
514 | 516 | ||
515 | /* | 517 | /* |
@@ -629,7 +631,8 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data, | |||
629 | static u16 pmbus_data2reg_direct(struct pmbus_data *data, | 631 | static u16 pmbus_data2reg_direct(struct pmbus_data *data, |
630 | struct pmbus_sensor *sensor, long val) | 632 | struct pmbus_sensor *sensor, long val) |
631 | { | 633 | { |
632 | long m, b, R; | 634 | s64 b, val64 = val; |
635 | s32 m, R; | ||
633 | 636 | ||
634 | m = data->info->m[sensor->class]; | 637 | m = data->info->m[sensor->class]; |
635 | b = data->info->b[sensor->class]; | 638 | b = data->info->b[sensor->class]; |
@@ -646,18 +649,18 @@ static u16 pmbus_data2reg_direct(struct pmbus_data *data, | |||
646 | R -= 3; /* Adjust R and b for data in milli-units */ | 649 | R -= 3; /* Adjust R and b for data in milli-units */ |
647 | b *= 1000; | 650 | b *= 1000; |
648 | } | 651 | } |
649 | val = val * m + b; | 652 | val64 = val64 * m + b; |
650 | 653 | ||
651 | while (R > 0) { | 654 | while (R > 0) { |
652 | val *= 10; | 655 | val64 *= 10; |
653 | R--; | 656 | R--; |
654 | } | 657 | } |
655 | while (R < 0) { | 658 | while (R < 0) { |
656 | val = DIV_ROUND_CLOSEST(val, 10); | 659 | val64 = div_s64(val64 + 5LL, 10L); /* round closest */ |
657 | R++; | 660 | R++; |
658 | } | 661 | } |
659 | 662 | ||
660 | return val; | 663 | return (u16)clamp_val(val64, S16_MIN, S16_MAX); |
661 | } | 664 | } |
662 | 665 | ||
663 | static u16 pmbus_data2reg_vid(struct pmbus_data *data, | 666 | static u16 pmbus_data2reg_vid(struct pmbus_data *data, |
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index bc147582bed9..6d62b69c898e 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c | |||
@@ -579,10 +579,10 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe, | |||
579 | ret = -EAGAIN; | 579 | ret = -EAGAIN; |
580 | goto skip_cqe; | 580 | goto skip_cqe; |
581 | } | 581 | } |
582 | if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) { | 582 | if (unlikely(!CQE_STATUS(hw_cqe) && |
583 | CQE_WRID_MSN(hw_cqe) != wq->rq.msn)) { | ||
583 | t4_set_wq_in_error(wq); | 584 | t4_set_wq_in_error(wq); |
584 | hw_cqe->header |= htonl(CQE_STATUS_V(T4_ERR_MSN)); | 585 | hw_cqe->header |= cpu_to_be32(CQE_STATUS_V(T4_ERR_MSN)); |
585 | goto proc_cqe; | ||
586 | } | 586 | } |
587 | goto proc_cqe; | 587 | goto proc_cqe; |
588 | } | 588 | } |
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 8763fb832b01..5a2a0b5db938 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
@@ -2483,9 +2483,8 @@ err_steer_free_bitmap: | |||
2483 | kfree(ibdev->ib_uc_qpns_bitmap); | 2483 | kfree(ibdev->ib_uc_qpns_bitmap); |
2484 | 2484 | ||
2485 | err_steer_qp_release: | 2485 | err_steer_qp_release: |
2486 | if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) | 2486 | mlx4_qp_release_range(dev, ibdev->steer_qpn_base, |
2487 | mlx4_qp_release_range(dev, ibdev->steer_qpn_base, | 2487 | ibdev->steer_qpn_count); |
2488 | ibdev->steer_qpn_count); | ||
2489 | err_counter: | 2488 | err_counter: |
2490 | for (i = 0; i < ibdev->num_ports; ++i) | 2489 | for (i = 0; i < ibdev->num_ports; ++i) |
2491 | mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]); | 2490 | mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]); |
@@ -2586,11 +2585,9 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr) | |||
2586 | ibdev->iboe.nb.notifier_call = NULL; | 2585 | ibdev->iboe.nb.notifier_call = NULL; |
2587 | } | 2586 | } |
2588 | 2587 | ||
2589 | if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) { | 2588 | mlx4_qp_release_range(dev, ibdev->steer_qpn_base, |
2590 | mlx4_qp_release_range(dev, ibdev->steer_qpn_base, | 2589 | ibdev->steer_qpn_count); |
2591 | ibdev->steer_qpn_count); | 2590 | kfree(ibdev->ib_uc_qpns_bitmap); |
2592 | kfree(ibdev->ib_uc_qpns_bitmap); | ||
2593 | } | ||
2594 | 2591 | ||
2595 | iounmap(ibdev->uar_map); | 2592 | iounmap(ibdev->uar_map); |
2596 | for (p = 0; p < ibdev->num_ports; ++p) | 2593 | for (p = 0; p < ibdev->num_ports; ++p) |
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 8a5998e6a407..88f97ea6b366 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h | |||
@@ -450,6 +450,7 @@ struct iser_fr_desc { | |||
450 | struct list_head list; | 450 | struct list_head list; |
451 | struct iser_reg_resources rsc; | 451 | struct iser_reg_resources rsc; |
452 | struct iser_pi_context *pi_ctx; | 452 | struct iser_pi_context *pi_ctx; |
453 | struct list_head all_list; | ||
453 | }; | 454 | }; |
454 | 455 | ||
455 | /** | 456 | /** |
@@ -463,6 +464,7 @@ struct iser_fr_pool { | |||
463 | struct list_head list; | 464 | struct list_head list; |
464 | spinlock_t lock; | 465 | spinlock_t lock; |
465 | int size; | 466 | int size; |
467 | struct list_head all_list; | ||
466 | }; | 468 | }; |
467 | 469 | ||
468 | /** | 470 | /** |
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index 42f4da620f2e..0cbc7ceb9a55 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c | |||
@@ -405,6 +405,7 @@ int iser_alloc_fastreg_pool(struct ib_conn *ib_conn, | |||
405 | int i, ret; | 405 | int i, ret; |
406 | 406 | ||
407 | INIT_LIST_HEAD(&fr_pool->list); | 407 | INIT_LIST_HEAD(&fr_pool->list); |
408 | INIT_LIST_HEAD(&fr_pool->all_list); | ||
408 | spin_lock_init(&fr_pool->lock); | 409 | spin_lock_init(&fr_pool->lock); |
409 | fr_pool->size = 0; | 410 | fr_pool->size = 0; |
410 | for (i = 0; i < cmds_max; i++) { | 411 | for (i = 0; i < cmds_max; i++) { |
@@ -416,6 +417,7 @@ int iser_alloc_fastreg_pool(struct ib_conn *ib_conn, | |||
416 | } | 417 | } |
417 | 418 | ||
418 | list_add_tail(&desc->list, &fr_pool->list); | 419 | list_add_tail(&desc->list, &fr_pool->list); |
420 | list_add_tail(&desc->all_list, &fr_pool->all_list); | ||
419 | fr_pool->size++; | 421 | fr_pool->size++; |
420 | } | 422 | } |
421 | 423 | ||
@@ -435,13 +437,13 @@ void iser_free_fastreg_pool(struct ib_conn *ib_conn) | |||
435 | struct iser_fr_desc *desc, *tmp; | 437 | struct iser_fr_desc *desc, *tmp; |
436 | int i = 0; | 438 | int i = 0; |
437 | 439 | ||
438 | if (list_empty(&fr_pool->list)) | 440 | if (list_empty(&fr_pool->all_list)) |
439 | return; | 441 | return; |
440 | 442 | ||
441 | iser_info("freeing conn %p fr pool\n", ib_conn); | 443 | iser_info("freeing conn %p fr pool\n", ib_conn); |
442 | 444 | ||
443 | list_for_each_entry_safe(desc, tmp, &fr_pool->list, list) { | 445 | list_for_each_entry_safe(desc, tmp, &fr_pool->all_list, all_list) { |
444 | list_del(&desc->list); | 446 | list_del(&desc->all_list); |
445 | iser_free_reg_res(&desc->rsc); | 447 | iser_free_reg_res(&desc->rsc); |
446 | if (desc->pi_ctx) | 448 | if (desc->pi_ctx) |
447 | iser_free_pi_ctx(desc->pi_ctx); | 449 | iser_free_pi_ctx(desc->pi_ctx); |
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index c52131233ba7..a73874508c3a 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c | |||
@@ -957,8 +957,7 @@ static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp) | |||
957 | return -ENOMEM; | 957 | return -ENOMEM; |
958 | 958 | ||
959 | attr->qp_state = IB_QPS_INIT; | 959 | attr->qp_state = IB_QPS_INIT; |
960 | attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ | | 960 | attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE; |
961 | IB_ACCESS_REMOTE_WRITE; | ||
962 | attr->port_num = ch->sport->port; | 961 | attr->port_num = ch->sport->port; |
963 | attr->pkey_index = 0; | 962 | attr->pkey_index = 0; |
964 | 963 | ||
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c index 10c4e3d462f1..7233db002588 100644 --- a/drivers/input/misc/twl4030-vibra.c +++ b/drivers/input/misc/twl4030-vibra.c | |||
@@ -178,12 +178,14 @@ static SIMPLE_DEV_PM_OPS(twl4030_vibra_pm_ops, | |||
178 | twl4030_vibra_suspend, twl4030_vibra_resume); | 178 | twl4030_vibra_suspend, twl4030_vibra_resume); |
179 | 179 | ||
180 | static bool twl4030_vibra_check_coexist(struct twl4030_vibra_data *pdata, | 180 | static bool twl4030_vibra_check_coexist(struct twl4030_vibra_data *pdata, |
181 | struct device_node *node) | 181 | struct device_node *parent) |
182 | { | 182 | { |
183 | struct device_node *node; | ||
184 | |||
183 | if (pdata && pdata->coexist) | 185 | if (pdata && pdata->coexist) |
184 | return true; | 186 | return true; |
185 | 187 | ||
186 | node = of_find_node_by_name(node, "codec"); | 188 | node = of_get_child_by_name(parent, "codec"); |
187 | if (node) { | 189 | if (node) { |
188 | of_node_put(node); | 190 | of_node_put(node); |
189 | return true; | 191 | return true; |
diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c index ea63fad48de6..1e968ae37f60 100644 --- a/drivers/input/misc/twl6040-vibra.c +++ b/drivers/input/misc/twl6040-vibra.c | |||
@@ -262,7 +262,7 @@ static int twl6040_vibra_probe(struct platform_device *pdev) | |||
262 | int vddvibr_uV = 0; | 262 | int vddvibr_uV = 0; |
263 | int error; | 263 | int error; |
264 | 264 | ||
265 | twl6040_core_node = of_find_node_by_name(twl6040_core_dev->of_node, | 265 | twl6040_core_node = of_get_child_by_name(twl6040_core_dev->of_node, |
266 | "vibra"); | 266 | "vibra"); |
267 | if (!twl6040_core_node) { | 267 | if (!twl6040_core_node) { |
268 | dev_err(&pdev->dev, "parent of node is missing?\n"); | 268 | dev_err(&pdev->dev, "parent of node is missing?\n"); |
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 6f4dc0fd2ca3..51b96e9bf793 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c | |||
@@ -1613,7 +1613,7 @@ static int elantech_set_properties(struct elantech_data *etd) | |||
1613 | case 5: | 1613 | case 5: |
1614 | etd->hw_version = 3; | 1614 | etd->hw_version = 3; |
1615 | break; | 1615 | break; |
1616 | case 6 ... 14: | 1616 | case 6 ... 15: |
1617 | etd->hw_version = 4; | 1617 | etd->hw_version = 4; |
1618 | break; | 1618 | break; |
1619 | default: | 1619 | default: |
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c index 7e2dc5e56632..0b49f29bf0da 100644 --- a/drivers/input/mouse/trackpoint.c +++ b/drivers/input/mouse/trackpoint.c | |||
@@ -383,6 +383,9 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties) | |||
383 | if (trackpoint_read(&psmouse->ps2dev, TP_EXT_BTN, &button_info)) { | 383 | if (trackpoint_read(&psmouse->ps2dev, TP_EXT_BTN, &button_info)) { |
384 | psmouse_warn(psmouse, "failed to get extended button data, assuming 3 buttons\n"); | 384 | psmouse_warn(psmouse, "failed to get extended button data, assuming 3 buttons\n"); |
385 | button_info = 0x33; | 385 | button_info = 0x33; |
386 | } else if (!button_info) { | ||
387 | psmouse_warn(psmouse, "got 0 in extended button data, assuming 3 buttons\n"); | ||
388 | button_info = 0x33; | ||
386 | } | 389 | } |
387 | 390 | ||
388 | psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL); | 391 | psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL); |
diff --git a/drivers/input/touchscreen/88pm860x-ts.c b/drivers/input/touchscreen/88pm860x-ts.c index 251ff2aa0633..7a0dbce4dae9 100644 --- a/drivers/input/touchscreen/88pm860x-ts.c +++ b/drivers/input/touchscreen/88pm860x-ts.c | |||
@@ -126,7 +126,7 @@ static int pm860x_touch_dt_init(struct platform_device *pdev, | |||
126 | int data, n, ret; | 126 | int data, n, ret; |
127 | if (!np) | 127 | if (!np) |
128 | return -ENODEV; | 128 | return -ENODEV; |
129 | np = of_find_node_by_name(np, "touch"); | 129 | np = of_get_child_by_name(np, "touch"); |
130 | if (!np) { | 130 | if (!np) { |
131 | dev_err(&pdev->dev, "Can't find touch node\n"); | 131 | dev_err(&pdev->dev, "Can't find touch node\n"); |
132 | return -EINVAL; | 132 | return -EINVAL; |
@@ -144,13 +144,13 @@ static int pm860x_touch_dt_init(struct platform_device *pdev, | |||
144 | if (data) { | 144 | if (data) { |
145 | ret = pm860x_reg_write(i2c, PM8607_GPADC_MISC1, data); | 145 | ret = pm860x_reg_write(i2c, PM8607_GPADC_MISC1, data); |
146 | if (ret < 0) | 146 | if (ret < 0) |
147 | return -EINVAL; | 147 | goto err_put_node; |
148 | } | 148 | } |
149 | /* set tsi prebias time */ | 149 | /* set tsi prebias time */ |
150 | if (!of_property_read_u32(np, "marvell,88pm860x-tsi-prebias", &data)) { | 150 | if (!of_property_read_u32(np, "marvell,88pm860x-tsi-prebias", &data)) { |
151 | ret = pm860x_reg_write(i2c, PM8607_TSI_PREBIAS, data); | 151 | ret = pm860x_reg_write(i2c, PM8607_TSI_PREBIAS, data); |
152 | if (ret < 0) | 152 | if (ret < 0) |
153 | return -EINVAL; | 153 | goto err_put_node; |
154 | } | 154 | } |
155 | /* set prebias & prechg time of pen detect */ | 155 | /* set prebias & prechg time of pen detect */ |
156 | data = 0; | 156 | data = 0; |
@@ -161,10 +161,18 @@ static int pm860x_touch_dt_init(struct platform_device *pdev, | |||
161 | if (data) { | 161 | if (data) { |
162 | ret = pm860x_reg_write(i2c, PM8607_PD_PREBIAS, data); | 162 | ret = pm860x_reg_write(i2c, PM8607_PD_PREBIAS, data); |
163 | if (ret < 0) | 163 | if (ret < 0) |
164 | return -EINVAL; | 164 | goto err_put_node; |
165 | } | 165 | } |
166 | of_property_read_u32(np, "marvell,88pm860x-resistor-X", res_x); | 166 | of_property_read_u32(np, "marvell,88pm860x-resistor-X", res_x); |
167 | |||
168 | of_node_put(np); | ||
169 | |||
167 | return 0; | 170 | return 0; |
171 | |||
172 | err_put_node: | ||
173 | of_node_put(np); | ||
174 | |||
175 | return -EINVAL; | ||
168 | } | 176 | } |
169 | #else | 177 | #else |
170 | #define pm860x_touch_dt_init(x, y, z) (-1) | 178 | #define pm860x_touch_dt_init(x, y, z) (-1) |
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 64f1eb8fdcbc..347aaaa5a7ea 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c | |||
@@ -1541,13 +1541,15 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain) | |||
1541 | return -ENOMEM; | 1541 | return -ENOMEM; |
1542 | 1542 | ||
1543 | arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; | 1543 | arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; |
1544 | smmu_domain->pgtbl_ops = pgtbl_ops; | ||
1545 | 1544 | ||
1546 | ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg); | 1545 | ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg); |
1547 | if (IS_ERR_VALUE(ret)) | 1546 | if (IS_ERR_VALUE(ret)) { |
1548 | free_io_pgtable_ops(pgtbl_ops); | 1547 | free_io_pgtable_ops(pgtbl_ops); |
1548 | return ret; | ||
1549 | } | ||
1549 | 1550 | ||
1550 | return ret; | 1551 | smmu_domain->pgtbl_ops = pgtbl_ops; |
1552 | return 0; | ||
1551 | } | 1553 | } |
1552 | 1554 | ||
1553 | static struct arm_smmu_group *arm_smmu_group_get(struct device *dev) | 1555 | static struct arm_smmu_group *arm_smmu_group_get(struct device *dev) |
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c index 823f6985b260..dd7e38ac29bd 100644 --- a/drivers/isdn/capi/kcapi.c +++ b/drivers/isdn/capi/kcapi.c | |||
@@ -1032,6 +1032,7 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data) | |||
1032 | sizeof(avmb1_carddef)))) | 1032 | sizeof(avmb1_carddef)))) |
1033 | return -EFAULT; | 1033 | return -EFAULT; |
1034 | cdef.cardtype = AVM_CARDTYPE_B1; | 1034 | cdef.cardtype = AVM_CARDTYPE_B1; |
1035 | cdef.cardnr = 0; | ||
1035 | } else { | 1036 | } else { |
1036 | if ((retval = copy_from_user(&cdef, data, | 1037 | if ((retval = copy_from_user(&cdef, data, |
1037 | sizeof(avmb1_extcarddef)))) | 1038 | sizeof(avmb1_extcarddef)))) |
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 5b815e64c1c9..a5a6909280fe 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c | |||
@@ -808,7 +808,10 @@ int bch_btree_cache_alloc(struct cache_set *c) | |||
808 | c->shrink.scan_objects = bch_mca_scan; | 808 | c->shrink.scan_objects = bch_mca_scan; |
809 | c->shrink.seeks = 4; | 809 | c->shrink.seeks = 4; |
810 | c->shrink.batch = c->btree_pages * 2; | 810 | c->shrink.batch = c->btree_pages * 2; |
811 | register_shrinker(&c->shrink); | 811 | |
812 | if (register_shrinker(&c->shrink)) | ||
813 | pr_warn("bcache: %s: could not register shrinker", | ||
814 | __func__); | ||
812 | 815 | ||
813 | return 0; | 816 | return 0; |
814 | } | 817 | } |
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 2ec7f90e3455..969c815c90b6 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c | |||
@@ -1527,7 +1527,8 @@ static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan, | |||
1527 | int l; | 1527 | int l; |
1528 | struct dm_buffer *b, *tmp; | 1528 | struct dm_buffer *b, *tmp; |
1529 | unsigned long freed = 0; | 1529 | unsigned long freed = 0; |
1530 | unsigned long count = nr_to_scan; | 1530 | unsigned long count = c->n_buffers[LIST_CLEAN] + |
1531 | c->n_buffers[LIST_DIRTY]; | ||
1531 | unsigned long retain_target = get_retain_buffers(c); | 1532 | unsigned long retain_target = get_retain_buffers(c); |
1532 | 1533 | ||
1533 | for (l = 0; l < LIST_SIZE; l++) { | 1534 | for (l = 0; l < LIST_SIZE; l++) { |
@@ -1564,6 +1565,7 @@ dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc) | |||
1564 | { | 1565 | { |
1565 | struct dm_bufio_client *c; | 1566 | struct dm_bufio_client *c; |
1566 | unsigned long count; | 1567 | unsigned long count; |
1568 | unsigned long retain_target; | ||
1567 | 1569 | ||
1568 | c = container_of(shrink, struct dm_bufio_client, shrinker); | 1570 | c = container_of(shrink, struct dm_bufio_client, shrinker); |
1569 | if (sc->gfp_mask & __GFP_FS) | 1571 | if (sc->gfp_mask & __GFP_FS) |
@@ -1572,8 +1574,9 @@ dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc) | |||
1572 | return 0; | 1574 | return 0; |
1573 | 1575 | ||
1574 | count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY]; | 1576 | count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY]; |
1577 | retain_target = get_retain_buffers(c); | ||
1575 | dm_bufio_unlock(c); | 1578 | dm_bufio_unlock(c); |
1576 | return count; | 1579 | return (count < retain_target) ? 0 : (count - retain_target); |
1577 | } | 1580 | } |
1578 | 1581 | ||
1579 | /* | 1582 | /* |
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index 3b67afda430b..e339f4288e8f 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c | |||
@@ -81,10 +81,14 @@ | |||
81 | #define SECTOR_TO_BLOCK_SHIFT 3 | 81 | #define SECTOR_TO_BLOCK_SHIFT 3 |
82 | 82 | ||
83 | /* | 83 | /* |
84 | * For btree insert: | ||
84 | * 3 for btree insert + | 85 | * 3 for btree insert + |
85 | * 2 for btree lookup used within space map | 86 | * 2 for btree lookup used within space map |
87 | * For btree remove: | ||
88 | * 2 for shadow spine + | ||
89 | * 4 for rebalance 3 child node | ||
86 | */ | 90 | */ |
87 | #define THIN_MAX_CONCURRENT_LOCKS 5 | 91 | #define THIN_MAX_CONCURRENT_LOCKS 6 |
88 | 92 | ||
89 | /* This should be plenty */ | 93 | /* This should be plenty */ |
90 | #define SPACE_MAP_ROOT_SIZE 128 | 94 | #define SPACE_MAP_ROOT_SIZE 128 |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 9ec6948e3b8b..3d9a80759d95 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -974,7 +974,8 @@ static void dec_pending(struct dm_io *io, int error) | |||
974 | } else { | 974 | } else { |
975 | /* done with normal IO or empty flush */ | 975 | /* done with normal IO or empty flush */ |
976 | trace_block_bio_complete(md->queue, bio, io_error); | 976 | trace_block_bio_complete(md->queue, bio, io_error); |
977 | bio->bi_error = io_error; | 977 | if (io_error) |
978 | bio->bi_error = io_error; | ||
978 | bio_endio(bio); | 979 | bio_endio(bio); |
979 | } | 980 | } |
980 | } | 981 | } |
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c index a1a68209bd36..880b7dee9c52 100644 --- a/drivers/md/persistent-data/dm-btree.c +++ b/drivers/md/persistent-data/dm-btree.c | |||
@@ -671,23 +671,8 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key) | |||
671 | pn->keys[1] = rn->keys[0]; | 671 | pn->keys[1] = rn->keys[0]; |
672 | memcpy_disk(value_ptr(pn, 1), &val, sizeof(__le64)); | 672 | memcpy_disk(value_ptr(pn, 1), &val, sizeof(__le64)); |
673 | 673 | ||
674 | /* | 674 | unlock_block(s->info, left); |
675 | * rejig the spine. This is ugly, since it knows too | 675 | unlock_block(s->info, right); |
676 | * much about the spine | ||
677 | */ | ||
678 | if (s->nodes[0] != new_parent) { | ||
679 | unlock_block(s->info, s->nodes[0]); | ||
680 | s->nodes[0] = new_parent; | ||
681 | } | ||
682 | if (key < le64_to_cpu(rn->keys[0])) { | ||
683 | unlock_block(s->info, right); | ||
684 | s->nodes[1] = left; | ||
685 | } else { | ||
686 | unlock_block(s->info, left); | ||
687 | s->nodes[1] = right; | ||
688 | } | ||
689 | s->count = 2; | ||
690 | |||
691 | return 0; | 676 | return 0; |
692 | } | 677 | } |
693 | 678 | ||
diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c index 7979e5d6498b..7ca359391535 100644 --- a/drivers/media/dvb-frontends/ts2020.c +++ b/drivers/media/dvb-frontends/ts2020.c | |||
@@ -369,7 +369,7 @@ static int ts2020_read_tuner_gain(struct dvb_frontend *fe, unsigned v_agc, | |||
369 | gain2 = clamp_t(long, gain2, 0, 13); | 369 | gain2 = clamp_t(long, gain2, 0, 13); |
370 | v_agc = clamp_t(long, v_agc, 400, 1100); | 370 | v_agc = clamp_t(long, v_agc, 400, 1100); |
371 | 371 | ||
372 | *_gain = -(gain1 * 2330 + | 372 | *_gain = -((__s64)gain1 * 2330 + |
373 | gain2 * 3500 + | 373 | gain2 * 3500 + |
374 | v_agc * 24 / 10 * 10 + | 374 | v_agc * 24 / 10 * 10 + |
375 | 10000); | 375 | 10000); |
@@ -387,7 +387,7 @@ static int ts2020_read_tuner_gain(struct dvb_frontend *fe, unsigned v_agc, | |||
387 | gain3 = clamp_t(long, gain3, 0, 6); | 387 | gain3 = clamp_t(long, gain3, 0, 6); |
388 | v_agc = clamp_t(long, v_agc, 600, 1600); | 388 | v_agc = clamp_t(long, v_agc, 600, 1600); |
389 | 389 | ||
390 | *_gain = -(gain1 * 2650 + | 390 | *_gain = -((__s64)gain1 * 2650 + |
391 | gain2 * 3380 + | 391 | gain2 * 3380 + |
392 | gain3 * 2850 + | 392 | gain3 * 2850 + |
393 | v_agc * 176 / 100 * 10 - | 393 | v_agc * 176 / 100 * 10 - |
diff --git a/drivers/media/platform/soc_camera/soc_scale_crop.c b/drivers/media/platform/soc_camera/soc_scale_crop.c index bda29bc1b933..2f74a5ac0147 100644 --- a/drivers/media/platform/soc_camera/soc_scale_crop.c +++ b/drivers/media/platform/soc_camera/soc_scale_crop.c | |||
@@ -405,3 +405,7 @@ void soc_camera_calc_client_output(struct soc_camera_device *icd, | |||
405 | mf->height = soc_camera_shift_scale(rect->height, shift, scale_v); | 405 | mf->height = soc_camera_shift_scale(rect->height, shift, scale_v); |
406 | } | 406 | } |
407 | EXPORT_SYMBOL(soc_camera_calc_client_output); | 407 | EXPORT_SYMBOL(soc_camera_calc_client_output); |
408 | |||
409 | MODULE_DESCRIPTION("soc-camera scaling-cropping functions"); | ||
410 | MODULE_AUTHOR("Guennadi Liakhovetski <kernel@pengutronix.de>"); | ||
411 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/media/tuners/r820t.c b/drivers/media/tuners/r820t.c index a7a8452e99d2..c1ce8d3ce877 100644 --- a/drivers/media/tuners/r820t.c +++ b/drivers/media/tuners/r820t.c | |||
@@ -410,9 +410,11 @@ static int r820t_write(struct r820t_priv *priv, u8 reg, const u8 *val, | |||
410 | return 0; | 410 | return 0; |
411 | } | 411 | } |
412 | 412 | ||
413 | static int r820t_write_reg(struct r820t_priv *priv, u8 reg, u8 val) | 413 | static inline int r820t_write_reg(struct r820t_priv *priv, u8 reg, u8 val) |
414 | { | 414 | { |
415 | return r820t_write(priv, reg, &val, 1); | 415 | u8 tmp = val; /* work around GCC PR81715 with asan-stack=1 */ |
416 | |||
417 | return r820t_write(priv, reg, &tmp, 1); | ||
416 | } | 418 | } |
417 | 419 | ||
418 | static int r820t_read_cache_reg(struct r820t_priv *priv, int reg) | 420 | static int r820t_read_cache_reg(struct r820t_priv *priv, int reg) |
@@ -425,17 +427,18 @@ static int r820t_read_cache_reg(struct r820t_priv *priv, int reg) | |||
425 | return -EINVAL; | 427 | return -EINVAL; |
426 | } | 428 | } |
427 | 429 | ||
428 | static int r820t_write_reg_mask(struct r820t_priv *priv, u8 reg, u8 val, | 430 | static inline int r820t_write_reg_mask(struct r820t_priv *priv, u8 reg, u8 val, |
429 | u8 bit_mask) | 431 | u8 bit_mask) |
430 | { | 432 | { |
433 | u8 tmp = val; | ||
431 | int rc = r820t_read_cache_reg(priv, reg); | 434 | int rc = r820t_read_cache_reg(priv, reg); |
432 | 435 | ||
433 | if (rc < 0) | 436 | if (rc < 0) |
434 | return rc; | 437 | return rc; |
435 | 438 | ||
436 | val = (rc & ~bit_mask) | (val & bit_mask); | 439 | tmp = (rc & ~bit_mask) | (tmp & bit_mask); |
437 | 440 | ||
438 | return r820t_write(priv, reg, &val, 1); | 441 | return r820t_write(priv, reg, &tmp, 1); |
439 | } | 442 | } |
440 | 443 | ||
441 | static int r820t_read(struct r820t_priv *priv, u8 reg, u8 *val, int len) | 444 | static int r820t_read(struct r820t_priv *priv, u8 reg, u8 *val, int len) |
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c index 3721ee63b8fb..09c97847bf95 100644 --- a/drivers/media/usb/dvb-usb-v2/lmedm04.c +++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c | |||
@@ -503,18 +503,23 @@ static int lme2510_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid, | |||
503 | 503 | ||
504 | static int lme2510_return_status(struct dvb_usb_device *d) | 504 | static int lme2510_return_status(struct dvb_usb_device *d) |
505 | { | 505 | { |
506 | int ret = 0; | 506 | int ret; |
507 | u8 *data; | 507 | u8 *data; |
508 | 508 | ||
509 | data = kzalloc(10, GFP_KERNEL); | 509 | data = kzalloc(6, GFP_KERNEL); |
510 | if (!data) | 510 | if (!data) |
511 | return -ENOMEM; | 511 | return -ENOMEM; |
512 | 512 | ||
513 | ret |= usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), | 513 | ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), |
514 | 0x06, 0x80, 0x0302, 0x00, data, 0x0006, 200); | 514 | 0x06, 0x80, 0x0302, 0x00, |
515 | info("Firmware Status: %x (%x)", ret , data[2]); | 515 | data, 0x6, 200); |
516 | if (ret != 6) | ||
517 | ret = -EINVAL; | ||
518 | else | ||
519 | ret = data[2]; | ||
520 | |||
521 | info("Firmware Status: %6ph", data); | ||
516 | 522 | ||
517 | ret = (ret < 0) ? -ENODEV : data[2]; | ||
518 | kfree(data); | 523 | kfree(data); |
519 | return ret; | 524 | return ret; |
520 | } | 525 | } |
@@ -1078,8 +1083,6 @@ static int dm04_lme2510_frontend_attach(struct dvb_usb_adapter *adap) | |||
1078 | 1083 | ||
1079 | if (adap->fe[0]) { | 1084 | if (adap->fe[0]) { |
1080 | info("FE Found M88RS2000"); | 1085 | info("FE Found M88RS2000"); |
1081 | dvb_attach(ts2020_attach, adap->fe[0], &ts2020_config, | ||
1082 | &d->i2c_adap); | ||
1083 | st->i2c_tuner_gate_w = 5; | 1086 | st->i2c_tuner_gate_w = 5; |
1084 | st->i2c_tuner_gate_r = 5; | 1087 | st->i2c_tuner_gate_r = 5; |
1085 | st->i2c_tuner_addr = 0x60; | 1088 | st->i2c_tuner_addr = 0x60; |
@@ -1145,17 +1148,18 @@ static int dm04_lme2510_tuner(struct dvb_usb_adapter *adap) | |||
1145 | ret = st->tuner_config; | 1148 | ret = st->tuner_config; |
1146 | break; | 1149 | break; |
1147 | case TUNER_RS2000: | 1150 | case TUNER_RS2000: |
1148 | ret = st->tuner_config; | 1151 | if (dvb_attach(ts2020_attach, adap->fe[0], |
1152 | &ts2020_config, &d->i2c_adap)) | ||
1153 | ret = st->tuner_config; | ||
1149 | break; | 1154 | break; |
1150 | default: | 1155 | default: |
1151 | break; | 1156 | break; |
1152 | } | 1157 | } |
1153 | 1158 | ||
1154 | if (ret) | 1159 | if (ret) { |
1155 | info("TUN Found %s tuner", tun_msg[ret]); | 1160 | info("TUN Found %s tuner", tun_msg[ret]); |
1156 | else { | 1161 | } else { |
1157 | info("TUN No tuner found --- resetting device"); | 1162 | info("TUN No tuner found"); |
1158 | lme_coldreset(d); | ||
1159 | return -ENODEV; | 1163 | return -ENODEV; |
1160 | } | 1164 | } |
1161 | 1165 | ||
@@ -1199,6 +1203,7 @@ static int lme2510_get_adapter_count(struct dvb_usb_device *d) | |||
1199 | static int lme2510_identify_state(struct dvb_usb_device *d, const char **name) | 1203 | static int lme2510_identify_state(struct dvb_usb_device *d, const char **name) |
1200 | { | 1204 | { |
1201 | struct lme2510_state *st = d->priv; | 1205 | struct lme2510_state *st = d->priv; |
1206 | int status; | ||
1202 | 1207 | ||
1203 | usb_reset_configuration(d->udev); | 1208 | usb_reset_configuration(d->udev); |
1204 | 1209 | ||
@@ -1207,12 +1212,16 @@ static int lme2510_identify_state(struct dvb_usb_device *d, const char **name) | |||
1207 | 1212 | ||
1208 | st->dvb_usb_lme2510_firmware = dvb_usb_lme2510_firmware; | 1213 | st->dvb_usb_lme2510_firmware = dvb_usb_lme2510_firmware; |
1209 | 1214 | ||
1210 | if (lme2510_return_status(d) == 0x44) { | 1215 | status = lme2510_return_status(d); |
1216 | if (status == 0x44) { | ||
1211 | *name = lme_firmware_switch(d, 0); | 1217 | *name = lme_firmware_switch(d, 0); |
1212 | return COLD; | 1218 | return COLD; |
1213 | } | 1219 | } |
1214 | 1220 | ||
1215 | return 0; | 1221 | if (status != 0x47) |
1222 | return -EINVAL; | ||
1223 | |||
1224 | return WARM; | ||
1216 | } | 1225 | } |
1217 | 1226 | ||
1218 | static int lme2510_get_stream_config(struct dvb_frontend *fe, u8 *ts_type, | 1227 | static int lme2510_get_stream_config(struct dvb_frontend *fe, u8 *ts_type, |
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c index ab7151181728..d00b27ed73a6 100644 --- a/drivers/media/usb/dvb-usb/cxusb.c +++ b/drivers/media/usb/dvb-usb/cxusb.c | |||
@@ -818,6 +818,8 @@ static int dvico_bluebird_xc2028_callback(void *ptr, int component, | |||
818 | case XC2028_RESET_CLK: | 818 | case XC2028_RESET_CLK: |
819 | deb_info("%s: XC2028_RESET_CLK %d\n", __func__, arg); | 819 | deb_info("%s: XC2028_RESET_CLK %d\n", __func__, arg); |
820 | break; | 820 | break; |
821 | case XC2028_I2C_FLUSH: | ||
822 | break; | ||
821 | default: | 823 | default: |
822 | deb_info("%s: unknown command %d, arg %d\n", __func__, | 824 | deb_info("%s: unknown command %d, arg %d\n", __func__, |
823 | command, arg); | 825 | command, arg); |
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c index 7df0707a0455..38c03283a441 100644 --- a/drivers/media/usb/dvb-usb/dib0700_devices.c +++ b/drivers/media/usb/dvb-usb/dib0700_devices.c | |||
@@ -431,6 +431,7 @@ static int stk7700ph_xc3028_callback(void *ptr, int component, | |||
431 | state->dib7000p_ops.set_gpio(adap->fe_adap[0].fe, 8, 0, 1); | 431 | state->dib7000p_ops.set_gpio(adap->fe_adap[0].fe, 8, 0, 1); |
432 | break; | 432 | break; |
433 | case XC2028_RESET_CLK: | 433 | case XC2028_RESET_CLK: |
434 | case XC2028_I2C_FLUSH: | ||
434 | break; | 435 | break; |
435 | default: | 436 | default: |
436 | err("%s: unknown command %d, arg %d\n", __func__, | 437 | err("%s: unknown command %d, arg %d\n", __func__, |
diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c index 29428bef272c..3bbc77aa6a33 100644 --- a/drivers/media/usb/usbtv/usbtv-core.c +++ b/drivers/media/usb/usbtv/usbtv-core.c | |||
@@ -127,6 +127,7 @@ static void usbtv_disconnect(struct usb_interface *intf) | |||
127 | 127 | ||
128 | static struct usb_device_id usbtv_id_table[] = { | 128 | static struct usb_device_id usbtv_id_table[] = { |
129 | { USB_DEVICE(0x1b71, 0x3002) }, | 129 | { USB_DEVICE(0x1b71, 0x3002) }, |
130 | { USB_DEVICE(0x1f71, 0x3301) }, | ||
130 | {} | 131 | {} |
131 | }; | 132 | }; |
132 | MODULE_DEVICE_TABLE(usb, usbtv_id_table); | 133 | MODULE_DEVICE_TABLE(usb, usbtv_id_table); |
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c index 91d709efef7a..cafc34938a79 100644 --- a/drivers/media/usb/usbvision/usbvision-video.c +++ b/drivers/media/usb/usbvision/usbvision-video.c | |||
@@ -1461,6 +1461,13 @@ static int usbvision_probe(struct usb_interface *intf, | |||
1461 | printk(KERN_INFO "%s: %s found\n", __func__, | 1461 | printk(KERN_INFO "%s: %s found\n", __func__, |
1462 | usbvision_device_data[model].model_string); | 1462 | usbvision_device_data[model].model_string); |
1463 | 1463 | ||
1464 | /* | ||
1465 | * this is a security check. | ||
1466 | * an exploit using an incorrect bInterfaceNumber is known | ||
1467 | */ | ||
1468 | if (ifnum >= USB_MAXINTERFACES || !dev->actconfig->interface[ifnum]) | ||
1469 | return -ENODEV; | ||
1470 | |||
1464 | if (usbvision_device_data[model].interface >= 0) | 1471 | if (usbvision_device_data[model].interface >= 0) |
1465 | interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0]; | 1472 | interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0]; |
1466 | else if (ifnum < dev->actconfig->desc.bNumInterfaces) | 1473 | else if (ifnum < dev->actconfig->desc.bNumInterfaces) |
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c index 4379b949bb93..943f90e392a7 100644 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c | |||
@@ -18,8 +18,18 @@ | |||
18 | #include <linux/videodev2.h> | 18 | #include <linux/videodev2.h> |
19 | #include <linux/v4l2-subdev.h> | 19 | #include <linux/v4l2-subdev.h> |
20 | #include <media/v4l2-dev.h> | 20 | #include <media/v4l2-dev.h> |
21 | #include <media/v4l2-fh.h> | ||
22 | #include <media/v4l2-ctrls.h> | ||
21 | #include <media/v4l2-ioctl.h> | 23 | #include <media/v4l2-ioctl.h> |
22 | 24 | ||
25 | /* Use the same argument order as copy_in_user */ | ||
26 | #define assign_in_user(to, from) \ | ||
27 | ({ \ | ||
28 | typeof(*from) __assign_tmp; \ | ||
29 | \ | ||
30 | get_user(__assign_tmp, from) || put_user(__assign_tmp, to); \ | ||
31 | }) | ||
32 | |||
23 | static long native_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | 33 | static long native_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
24 | { | 34 | { |
25 | long ret = -ENOIOCTLCMD; | 35 | long ret = -ENOIOCTLCMD; |
@@ -33,131 +43,88 @@ static long native_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
33 | 43 | ||
34 | struct v4l2_clip32 { | 44 | struct v4l2_clip32 { |
35 | struct v4l2_rect c; | 45 | struct v4l2_rect c; |
36 | compat_caddr_t next; | 46 | compat_caddr_t next; |
37 | }; | 47 | }; |
38 | 48 | ||
39 | struct v4l2_window32 { | 49 | struct v4l2_window32 { |
40 | struct v4l2_rect w; | 50 | struct v4l2_rect w; |
41 | __u32 field; /* enum v4l2_field */ | 51 | __u32 field; /* enum v4l2_field */ |
42 | __u32 chromakey; | 52 | __u32 chromakey; |
43 | compat_caddr_t clips; /* actually struct v4l2_clip32 * */ | 53 | compat_caddr_t clips; /* actually struct v4l2_clip32 * */ |
44 | __u32 clipcount; | 54 | __u32 clipcount; |
45 | compat_caddr_t bitmap; | 55 | compat_caddr_t bitmap; |
56 | __u8 global_alpha; | ||
46 | }; | 57 | }; |
47 | 58 | ||
48 | static int get_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user *up) | 59 | static int get_v4l2_window32(struct v4l2_window __user *kp, |
49 | { | 60 | struct v4l2_window32 __user *up, |
50 | if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_window32)) || | 61 | void __user *aux_buf, u32 aux_space) |
51 | copy_from_user(&kp->w, &up->w, sizeof(up->w)) || | ||
52 | get_user(kp->field, &up->field) || | ||
53 | get_user(kp->chromakey, &up->chromakey) || | ||
54 | get_user(kp->clipcount, &up->clipcount)) | ||
55 | return -EFAULT; | ||
56 | if (kp->clipcount > 2048) | ||
57 | return -EINVAL; | ||
58 | if (kp->clipcount) { | ||
59 | struct v4l2_clip32 __user *uclips; | ||
60 | struct v4l2_clip __user *kclips; | ||
61 | int n = kp->clipcount; | ||
62 | compat_caddr_t p; | ||
63 | |||
64 | if (get_user(p, &up->clips)) | ||
65 | return -EFAULT; | ||
66 | uclips = compat_ptr(p); | ||
67 | kclips = compat_alloc_user_space(n * sizeof(struct v4l2_clip)); | ||
68 | kp->clips = kclips; | ||
69 | while (--n >= 0) { | ||
70 | if (copy_in_user(&kclips->c, &uclips->c, sizeof(uclips->c))) | ||
71 | return -EFAULT; | ||
72 | if (put_user(n ? kclips + 1 : NULL, &kclips->next)) | ||
73 | return -EFAULT; | ||
74 | uclips += 1; | ||
75 | kclips += 1; | ||
76 | } | ||
77 | } else | ||
78 | kp->clips = NULL; | ||
79 | return 0; | ||
80 | } | ||
81 | |||
82 | static int put_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user *up) | ||
83 | { | ||
84 | if (copy_to_user(&up->w, &kp->w, sizeof(kp->w)) || | ||
85 | put_user(kp->field, &up->field) || | ||
86 | put_user(kp->chromakey, &up->chromakey) || | ||
87 | put_user(kp->clipcount, &up->clipcount)) | ||
88 | return -EFAULT; | ||
89 | return 0; | ||
90 | } | ||
91 | |||
92 | static inline int get_v4l2_pix_format(struct v4l2_pix_format *kp, struct v4l2_pix_format __user *up) | ||
93 | { | ||
94 | if (copy_from_user(kp, up, sizeof(struct v4l2_pix_format))) | ||
95 | return -EFAULT; | ||
96 | return 0; | ||
97 | } | ||
98 | |||
99 | static inline int get_v4l2_pix_format_mplane(struct v4l2_pix_format_mplane *kp, | ||
100 | struct v4l2_pix_format_mplane __user *up) | ||
101 | { | ||
102 | if (copy_from_user(kp, up, sizeof(struct v4l2_pix_format_mplane))) | ||
103 | return -EFAULT; | ||
104 | return 0; | ||
105 | } | ||
106 | |||
107 | static inline int put_v4l2_pix_format(struct v4l2_pix_format *kp, struct v4l2_pix_format __user *up) | ||
108 | { | ||
109 | if (copy_to_user(up, kp, sizeof(struct v4l2_pix_format))) | ||
110 | return -EFAULT; | ||
111 | return 0; | ||
112 | } | ||
113 | |||
114 | static inline int put_v4l2_pix_format_mplane(struct v4l2_pix_format_mplane *kp, | ||
115 | struct v4l2_pix_format_mplane __user *up) | ||
116 | { | 62 | { |
117 | if (copy_to_user(up, kp, sizeof(struct v4l2_pix_format_mplane))) | 63 | struct v4l2_clip32 __user *uclips; |
64 | struct v4l2_clip __user *kclips; | ||
65 | compat_caddr_t p; | ||
66 | u32 clipcount; | ||
67 | |||
68 | if (!access_ok(VERIFY_READ, up, sizeof(*up)) || | ||
69 | copy_in_user(&kp->w, &up->w, sizeof(up->w)) || | ||
70 | assign_in_user(&kp->field, &up->field) || | ||
71 | assign_in_user(&kp->chromakey, &up->chromakey) || | ||
72 | assign_in_user(&kp->global_alpha, &up->global_alpha) || | ||
73 | get_user(clipcount, &up->clipcount) || | ||
74 | put_user(clipcount, &kp->clipcount)) | ||
118 | return -EFAULT; | 75 | return -EFAULT; |
119 | return 0; | 76 | if (clipcount > 2048) |
120 | } | 77 | return -EINVAL; |
78 | if (!clipcount) | ||
79 | return put_user(NULL, &kp->clips); | ||
121 | 80 | ||
122 | static inline int get_v4l2_vbi_format(struct v4l2_vbi_format *kp, struct v4l2_vbi_format __user *up) | 81 | if (get_user(p, &up->clips)) |
123 | { | ||
124 | if (copy_from_user(kp, up, sizeof(struct v4l2_vbi_format))) | ||
125 | return -EFAULT; | 82 | return -EFAULT; |
126 | return 0; | 83 | uclips = compat_ptr(p); |
127 | } | 84 | if (aux_space < clipcount * sizeof(*kclips)) |
128 | |||
129 | static inline int put_v4l2_vbi_format(struct v4l2_vbi_format *kp, struct v4l2_vbi_format __user *up) | ||
130 | { | ||
131 | if (copy_to_user(up, kp, sizeof(struct v4l2_vbi_format))) | ||
132 | return -EFAULT; | 85 | return -EFAULT; |
133 | return 0; | 86 | kclips = aux_buf; |
134 | } | 87 | if (put_user(kclips, &kp->clips)) |
135 | |||
136 | static inline int get_v4l2_sliced_vbi_format(struct v4l2_sliced_vbi_format *kp, struct v4l2_sliced_vbi_format __user *up) | ||
137 | { | ||
138 | if (copy_from_user(kp, up, sizeof(struct v4l2_sliced_vbi_format))) | ||
139 | return -EFAULT; | 88 | return -EFAULT; |
140 | return 0; | ||
141 | } | ||
142 | 89 | ||
143 | static inline int put_v4l2_sliced_vbi_format(struct v4l2_sliced_vbi_format *kp, struct v4l2_sliced_vbi_format __user *up) | 90 | while (clipcount--) { |
144 | { | 91 | if (copy_in_user(&kclips->c, &uclips->c, sizeof(uclips->c))) |
145 | if (copy_to_user(up, kp, sizeof(struct v4l2_sliced_vbi_format))) | 92 | return -EFAULT; |
146 | return -EFAULT; | 93 | if (put_user(clipcount ? kclips + 1 : NULL, &kclips->next)) |
94 | return -EFAULT; | ||
95 | uclips++; | ||
96 | kclips++; | ||
97 | } | ||
147 | return 0; | 98 | return 0; |
148 | } | 99 | } |
149 | 100 | ||
150 | static inline int get_v4l2_sdr_format(struct v4l2_sdr_format *kp, struct v4l2_sdr_format __user *up) | 101 | static int put_v4l2_window32(struct v4l2_window __user *kp, |
102 | struct v4l2_window32 __user *up) | ||
151 | { | 103 | { |
152 | if (copy_from_user(kp, up, sizeof(struct v4l2_sdr_format))) | 104 | struct v4l2_clip __user *kclips = kp->clips; |
105 | struct v4l2_clip32 __user *uclips; | ||
106 | compat_caddr_t p; | ||
107 | u32 clipcount; | ||
108 | |||
109 | if (copy_in_user(&up->w, &kp->w, sizeof(kp->w)) || | ||
110 | assign_in_user(&up->field, &kp->field) || | ||
111 | assign_in_user(&up->chromakey, &kp->chromakey) || | ||
112 | assign_in_user(&up->global_alpha, &kp->global_alpha) || | ||
113 | get_user(clipcount, &kp->clipcount) || | ||
114 | put_user(clipcount, &up->clipcount)) | ||
153 | return -EFAULT; | 115 | return -EFAULT; |
154 | return 0; | 116 | if (!clipcount) |
155 | } | 117 | return 0; |
156 | 118 | ||
157 | static inline int put_v4l2_sdr_format(struct v4l2_sdr_format *kp, struct v4l2_sdr_format __user *up) | 119 | if (get_user(p, &up->clips)) |
158 | { | ||
159 | if (copy_to_user(up, kp, sizeof(struct v4l2_sdr_format))) | ||
160 | return -EFAULT; | 120 | return -EFAULT; |
121 | uclips = compat_ptr(p); | ||
122 | while (clipcount--) { | ||
123 | if (copy_in_user(&uclips->c, &kclips->c, sizeof(uclips->c))) | ||
124 | return -EFAULT; | ||
125 | uclips++; | ||
126 | kclips++; | ||
127 | } | ||
161 | return 0; | 128 | return 0; |
162 | } | 129 | } |
163 | 130 | ||
@@ -191,97 +158,158 @@ struct v4l2_create_buffers32 { | |||
191 | __u32 reserved[8]; | 158 | __u32 reserved[8]; |
192 | }; | 159 | }; |
193 | 160 | ||
194 | static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) | 161 | static int __bufsize_v4l2_format(struct v4l2_format32 __user *up, u32 *size) |
162 | { | ||
163 | u32 type; | ||
164 | |||
165 | if (get_user(type, &up->type)) | ||
166 | return -EFAULT; | ||
167 | |||
168 | switch (type) { | ||
169 | case V4L2_BUF_TYPE_VIDEO_OVERLAY: | ||
170 | case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: { | ||
171 | u32 clipcount; | ||
172 | |||
173 | if (get_user(clipcount, &up->fmt.win.clipcount)) | ||
174 | return -EFAULT; | ||
175 | if (clipcount > 2048) | ||
176 | return -EINVAL; | ||
177 | *size = clipcount * sizeof(struct v4l2_clip); | ||
178 | return 0; | ||
179 | } | ||
180 | default: | ||
181 | *size = 0; | ||
182 | return 0; | ||
183 | } | ||
184 | } | ||
185 | |||
186 | static int bufsize_v4l2_format(struct v4l2_format32 __user *up, u32 *size) | ||
195 | { | 187 | { |
196 | if (get_user(kp->type, &up->type)) | 188 | if (!access_ok(VERIFY_READ, up, sizeof(*up))) |
197 | return -EFAULT; | 189 | return -EFAULT; |
190 | return __bufsize_v4l2_format(up, size); | ||
191 | } | ||
198 | 192 | ||
199 | switch (kp->type) { | 193 | static int __get_v4l2_format32(struct v4l2_format __user *kp, |
194 | struct v4l2_format32 __user *up, | ||
195 | void __user *aux_buf, u32 aux_space) | ||
196 | { | ||
197 | u32 type; | ||
198 | |||
199 | if (get_user(type, &up->type) || put_user(type, &kp->type)) | ||
200 | return -EFAULT; | ||
201 | |||
202 | switch (type) { | ||
200 | case V4L2_BUF_TYPE_VIDEO_CAPTURE: | 203 | case V4L2_BUF_TYPE_VIDEO_CAPTURE: |
201 | case V4L2_BUF_TYPE_VIDEO_OUTPUT: | 204 | case V4L2_BUF_TYPE_VIDEO_OUTPUT: |
202 | return get_v4l2_pix_format(&kp->fmt.pix, &up->fmt.pix); | 205 | return copy_in_user(&kp->fmt.pix, &up->fmt.pix, |
206 | sizeof(kp->fmt.pix)) ? -EFAULT : 0; | ||
203 | case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: | 207 | case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: |
204 | case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: | 208 | case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: |
205 | return get_v4l2_pix_format_mplane(&kp->fmt.pix_mp, | 209 | return copy_in_user(&kp->fmt.pix_mp, &up->fmt.pix_mp, |
206 | &up->fmt.pix_mp); | 210 | sizeof(kp->fmt.pix_mp)) ? -EFAULT : 0; |
207 | case V4L2_BUF_TYPE_VIDEO_OVERLAY: | 211 | case V4L2_BUF_TYPE_VIDEO_OVERLAY: |
208 | case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: | 212 | case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: |
209 | return get_v4l2_window32(&kp->fmt.win, &up->fmt.win); | 213 | return get_v4l2_window32(&kp->fmt.win, &up->fmt.win, |
214 | aux_buf, aux_space); | ||
210 | case V4L2_BUF_TYPE_VBI_CAPTURE: | 215 | case V4L2_BUF_TYPE_VBI_CAPTURE: |
211 | case V4L2_BUF_TYPE_VBI_OUTPUT: | 216 | case V4L2_BUF_TYPE_VBI_OUTPUT: |
212 | return get_v4l2_vbi_format(&kp->fmt.vbi, &up->fmt.vbi); | 217 | return copy_in_user(&kp->fmt.vbi, &up->fmt.vbi, |
218 | sizeof(kp->fmt.vbi)) ? -EFAULT : 0; | ||
213 | case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: | 219 | case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: |
214 | case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: | 220 | case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: |
215 | return get_v4l2_sliced_vbi_format(&kp->fmt.sliced, &up->fmt.sliced); | 221 | return copy_in_user(&kp->fmt.sliced, &up->fmt.sliced, |
222 | sizeof(kp->fmt.sliced)) ? -EFAULT : 0; | ||
216 | case V4L2_BUF_TYPE_SDR_CAPTURE: | 223 | case V4L2_BUF_TYPE_SDR_CAPTURE: |
217 | case V4L2_BUF_TYPE_SDR_OUTPUT: | 224 | case V4L2_BUF_TYPE_SDR_OUTPUT: |
218 | return get_v4l2_sdr_format(&kp->fmt.sdr, &up->fmt.sdr); | 225 | return copy_in_user(&kp->fmt.sdr, &up->fmt.sdr, |
226 | sizeof(kp->fmt.sdr)) ? -EFAULT : 0; | ||
219 | default: | 227 | default: |
220 | pr_info("compat_ioctl32: unexpected VIDIOC_FMT type %d\n", | ||
221 | kp->type); | ||
222 | return -EINVAL; | 228 | return -EINVAL; |
223 | } | 229 | } |
224 | } | 230 | } |
225 | 231 | ||
226 | static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) | 232 | static int get_v4l2_format32(struct v4l2_format __user *kp, |
233 | struct v4l2_format32 __user *up, | ||
234 | void __user *aux_buf, u32 aux_space) | ||
227 | { | 235 | { |
228 | if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32))) | 236 | if (!access_ok(VERIFY_READ, up, sizeof(*up))) |
229 | return -EFAULT; | 237 | return -EFAULT; |
230 | return __get_v4l2_format32(kp, up); | 238 | return __get_v4l2_format32(kp, up, aux_buf, aux_space); |
231 | } | 239 | } |
232 | 240 | ||
233 | static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up) | 241 | static int bufsize_v4l2_create(struct v4l2_create_buffers32 __user *up, |
242 | u32 *size) | ||
234 | { | 243 | { |
235 | if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)) || | 244 | if (!access_ok(VERIFY_READ, up, sizeof(*up))) |
236 | copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format))) | ||
237 | return -EFAULT; | 245 | return -EFAULT; |
238 | return __get_v4l2_format32(&kp->format, &up->format); | 246 | return __bufsize_v4l2_format(&up->format, size); |
239 | } | 247 | } |
240 | 248 | ||
241 | static int __put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) | 249 | static int get_v4l2_create32(struct v4l2_create_buffers __user *kp, |
250 | struct v4l2_create_buffers32 __user *up, | ||
251 | void __user *aux_buf, u32 aux_space) | ||
242 | { | 252 | { |
243 | if (put_user(kp->type, &up->type)) | 253 | if (!access_ok(VERIFY_READ, up, sizeof(*up)) || |
254 | copy_in_user(kp, up, | ||
255 | offsetof(struct v4l2_create_buffers32, format))) | ||
244 | return -EFAULT; | 256 | return -EFAULT; |
257 | return __get_v4l2_format32(&kp->format, &up->format, | ||
258 | aux_buf, aux_space); | ||
259 | } | ||
260 | |||
261 | static int __put_v4l2_format32(struct v4l2_format __user *kp, | ||
262 | struct v4l2_format32 __user *up) | ||
263 | { | ||
264 | u32 type; | ||
245 | 265 | ||
246 | switch (kp->type) { | 266 | if (get_user(type, &kp->type)) |
267 | return -EFAULT; | ||
268 | |||
269 | switch (type) { | ||
247 | case V4L2_BUF_TYPE_VIDEO_CAPTURE: | 270 | case V4L2_BUF_TYPE_VIDEO_CAPTURE: |
248 | case V4L2_BUF_TYPE_VIDEO_OUTPUT: | 271 | case V4L2_BUF_TYPE_VIDEO_OUTPUT: |
249 | return put_v4l2_pix_format(&kp->fmt.pix, &up->fmt.pix); | 272 | return copy_in_user(&up->fmt.pix, &kp->fmt.pix, |
273 | sizeof(kp->fmt.pix)) ? -EFAULT : 0; | ||
250 | case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: | 274 | case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: |
251 | case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: | 275 | case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: |
252 | return put_v4l2_pix_format_mplane(&kp->fmt.pix_mp, | 276 | return copy_in_user(&up->fmt.pix_mp, &kp->fmt.pix_mp, |
253 | &up->fmt.pix_mp); | 277 | sizeof(kp->fmt.pix_mp)) ? -EFAULT : 0; |
254 | case V4L2_BUF_TYPE_VIDEO_OVERLAY: | 278 | case V4L2_BUF_TYPE_VIDEO_OVERLAY: |
255 | case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: | 279 | case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: |
256 | return put_v4l2_window32(&kp->fmt.win, &up->fmt.win); | 280 | return put_v4l2_window32(&kp->fmt.win, &up->fmt.win); |
257 | case V4L2_BUF_TYPE_VBI_CAPTURE: | 281 | case V4L2_BUF_TYPE_VBI_CAPTURE: |
258 | case V4L2_BUF_TYPE_VBI_OUTPUT: | 282 | case V4L2_BUF_TYPE_VBI_OUTPUT: |
259 | return put_v4l2_vbi_format(&kp->fmt.vbi, &up->fmt.vbi); | 283 | return copy_in_user(&up->fmt.vbi, &kp->fmt.vbi, |
284 | sizeof(kp->fmt.vbi)) ? -EFAULT : 0; | ||
260 | case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: | 285 | case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: |
261 | case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: | 286 | case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: |
262 | return put_v4l2_sliced_vbi_format(&kp->fmt.sliced, &up->fmt.sliced); | 287 | return copy_in_user(&up->fmt.sliced, &kp->fmt.sliced, |
288 | sizeof(kp->fmt.sliced)) ? -EFAULT : 0; | ||
263 | case V4L2_BUF_TYPE_SDR_CAPTURE: | 289 | case V4L2_BUF_TYPE_SDR_CAPTURE: |
264 | case V4L2_BUF_TYPE_SDR_OUTPUT: | 290 | case V4L2_BUF_TYPE_SDR_OUTPUT: |
265 | return put_v4l2_sdr_format(&kp->fmt.sdr, &up->fmt.sdr); | 291 | return copy_in_user(&up->fmt.sdr, &kp->fmt.sdr, |
292 | sizeof(kp->fmt.sdr)) ? -EFAULT : 0; | ||
266 | default: | 293 | default: |
267 | pr_info("compat_ioctl32: unexpected VIDIOC_FMT type %d\n", | ||
268 | kp->type); | ||
269 | return -EINVAL; | 294 | return -EINVAL; |
270 | } | 295 | } |
271 | } | 296 | } |
272 | 297 | ||
273 | static int put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) | 298 | static int put_v4l2_format32(struct v4l2_format __user *kp, |
299 | struct v4l2_format32 __user *up) | ||
274 | { | 300 | { |
275 | if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_format32))) | 301 | if (!access_ok(VERIFY_WRITE, up, sizeof(*up))) |
276 | return -EFAULT; | 302 | return -EFAULT; |
277 | return __put_v4l2_format32(kp, up); | 303 | return __put_v4l2_format32(kp, up); |
278 | } | 304 | } |
279 | 305 | ||
280 | static int put_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up) | 306 | static int put_v4l2_create32(struct v4l2_create_buffers __user *kp, |
307 | struct v4l2_create_buffers32 __user *up) | ||
281 | { | 308 | { |
282 | if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_create_buffers32)) || | 309 | if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || |
283 | copy_to_user(up, kp, offsetof(struct v4l2_create_buffers32, format)) || | 310 | copy_in_user(up, kp, |
284 | copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved))) | 311 | offsetof(struct v4l2_create_buffers32, format)) || |
312 | copy_in_user(up->reserved, kp->reserved, sizeof(kp->reserved))) | ||
285 | return -EFAULT; | 313 | return -EFAULT; |
286 | return __put_v4l2_format32(&kp->format, &up->format); | 314 | return __put_v4l2_format32(&kp->format, &up->format); |
287 | } | 315 | } |
@@ -295,25 +323,28 @@ struct v4l2_standard32 { | |||
295 | __u32 reserved[4]; | 323 | __u32 reserved[4]; |
296 | }; | 324 | }; |
297 | 325 | ||
298 | static int get_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 __user *up) | 326 | static int get_v4l2_standard32(struct v4l2_standard __user *kp, |
327 | struct v4l2_standard32 __user *up) | ||
299 | { | 328 | { |
300 | /* other fields are not set by the user, nor used by the driver */ | 329 | /* other fields are not set by the user, nor used by the driver */ |
301 | if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_standard32)) || | 330 | if (!access_ok(VERIFY_READ, up, sizeof(*up)) || |
302 | get_user(kp->index, &up->index)) | 331 | assign_in_user(&kp->index, &up->index)) |
303 | return -EFAULT; | 332 | return -EFAULT; |
304 | return 0; | 333 | return 0; |
305 | } | 334 | } |
306 | 335 | ||
307 | static int put_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 __user *up) | 336 | static int put_v4l2_standard32(struct v4l2_standard __user *kp, |
337 | struct v4l2_standard32 __user *up) | ||
308 | { | 338 | { |
309 | if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_standard32)) || | 339 | if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || |
310 | put_user(kp->index, &up->index) || | 340 | assign_in_user(&up->index, &kp->index) || |
311 | put_user(kp->id, &up->id) || | 341 | assign_in_user(&up->id, &kp->id) || |
312 | copy_to_user(up->name, kp->name, 24) || | 342 | copy_in_user(up->name, kp->name, sizeof(up->name)) || |
313 | copy_to_user(&up->frameperiod, &kp->frameperiod, sizeof(kp->frameperiod)) || | 343 | copy_in_user(&up->frameperiod, &kp->frameperiod, |
314 | put_user(kp->framelines, &up->framelines) || | 344 | sizeof(up->frameperiod)) || |
315 | copy_to_user(up->reserved, kp->reserved, 4 * sizeof(__u32))) | 345 | assign_in_user(&up->framelines, &kp->framelines) || |
316 | return -EFAULT; | 346 | copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved))) |
347 | return -EFAULT; | ||
317 | return 0; | 348 | return 0; |
318 | } | 349 | } |
319 | 350 | ||
@@ -352,134 +383,186 @@ struct v4l2_buffer32 { | |||
352 | __u32 reserved; | 383 | __u32 reserved; |
353 | }; | 384 | }; |
354 | 385 | ||
355 | static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32, | 386 | static int get_v4l2_plane32(struct v4l2_plane __user *up, |
356 | enum v4l2_memory memory) | 387 | struct v4l2_plane32 __user *up32, |
388 | enum v4l2_memory memory) | ||
357 | { | 389 | { |
358 | void __user *up_pln; | 390 | compat_ulong_t p; |
359 | compat_long_t p; | ||
360 | 391 | ||
361 | if (copy_in_user(up, up32, 2 * sizeof(__u32)) || | 392 | if (copy_in_user(up, up32, 2 * sizeof(__u32)) || |
362 | copy_in_user(&up->data_offset, &up32->data_offset, | 393 | copy_in_user(&up->data_offset, &up32->data_offset, |
363 | sizeof(__u32))) | 394 | sizeof(up->data_offset))) |
364 | return -EFAULT; | 395 | return -EFAULT; |
365 | 396 | ||
366 | if (memory == V4L2_MEMORY_USERPTR) { | 397 | switch (memory) { |
367 | if (get_user(p, &up32->m.userptr)) | 398 | case V4L2_MEMORY_MMAP: |
368 | return -EFAULT; | 399 | case V4L2_MEMORY_OVERLAY: |
369 | up_pln = compat_ptr(p); | 400 | if (copy_in_user(&up->m.mem_offset, &up32->m.mem_offset, |
370 | if (put_user((unsigned long)up_pln, &up->m.userptr)) | 401 | sizeof(up32->m.mem_offset))) |
371 | return -EFAULT; | 402 | return -EFAULT; |
372 | } else if (memory == V4L2_MEMORY_DMABUF) { | 403 | break; |
373 | if (copy_in_user(&up->m.fd, &up32->m.fd, sizeof(int))) | 404 | case V4L2_MEMORY_USERPTR: |
405 | if (get_user(p, &up32->m.userptr) || | ||
406 | put_user((unsigned long)compat_ptr(p), &up->m.userptr)) | ||
374 | return -EFAULT; | 407 | return -EFAULT; |
375 | } else { | 408 | break; |
376 | if (copy_in_user(&up->m.mem_offset, &up32->m.mem_offset, | 409 | case V4L2_MEMORY_DMABUF: |
377 | sizeof(__u32))) | 410 | if (copy_in_user(&up->m.fd, &up32->m.fd, sizeof(up32->m.fd))) |
378 | return -EFAULT; | 411 | return -EFAULT; |
412 | break; | ||
379 | } | 413 | } |
380 | 414 | ||
381 | return 0; | 415 | return 0; |
382 | } | 416 | } |
383 | 417 | ||
384 | static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32, | 418 | static int put_v4l2_plane32(struct v4l2_plane __user *up, |
385 | enum v4l2_memory memory) | 419 | struct v4l2_plane32 __user *up32, |
420 | enum v4l2_memory memory) | ||
386 | { | 421 | { |
422 | unsigned long p; | ||
423 | |||
387 | if (copy_in_user(up32, up, 2 * sizeof(__u32)) || | 424 | if (copy_in_user(up32, up, 2 * sizeof(__u32)) || |
388 | copy_in_user(&up32->data_offset, &up->data_offset, | 425 | copy_in_user(&up32->data_offset, &up->data_offset, |
389 | sizeof(__u32))) | 426 | sizeof(up->data_offset))) |
390 | return -EFAULT; | 427 | return -EFAULT; |
391 | 428 | ||
392 | /* For MMAP, driver might've set up the offset, so copy it back. | 429 | switch (memory) { |
393 | * USERPTR stays the same (was userspace-provided), so no copying. */ | 430 | case V4L2_MEMORY_MMAP: |
394 | if (memory == V4L2_MEMORY_MMAP) | 431 | case V4L2_MEMORY_OVERLAY: |
395 | if (copy_in_user(&up32->m.mem_offset, &up->m.mem_offset, | 432 | if (copy_in_user(&up32->m.mem_offset, &up->m.mem_offset, |
396 | sizeof(__u32))) | 433 | sizeof(up->m.mem_offset))) |
397 | return -EFAULT; | 434 | return -EFAULT; |
398 | /* For DMABUF, driver might've set up the fd, so copy it back. */ | 435 | break; |
399 | if (memory == V4L2_MEMORY_DMABUF) | 436 | case V4L2_MEMORY_USERPTR: |
400 | if (copy_in_user(&up32->m.fd, &up->m.fd, | 437 | if (get_user(p, &up->m.userptr) || |
401 | sizeof(int))) | 438 | put_user((compat_ulong_t)ptr_to_compat((__force void *)p), |
439 | &up32->m.userptr)) | ||
440 | return -EFAULT; | ||
441 | break; | ||
442 | case V4L2_MEMORY_DMABUF: | ||
443 | if (copy_in_user(&up32->m.fd, &up->m.fd, sizeof(up->m.fd))) | ||
402 | return -EFAULT; | 444 | return -EFAULT; |
445 | break; | ||
446 | } | ||
403 | 447 | ||
404 | return 0; | 448 | return 0; |
405 | } | 449 | } |
406 | 450 | ||
407 | static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user *up) | 451 | static int bufsize_v4l2_buffer(struct v4l2_buffer32 __user *up, u32 *size) |
408 | { | 452 | { |
453 | u32 type; | ||
454 | u32 length; | ||
455 | |||
456 | if (!access_ok(VERIFY_READ, up, sizeof(*up)) || | ||
457 | get_user(type, &up->type) || | ||
458 | get_user(length, &up->length)) | ||
459 | return -EFAULT; | ||
460 | |||
461 | if (V4L2_TYPE_IS_MULTIPLANAR(type)) { | ||
462 | if (length > VIDEO_MAX_PLANES) | ||
463 | return -EINVAL; | ||
464 | |||
465 | /* | ||
466 | * We don't really care if userspace decides to kill itself | ||
467 | * by passing a very big length value | ||
468 | */ | ||
469 | *size = length * sizeof(struct v4l2_plane); | ||
470 | } else { | ||
471 | *size = 0; | ||
472 | } | ||
473 | return 0; | ||
474 | } | ||
475 | |||
476 | static int get_v4l2_buffer32(struct v4l2_buffer __user *kp, | ||
477 | struct v4l2_buffer32 __user *up, | ||
478 | void __user *aux_buf, u32 aux_space) | ||
479 | { | ||
480 | u32 type; | ||
481 | u32 length; | ||
482 | enum v4l2_memory memory; | ||
409 | struct v4l2_plane32 __user *uplane32; | 483 | struct v4l2_plane32 __user *uplane32; |
410 | struct v4l2_plane __user *uplane; | 484 | struct v4l2_plane __user *uplane; |
411 | compat_caddr_t p; | 485 | compat_caddr_t p; |
412 | int num_planes; | ||
413 | int ret; | 486 | int ret; |
414 | 487 | ||
415 | if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_buffer32)) || | 488 | if (!access_ok(VERIFY_READ, up, sizeof(*up)) || |
416 | get_user(kp->index, &up->index) || | 489 | assign_in_user(&kp->index, &up->index) || |
417 | get_user(kp->type, &up->type) || | 490 | get_user(type, &up->type) || |
418 | get_user(kp->flags, &up->flags) || | 491 | put_user(type, &kp->type) || |
419 | get_user(kp->memory, &up->memory) || | 492 | assign_in_user(&kp->flags, &up->flags) || |
420 | get_user(kp->length, &up->length)) | 493 | get_user(memory, &up->memory) || |
421 | return -EFAULT; | 494 | put_user(memory, &kp->memory) || |
495 | get_user(length, &up->length) || | ||
496 | put_user(length, &kp->length)) | ||
497 | return -EFAULT; | ||
422 | 498 | ||
423 | if (V4L2_TYPE_IS_OUTPUT(kp->type)) | 499 | if (V4L2_TYPE_IS_OUTPUT(type)) |
424 | if (get_user(kp->bytesused, &up->bytesused) || | 500 | if (assign_in_user(&kp->bytesused, &up->bytesused) || |
425 | get_user(kp->field, &up->field) || | 501 | assign_in_user(&kp->field, &up->field) || |
426 | get_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) || | 502 | assign_in_user(&kp->timestamp.tv_sec, |
427 | get_user(kp->timestamp.tv_usec, | 503 | &up->timestamp.tv_sec) || |
428 | &up->timestamp.tv_usec)) | 504 | assign_in_user(&kp->timestamp.tv_usec, |
505 | &up->timestamp.tv_usec)) | ||
429 | return -EFAULT; | 506 | return -EFAULT; |
430 | 507 | ||
431 | if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) { | 508 | if (V4L2_TYPE_IS_MULTIPLANAR(type)) { |
432 | num_planes = kp->length; | 509 | u32 num_planes = length; |
510 | |||
433 | if (num_planes == 0) { | 511 | if (num_planes == 0) { |
434 | kp->m.planes = NULL; | 512 | /* |
435 | /* num_planes == 0 is legal, e.g. when userspace doesn't | 513 | * num_planes == 0 is legal, e.g. when userspace doesn't |
436 | * need planes array on DQBUF*/ | 514 | * need planes array on DQBUF |
437 | return 0; | 515 | */ |
516 | return put_user(NULL, &kp->m.planes); | ||
438 | } | 517 | } |
518 | if (num_planes > VIDEO_MAX_PLANES) | ||
519 | return -EINVAL; | ||
439 | 520 | ||
440 | if (get_user(p, &up->m.planes)) | 521 | if (get_user(p, &up->m.planes)) |
441 | return -EFAULT; | 522 | return -EFAULT; |
442 | 523 | ||
443 | uplane32 = compat_ptr(p); | 524 | uplane32 = compat_ptr(p); |
444 | if (!access_ok(VERIFY_READ, uplane32, | 525 | if (!access_ok(VERIFY_READ, uplane32, |
445 | num_planes * sizeof(struct v4l2_plane32))) | 526 | num_planes * sizeof(*uplane32))) |
446 | return -EFAULT; | 527 | return -EFAULT; |
447 | 528 | ||
448 | /* We don't really care if userspace decides to kill itself | 529 | /* |
449 | * by passing a very big num_planes value */ | 530 | * We don't really care if userspace decides to kill itself |
450 | uplane = compat_alloc_user_space(num_planes * | 531 | * by passing a very big num_planes value |
451 | sizeof(struct v4l2_plane)); | 532 | */ |
452 | kp->m.planes = (__force struct v4l2_plane *)uplane; | 533 | if (aux_space < num_planes * sizeof(*uplane)) |
534 | return -EFAULT; | ||
535 | |||
536 | uplane = aux_buf; | ||
537 | if (put_user((__force struct v4l2_plane *)uplane, | ||
538 | &kp->m.planes)) | ||
539 | return -EFAULT; | ||
453 | 540 | ||
454 | while (--num_planes >= 0) { | 541 | while (num_planes--) { |
455 | ret = get_v4l2_plane32(uplane, uplane32, kp->memory); | 542 | ret = get_v4l2_plane32(uplane, uplane32, memory); |
456 | if (ret) | 543 | if (ret) |
457 | return ret; | 544 | return ret; |
458 | ++uplane; | 545 | uplane++; |
459 | ++uplane32; | 546 | uplane32++; |
460 | } | 547 | } |
461 | } else { | 548 | } else { |
462 | switch (kp->memory) { | 549 | switch (memory) { |
463 | case V4L2_MEMORY_MMAP: | 550 | case V4L2_MEMORY_MMAP: |
464 | if (get_user(kp->m.offset, &up->m.offset)) | 551 | case V4L2_MEMORY_OVERLAY: |
552 | if (assign_in_user(&kp->m.offset, &up->m.offset)) | ||
465 | return -EFAULT; | 553 | return -EFAULT; |
466 | break; | 554 | break; |
467 | case V4L2_MEMORY_USERPTR: | 555 | case V4L2_MEMORY_USERPTR: { |
468 | { | 556 | compat_ulong_t userptr; |
469 | compat_long_t tmp; | ||
470 | 557 | ||
471 | if (get_user(tmp, &up->m.userptr)) | 558 | if (get_user(userptr, &up->m.userptr) || |
472 | return -EFAULT; | 559 | put_user((unsigned long)compat_ptr(userptr), |
473 | 560 | &kp->m.userptr)) | |
474 | kp->m.userptr = (unsigned long)compat_ptr(tmp); | ||
475 | } | ||
476 | break; | ||
477 | case V4L2_MEMORY_OVERLAY: | ||
478 | if (get_user(kp->m.offset, &up->m.offset)) | ||
479 | return -EFAULT; | 561 | return -EFAULT; |
480 | break; | 562 | break; |
563 | } | ||
481 | case V4L2_MEMORY_DMABUF: | 564 | case V4L2_MEMORY_DMABUF: |
482 | if (get_user(kp->m.fd, &up->m.fd)) | 565 | if (assign_in_user(&kp->m.fd, &up->m.fd)) |
483 | return -EFAULT; | 566 | return -EFAULT; |
484 | break; | 567 | break; |
485 | } | 568 | } |
@@ -488,65 +571,70 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user | |||
488 | return 0; | 571 | return 0; |
489 | } | 572 | } |
490 | 573 | ||
491 | static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user *up) | 574 | static int put_v4l2_buffer32(struct v4l2_buffer __user *kp, |
575 | struct v4l2_buffer32 __user *up) | ||
492 | { | 576 | { |
577 | u32 type; | ||
578 | u32 length; | ||
579 | enum v4l2_memory memory; | ||
493 | struct v4l2_plane32 __user *uplane32; | 580 | struct v4l2_plane32 __user *uplane32; |
494 | struct v4l2_plane __user *uplane; | 581 | struct v4l2_plane __user *uplane; |
495 | compat_caddr_t p; | 582 | compat_caddr_t p; |
496 | int num_planes; | ||
497 | int ret; | 583 | int ret; |
498 | 584 | ||
499 | if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_buffer32)) || | 585 | if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || |
500 | put_user(kp->index, &up->index) || | 586 | assign_in_user(&up->index, &kp->index) || |
501 | put_user(kp->type, &up->type) || | 587 | get_user(type, &kp->type) || |
502 | put_user(kp->flags, &up->flags) || | 588 | put_user(type, &up->type) || |
503 | put_user(kp->memory, &up->memory)) | 589 | assign_in_user(&up->flags, &kp->flags) || |
504 | return -EFAULT; | 590 | get_user(memory, &kp->memory) || |
591 | put_user(memory, &up->memory)) | ||
592 | return -EFAULT; | ||
505 | 593 | ||
506 | if (put_user(kp->bytesused, &up->bytesused) || | 594 | if (assign_in_user(&up->bytesused, &kp->bytesused) || |
507 | put_user(kp->field, &up->field) || | 595 | assign_in_user(&up->field, &kp->field) || |
508 | put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) || | 596 | assign_in_user(&up->timestamp.tv_sec, &kp->timestamp.tv_sec) || |
509 | put_user(kp->timestamp.tv_usec, &up->timestamp.tv_usec) || | 597 | assign_in_user(&up->timestamp.tv_usec, &kp->timestamp.tv_usec) || |
510 | copy_to_user(&up->timecode, &kp->timecode, sizeof(struct v4l2_timecode)) || | 598 | copy_in_user(&up->timecode, &kp->timecode, sizeof(kp->timecode)) || |
511 | put_user(kp->sequence, &up->sequence) || | 599 | assign_in_user(&up->sequence, &kp->sequence) || |
512 | put_user(kp->reserved2, &up->reserved2) || | 600 | assign_in_user(&up->reserved2, &kp->reserved2) || |
513 | put_user(kp->reserved, &up->reserved) || | 601 | assign_in_user(&up->reserved, &kp->reserved) || |
514 | put_user(kp->length, &up->length)) | 602 | get_user(length, &kp->length) || |
515 | return -EFAULT; | 603 | put_user(length, &up->length)) |
604 | return -EFAULT; | ||
605 | |||
606 | if (V4L2_TYPE_IS_MULTIPLANAR(type)) { | ||
607 | u32 num_planes = length; | ||
516 | 608 | ||
517 | if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) { | ||
518 | num_planes = kp->length; | ||
519 | if (num_planes == 0) | 609 | if (num_planes == 0) |
520 | return 0; | 610 | return 0; |
521 | 611 | ||
522 | uplane = (__force struct v4l2_plane __user *)kp->m.planes; | 612 | if (get_user(uplane, ((__force struct v4l2_plane __user **)&kp->m.planes))) |
613 | return -EFAULT; | ||
523 | if (get_user(p, &up->m.planes)) | 614 | if (get_user(p, &up->m.planes)) |
524 | return -EFAULT; | 615 | return -EFAULT; |
525 | uplane32 = compat_ptr(p); | 616 | uplane32 = compat_ptr(p); |
526 | 617 | ||
527 | while (--num_planes >= 0) { | 618 | while (num_planes--) { |
528 | ret = put_v4l2_plane32(uplane, uplane32, kp->memory); | 619 | ret = put_v4l2_plane32(uplane, uplane32, memory); |
529 | if (ret) | 620 | if (ret) |
530 | return ret; | 621 | return ret; |
531 | ++uplane; | 622 | ++uplane; |
532 | ++uplane32; | 623 | ++uplane32; |
533 | } | 624 | } |
534 | } else { | 625 | } else { |
535 | switch (kp->memory) { | 626 | switch (memory) { |
536 | case V4L2_MEMORY_MMAP: | 627 | case V4L2_MEMORY_MMAP: |
537 | if (put_user(kp->m.offset, &up->m.offset)) | 628 | case V4L2_MEMORY_OVERLAY: |
629 | if (assign_in_user(&up->m.offset, &kp->m.offset)) | ||
538 | return -EFAULT; | 630 | return -EFAULT; |
539 | break; | 631 | break; |
540 | case V4L2_MEMORY_USERPTR: | 632 | case V4L2_MEMORY_USERPTR: |
541 | if (put_user(kp->m.userptr, &up->m.userptr)) | 633 | if (assign_in_user(&up->m.userptr, &kp->m.userptr)) |
542 | return -EFAULT; | ||
543 | break; | ||
544 | case V4L2_MEMORY_OVERLAY: | ||
545 | if (put_user(kp->m.offset, &up->m.offset)) | ||
546 | return -EFAULT; | 634 | return -EFAULT; |
547 | break; | 635 | break; |
548 | case V4L2_MEMORY_DMABUF: | 636 | case V4L2_MEMORY_DMABUF: |
549 | if (put_user(kp->m.fd, &up->m.fd)) | 637 | if (assign_in_user(&up->m.fd, &kp->m.fd)) |
550 | return -EFAULT; | 638 | return -EFAULT; |
551 | break; | 639 | break; |
552 | } | 640 | } |
@@ -558,7 +646,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user | |||
558 | struct v4l2_framebuffer32 { | 646 | struct v4l2_framebuffer32 { |
559 | __u32 capability; | 647 | __u32 capability; |
560 | __u32 flags; | 648 | __u32 flags; |
561 | compat_caddr_t base; | 649 | compat_caddr_t base; |
562 | struct { | 650 | struct { |
563 | __u32 width; | 651 | __u32 width; |
564 | __u32 height; | 652 | __u32 height; |
@@ -571,30 +659,33 @@ struct v4l2_framebuffer32 { | |||
571 | } fmt; | 659 | } fmt; |
572 | }; | 660 | }; |
573 | 661 | ||
574 | static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_framebuffer32 __user *up) | 662 | static int get_v4l2_framebuffer32(struct v4l2_framebuffer __user *kp, |
663 | struct v4l2_framebuffer32 __user *up) | ||
575 | { | 664 | { |
576 | u32 tmp; | 665 | compat_caddr_t tmp; |
577 | 666 | ||
578 | if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_framebuffer32)) || | 667 | if (!access_ok(VERIFY_READ, up, sizeof(*up)) || |
579 | get_user(tmp, &up->base) || | 668 | get_user(tmp, &up->base) || |
580 | get_user(kp->capability, &up->capability) || | 669 | put_user((__force void *)compat_ptr(tmp), &kp->base) || |
581 | get_user(kp->flags, &up->flags) || | 670 | assign_in_user(&kp->capability, &up->capability) || |
582 | copy_from_user(&kp->fmt, &up->fmt, sizeof(up->fmt))) | 671 | assign_in_user(&kp->flags, &up->flags) || |
583 | return -EFAULT; | 672 | copy_in_user(&kp->fmt, &up->fmt, sizeof(kp->fmt))) |
584 | kp->base = (__force void *)compat_ptr(tmp); | 673 | return -EFAULT; |
585 | return 0; | 674 | return 0; |
586 | } | 675 | } |
587 | 676 | ||
588 | static int put_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_framebuffer32 __user *up) | 677 | static int put_v4l2_framebuffer32(struct v4l2_framebuffer __user *kp, |
678 | struct v4l2_framebuffer32 __user *up) | ||
589 | { | 679 | { |
590 | u32 tmp = (u32)((unsigned long)kp->base); | 680 | void *base; |
591 | 681 | ||
592 | if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_framebuffer32)) || | 682 | if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || |
593 | put_user(tmp, &up->base) || | 683 | get_user(base, &kp->base) || |
594 | put_user(kp->capability, &up->capability) || | 684 | put_user(ptr_to_compat(base), &up->base) || |
595 | put_user(kp->flags, &up->flags) || | 685 | assign_in_user(&up->capability, &kp->capability) || |
596 | copy_to_user(&up->fmt, &kp->fmt, sizeof(up->fmt))) | 686 | assign_in_user(&up->flags, &kp->flags) || |
597 | return -EFAULT; | 687 | copy_in_user(&up->fmt, &kp->fmt, sizeof(kp->fmt))) |
688 | return -EFAULT; | ||
598 | return 0; | 689 | return 0; |
599 | } | 690 | } |
600 | 691 | ||
@@ -606,21 +697,26 @@ struct v4l2_input32 { | |||
606 | __u32 tuner; /* Associated tuner */ | 697 | __u32 tuner; /* Associated tuner */ |
607 | compat_u64 std; | 698 | compat_u64 std; |
608 | __u32 status; | 699 | __u32 status; |
609 | __u32 reserved[4]; | 700 | __u32 capabilities; |
701 | __u32 reserved[3]; | ||
610 | }; | 702 | }; |
611 | 703 | ||
612 | /* The 64-bit v4l2_input struct has extra padding at the end of the struct. | 704 | /* |
613 | Otherwise it is identical to the 32-bit version. */ | 705 | * The 64-bit v4l2_input struct has extra padding at the end of the struct. |
614 | static inline int get_v4l2_input32(struct v4l2_input *kp, struct v4l2_input32 __user *up) | 706 | * Otherwise it is identical to the 32-bit version. |
707 | */ | ||
708 | static inline int get_v4l2_input32(struct v4l2_input __user *kp, | ||
709 | struct v4l2_input32 __user *up) | ||
615 | { | 710 | { |
616 | if (copy_from_user(kp, up, sizeof(struct v4l2_input32))) | 711 | if (copy_in_user(kp, up, sizeof(*up))) |
617 | return -EFAULT; | 712 | return -EFAULT; |
618 | return 0; | 713 | return 0; |
619 | } | 714 | } |
620 | 715 | ||
621 | static inline int put_v4l2_input32(struct v4l2_input *kp, struct v4l2_input32 __user *up) | 716 | static inline int put_v4l2_input32(struct v4l2_input __user *kp, |
717 | struct v4l2_input32 __user *up) | ||
622 | { | 718 | { |
623 | if (copy_to_user(up, kp, sizeof(struct v4l2_input32))) | 719 | if (copy_in_user(up, kp, sizeof(*up))) |
624 | return -EFAULT; | 720 | return -EFAULT; |
625 | return 0; | 721 | return 0; |
626 | } | 722 | } |
@@ -644,58 +740,95 @@ struct v4l2_ext_control32 { | |||
644 | }; | 740 | }; |
645 | } __attribute__ ((packed)); | 741 | } __attribute__ ((packed)); |
646 | 742 | ||
647 | /* The following function really belong in v4l2-common, but that causes | 743 | /* Return true if this control is a pointer type. */ |
648 | a circular dependency between modules. We need to think about this, but | 744 | static inline bool ctrl_is_pointer(struct file *file, u32 id) |
649 | for now this will do. */ | ||
650 | |||
651 | /* Return non-zero if this control is a pointer type. Currently only | ||
652 | type STRING is a pointer type. */ | ||
653 | static inline int ctrl_is_pointer(u32 id) | ||
654 | { | 745 | { |
655 | switch (id) { | 746 | struct video_device *vdev = video_devdata(file); |
656 | case V4L2_CID_RDS_TX_PS_NAME: | 747 | struct v4l2_fh *fh = NULL; |
657 | case V4L2_CID_RDS_TX_RADIO_TEXT: | 748 | struct v4l2_ctrl_handler *hdl = NULL; |
658 | return 1; | 749 | struct v4l2_query_ext_ctrl qec = { id }; |
659 | default: | 750 | const struct v4l2_ioctl_ops *ops = vdev->ioctl_ops; |
660 | return 0; | 751 | |
752 | if (test_bit(V4L2_FL_USES_V4L2_FH, &vdev->flags)) | ||
753 | fh = file->private_data; | ||
754 | |||
755 | if (fh && fh->ctrl_handler) | ||
756 | hdl = fh->ctrl_handler; | ||
757 | else if (vdev->ctrl_handler) | ||
758 | hdl = vdev->ctrl_handler; | ||
759 | |||
760 | if (hdl) { | ||
761 | struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, id); | ||
762 | |||
763 | return ctrl && ctrl->is_ptr; | ||
661 | } | 764 | } |
765 | |||
766 | if (!ops || !ops->vidioc_query_ext_ctrl) | ||
767 | return false; | ||
768 | |||
769 | return !ops->vidioc_query_ext_ctrl(file, fh, &qec) && | ||
770 | (qec.flags & V4L2_CTRL_FLAG_HAS_PAYLOAD); | ||
771 | } | ||
772 | |||
773 | static int bufsize_v4l2_ext_controls(struct v4l2_ext_controls32 __user *up, | ||
774 | u32 *size) | ||
775 | { | ||
776 | u32 count; | ||
777 | |||
778 | if (!access_ok(VERIFY_READ, up, sizeof(*up)) || | ||
779 | get_user(count, &up->count)) | ||
780 | return -EFAULT; | ||
781 | if (count > V4L2_CID_MAX_CTRLS) | ||
782 | return -EINVAL; | ||
783 | *size = count * sizeof(struct v4l2_ext_control); | ||
784 | return 0; | ||
662 | } | 785 | } |
663 | 786 | ||
664 | static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up) | 787 | static int get_v4l2_ext_controls32(struct file *file, |
788 | struct v4l2_ext_controls __user *kp, | ||
789 | struct v4l2_ext_controls32 __user *up, | ||
790 | void __user *aux_buf, u32 aux_space) | ||
665 | { | 791 | { |
666 | struct v4l2_ext_control32 __user *ucontrols; | 792 | struct v4l2_ext_control32 __user *ucontrols; |
667 | struct v4l2_ext_control __user *kcontrols; | 793 | struct v4l2_ext_control __user *kcontrols; |
668 | int n; | 794 | u32 count; |
795 | u32 n; | ||
669 | compat_caddr_t p; | 796 | compat_caddr_t p; |
670 | 797 | ||
671 | if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_ext_controls32)) || | 798 | if (!access_ok(VERIFY_READ, up, sizeof(*up)) || |
672 | get_user(kp->ctrl_class, &up->ctrl_class) || | 799 | assign_in_user(&kp->ctrl_class, &up->ctrl_class) || |
673 | get_user(kp->count, &up->count) || | 800 | get_user(count, &up->count) || |
674 | get_user(kp->error_idx, &up->error_idx) || | 801 | put_user(count, &kp->count) || |
675 | copy_from_user(kp->reserved, up->reserved, | 802 | assign_in_user(&kp->error_idx, &up->error_idx) || |
676 | sizeof(kp->reserved))) | 803 | copy_in_user(kp->reserved, up->reserved, sizeof(kp->reserved))) |
677 | return -EFAULT; | 804 | return -EFAULT; |
678 | n = kp->count; | 805 | |
679 | if (n == 0) { | 806 | if (count == 0) |
680 | kp->controls = NULL; | 807 | return put_user(NULL, &kp->controls); |
681 | return 0; | 808 | if (count > V4L2_CID_MAX_CTRLS) |
682 | } | 809 | return -EINVAL; |
683 | if (get_user(p, &up->controls)) | 810 | if (get_user(p, &up->controls)) |
684 | return -EFAULT; | 811 | return -EFAULT; |
685 | ucontrols = compat_ptr(p); | 812 | ucontrols = compat_ptr(p); |
686 | if (!access_ok(VERIFY_READ, ucontrols, | 813 | if (!access_ok(VERIFY_READ, ucontrols, count * sizeof(*ucontrols))) |
687 | n * sizeof(struct v4l2_ext_control32))) | 814 | return -EFAULT; |
815 | if (aux_space < count * sizeof(*kcontrols)) | ||
688 | return -EFAULT; | 816 | return -EFAULT; |
689 | kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control)); | 817 | kcontrols = aux_buf; |
690 | kp->controls = (__force struct v4l2_ext_control *)kcontrols; | 818 | if (put_user((__force struct v4l2_ext_control *)kcontrols, |
691 | while (--n >= 0) { | 819 | &kp->controls)) |
820 | return -EFAULT; | ||
821 | |||
822 | for (n = 0; n < count; n++) { | ||
692 | u32 id; | 823 | u32 id; |
693 | 824 | ||
694 | if (copy_in_user(kcontrols, ucontrols, sizeof(*ucontrols))) | 825 | if (copy_in_user(kcontrols, ucontrols, sizeof(*ucontrols))) |
695 | return -EFAULT; | 826 | return -EFAULT; |
827 | |||
696 | if (get_user(id, &kcontrols->id)) | 828 | if (get_user(id, &kcontrols->id)) |
697 | return -EFAULT; | 829 | return -EFAULT; |
698 | if (ctrl_is_pointer(id)) { | 830 | |
831 | if (ctrl_is_pointer(file, id)) { | ||
699 | void __user *s; | 832 | void __user *s; |
700 | 833 | ||
701 | if (get_user(p, &ucontrols->string)) | 834 | if (get_user(p, &ucontrols->string)) |
@@ -710,43 +843,55 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext | |||
710 | return 0; | 843 | return 0; |
711 | } | 844 | } |
712 | 845 | ||
713 | static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up) | 846 | static int put_v4l2_ext_controls32(struct file *file, |
847 | struct v4l2_ext_controls __user *kp, | ||
848 | struct v4l2_ext_controls32 __user *up) | ||
714 | { | 849 | { |
715 | struct v4l2_ext_control32 __user *ucontrols; | 850 | struct v4l2_ext_control32 __user *ucontrols; |
716 | struct v4l2_ext_control __user *kcontrols = | 851 | struct v4l2_ext_control __user *kcontrols; |
717 | (__force struct v4l2_ext_control __user *)kp->controls; | 852 | u32 count; |
718 | int n = kp->count; | 853 | u32 n; |
719 | compat_caddr_t p; | 854 | compat_caddr_t p; |
720 | 855 | ||
721 | if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_ext_controls32)) || | 856 | if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || |
722 | put_user(kp->ctrl_class, &up->ctrl_class) || | 857 | assign_in_user(&up->ctrl_class, &kp->ctrl_class) || |
723 | put_user(kp->count, &up->count) || | 858 | get_user(count, &kp->count) || |
724 | put_user(kp->error_idx, &up->error_idx) || | 859 | put_user(count, &up->count) || |
725 | copy_to_user(up->reserved, kp->reserved, sizeof(up->reserved))) | 860 | assign_in_user(&up->error_idx, &kp->error_idx) || |
726 | return -EFAULT; | 861 | copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved)) || |
727 | if (!kp->count) | 862 | get_user(kcontrols, &kp->controls)) |
728 | return 0; | 863 | return -EFAULT; |
729 | 864 | ||
865 | if (!count) | ||
866 | return 0; | ||
730 | if (get_user(p, &up->controls)) | 867 | if (get_user(p, &up->controls)) |
731 | return -EFAULT; | 868 | return -EFAULT; |
732 | ucontrols = compat_ptr(p); | 869 | ucontrols = compat_ptr(p); |
733 | if (!access_ok(VERIFY_WRITE, ucontrols, | 870 | if (!access_ok(VERIFY_WRITE, ucontrols, count * sizeof(*ucontrols))) |
734 | n * sizeof(struct v4l2_ext_control32))) | ||
735 | return -EFAULT; | 871 | return -EFAULT; |
736 | 872 | ||
737 | while (--n >= 0) { | 873 | for (n = 0; n < count; n++) { |
738 | unsigned size = sizeof(*ucontrols); | 874 | unsigned int size = sizeof(*ucontrols); |
739 | u32 id; | 875 | u32 id; |
740 | 876 | ||
741 | if (get_user(id, &kcontrols->id)) | 877 | if (get_user(id, &kcontrols->id) || |
878 | put_user(id, &ucontrols->id) || | ||
879 | assign_in_user(&ucontrols->size, &kcontrols->size) || | ||
880 | copy_in_user(&ucontrols->reserved2, &kcontrols->reserved2, | ||
881 | sizeof(ucontrols->reserved2))) | ||
742 | return -EFAULT; | 882 | return -EFAULT; |
743 | /* Do not modify the pointer when copying a pointer control. | 883 | |
744 | The contents of the pointer was changed, not the pointer | 884 | /* |
745 | itself. */ | 885 | * Do not modify the pointer when copying a pointer control. |
746 | if (ctrl_is_pointer(id)) | 886 | * The contents of the pointer was changed, not the pointer |
887 | * itself. | ||
888 | */ | ||
889 | if (ctrl_is_pointer(file, id)) | ||
747 | size -= sizeof(ucontrols->value64); | 890 | size -= sizeof(ucontrols->value64); |
891 | |||
748 | if (copy_in_user(ucontrols, kcontrols, size)) | 892 | if (copy_in_user(ucontrols, kcontrols, size)) |
749 | return -EFAULT; | 893 | return -EFAULT; |
894 | |||
750 | ucontrols++; | 895 | ucontrols++; |
751 | kcontrols++; | 896 | kcontrols++; |
752 | } | 897 | } |
@@ -766,18 +911,19 @@ struct v4l2_event32 { | |||
766 | __u32 reserved[8]; | 911 | __u32 reserved[8]; |
767 | }; | 912 | }; |
768 | 913 | ||
769 | static int put_v4l2_event32(struct v4l2_event *kp, struct v4l2_event32 __user *up) | 914 | static int put_v4l2_event32(struct v4l2_event __user *kp, |
915 | struct v4l2_event32 __user *up) | ||
770 | { | 916 | { |
771 | if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_event32)) || | 917 | if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || |
772 | put_user(kp->type, &up->type) || | 918 | assign_in_user(&up->type, &kp->type) || |
773 | copy_to_user(&up->u, &kp->u, sizeof(kp->u)) || | 919 | copy_in_user(&up->u, &kp->u, sizeof(kp->u)) || |
774 | put_user(kp->pending, &up->pending) || | 920 | assign_in_user(&up->pending, &kp->pending) || |
775 | put_user(kp->sequence, &up->sequence) || | 921 | assign_in_user(&up->sequence, &kp->sequence) || |
776 | put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) || | 922 | assign_in_user(&up->timestamp.tv_sec, &kp->timestamp.tv_sec) || |
777 | put_user(kp->timestamp.tv_nsec, &up->timestamp.tv_nsec) || | 923 | assign_in_user(&up->timestamp.tv_nsec, &kp->timestamp.tv_nsec) || |
778 | put_user(kp->id, &up->id) || | 924 | assign_in_user(&up->id, &kp->id) || |
779 | copy_to_user(up->reserved, kp->reserved, 8 * sizeof(__u32))) | 925 | copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved))) |
780 | return -EFAULT; | 926 | return -EFAULT; |
781 | return 0; | 927 | return 0; |
782 | } | 928 | } |
783 | 929 | ||
@@ -789,32 +935,35 @@ struct v4l2_edid32 { | |||
789 | compat_caddr_t edid; | 935 | compat_caddr_t edid; |
790 | }; | 936 | }; |
791 | 937 | ||
792 | static int get_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up) | 938 | static int get_v4l2_edid32(struct v4l2_edid __user *kp, |
939 | struct v4l2_edid32 __user *up) | ||
793 | { | 940 | { |
794 | u32 tmp; | 941 | compat_uptr_t tmp; |
795 | 942 | ||
796 | if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_edid32)) || | 943 | if (!access_ok(VERIFY_READ, up, sizeof(*up)) || |
797 | get_user(kp->pad, &up->pad) || | 944 | assign_in_user(&kp->pad, &up->pad) || |
798 | get_user(kp->start_block, &up->start_block) || | 945 | assign_in_user(&kp->start_block, &up->start_block) || |
799 | get_user(kp->blocks, &up->blocks) || | 946 | assign_in_user(&kp->blocks, &up->blocks) || |
800 | get_user(tmp, &up->edid) || | 947 | get_user(tmp, &up->edid) || |
801 | copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved))) | 948 | put_user(compat_ptr(tmp), &kp->edid) || |
802 | return -EFAULT; | 949 | copy_in_user(kp->reserved, up->reserved, sizeof(kp->reserved))) |
803 | kp->edid = (__force u8 *)compat_ptr(tmp); | 950 | return -EFAULT; |
804 | return 0; | 951 | return 0; |
805 | } | 952 | } |
806 | 953 | ||
807 | static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up) | 954 | static int put_v4l2_edid32(struct v4l2_edid __user *kp, |
955 | struct v4l2_edid32 __user *up) | ||
808 | { | 956 | { |
809 | u32 tmp = (u32)((unsigned long)kp->edid); | 957 | void *edid; |
810 | 958 | ||
811 | if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_edid32)) || | 959 | if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || |
812 | put_user(kp->pad, &up->pad) || | 960 | assign_in_user(&up->pad, &kp->pad) || |
813 | put_user(kp->start_block, &up->start_block) || | 961 | assign_in_user(&up->start_block, &kp->start_block) || |
814 | put_user(kp->blocks, &up->blocks) || | 962 | assign_in_user(&up->blocks, &kp->blocks) || |
815 | put_user(tmp, &up->edid) || | 963 | get_user(edid, &kp->edid) || |
816 | copy_to_user(up->reserved, kp->reserved, sizeof(up->reserved))) | 964 | put_user(ptr_to_compat(edid), &up->edid) || |
817 | return -EFAULT; | 965 | copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved))) |
966 | return -EFAULT; | ||
818 | return 0; | 967 | return 0; |
819 | } | 968 | } |
820 | 969 | ||
@@ -830,7 +979,7 @@ static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up) | |||
830 | #define VIDIOC_ENUMINPUT32 _IOWR('V', 26, struct v4l2_input32) | 979 | #define VIDIOC_ENUMINPUT32 _IOWR('V', 26, struct v4l2_input32) |
831 | #define VIDIOC_G_EDID32 _IOWR('V', 40, struct v4l2_edid32) | 980 | #define VIDIOC_G_EDID32 _IOWR('V', 40, struct v4l2_edid32) |
832 | #define VIDIOC_S_EDID32 _IOWR('V', 41, struct v4l2_edid32) | 981 | #define VIDIOC_S_EDID32 _IOWR('V', 41, struct v4l2_edid32) |
833 | #define VIDIOC_TRY_FMT32 _IOWR('V', 64, struct v4l2_format32) | 982 | #define VIDIOC_TRY_FMT32 _IOWR('V', 64, struct v4l2_format32) |
834 | #define VIDIOC_G_EXT_CTRLS32 _IOWR('V', 71, struct v4l2_ext_controls32) | 983 | #define VIDIOC_G_EXT_CTRLS32 _IOWR('V', 71, struct v4l2_ext_controls32) |
835 | #define VIDIOC_S_EXT_CTRLS32 _IOWR('V', 72, struct v4l2_ext_controls32) | 984 | #define VIDIOC_S_EXT_CTRLS32 _IOWR('V', 72, struct v4l2_ext_controls32) |
836 | #define VIDIOC_TRY_EXT_CTRLS32 _IOWR('V', 73, struct v4l2_ext_controls32) | 985 | #define VIDIOC_TRY_EXT_CTRLS32 _IOWR('V', 73, struct v4l2_ext_controls32) |
@@ -846,22 +995,23 @@ static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up) | |||
846 | #define VIDIOC_G_OUTPUT32 _IOR ('V', 46, s32) | 995 | #define VIDIOC_G_OUTPUT32 _IOR ('V', 46, s32) |
847 | #define VIDIOC_S_OUTPUT32 _IOWR('V', 47, s32) | 996 | #define VIDIOC_S_OUTPUT32 _IOWR('V', 47, s32) |
848 | 997 | ||
998 | static int alloc_userspace(unsigned int size, u32 aux_space, | ||
999 | void __user **up_native) | ||
1000 | { | ||
1001 | *up_native = compat_alloc_user_space(size + aux_space); | ||
1002 | if (!*up_native) | ||
1003 | return -ENOMEM; | ||
1004 | if (clear_user(*up_native, size)) | ||
1005 | return -EFAULT; | ||
1006 | return 0; | ||
1007 | } | ||
1008 | |||
849 | static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | 1009 | static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
850 | { | 1010 | { |
851 | union { | ||
852 | struct v4l2_format v2f; | ||
853 | struct v4l2_buffer v2b; | ||
854 | struct v4l2_framebuffer v2fb; | ||
855 | struct v4l2_input v2i; | ||
856 | struct v4l2_standard v2s; | ||
857 | struct v4l2_ext_controls v2ecs; | ||
858 | struct v4l2_event v2ev; | ||
859 | struct v4l2_create_buffers v2crt; | ||
860 | struct v4l2_edid v2edid; | ||
861 | unsigned long vx; | ||
862 | int vi; | ||
863 | } karg; | ||
864 | void __user *up = compat_ptr(arg); | 1011 | void __user *up = compat_ptr(arg); |
1012 | void __user *up_native = NULL; | ||
1013 | void __user *aux_buf; | ||
1014 | u32 aux_space; | ||
865 | int compatible_arg = 1; | 1015 | int compatible_arg = 1; |
866 | long err = 0; | 1016 | long err = 0; |
867 | 1017 | ||
@@ -900,30 +1050,52 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar | |||
900 | case VIDIOC_STREAMOFF: | 1050 | case VIDIOC_STREAMOFF: |
901 | case VIDIOC_S_INPUT: | 1051 | case VIDIOC_S_INPUT: |
902 | case VIDIOC_S_OUTPUT: | 1052 | case VIDIOC_S_OUTPUT: |
903 | err = get_user(karg.vi, (s32 __user *)up); | 1053 | err = alloc_userspace(sizeof(unsigned int), 0, &up_native); |
1054 | if (!err && assign_in_user((unsigned int __user *)up_native, | ||
1055 | (compat_uint_t __user *)up)) | ||
1056 | err = -EFAULT; | ||
904 | compatible_arg = 0; | 1057 | compatible_arg = 0; |
905 | break; | 1058 | break; |
906 | 1059 | ||
907 | case VIDIOC_G_INPUT: | 1060 | case VIDIOC_G_INPUT: |
908 | case VIDIOC_G_OUTPUT: | 1061 | case VIDIOC_G_OUTPUT: |
1062 | err = alloc_userspace(sizeof(unsigned int), 0, &up_native); | ||
909 | compatible_arg = 0; | 1063 | compatible_arg = 0; |
910 | break; | 1064 | break; |
911 | 1065 | ||
912 | case VIDIOC_G_EDID: | 1066 | case VIDIOC_G_EDID: |
913 | case VIDIOC_S_EDID: | 1067 | case VIDIOC_S_EDID: |
914 | err = get_v4l2_edid32(&karg.v2edid, up); | 1068 | err = alloc_userspace(sizeof(struct v4l2_edid), 0, &up_native); |
1069 | if (!err) | ||
1070 | err = get_v4l2_edid32(up_native, up); | ||
915 | compatible_arg = 0; | 1071 | compatible_arg = 0; |
916 | break; | 1072 | break; |
917 | 1073 | ||
918 | case VIDIOC_G_FMT: | 1074 | case VIDIOC_G_FMT: |
919 | case VIDIOC_S_FMT: | 1075 | case VIDIOC_S_FMT: |
920 | case VIDIOC_TRY_FMT: | 1076 | case VIDIOC_TRY_FMT: |
921 | err = get_v4l2_format32(&karg.v2f, up); | 1077 | err = bufsize_v4l2_format(up, &aux_space); |
1078 | if (!err) | ||
1079 | err = alloc_userspace(sizeof(struct v4l2_format), | ||
1080 | aux_space, &up_native); | ||
1081 | if (!err) { | ||
1082 | aux_buf = up_native + sizeof(struct v4l2_format); | ||
1083 | err = get_v4l2_format32(up_native, up, | ||
1084 | aux_buf, aux_space); | ||
1085 | } | ||
922 | compatible_arg = 0; | 1086 | compatible_arg = 0; |
923 | break; | 1087 | break; |
924 | 1088 | ||
925 | case VIDIOC_CREATE_BUFS: | 1089 | case VIDIOC_CREATE_BUFS: |
926 | err = get_v4l2_create32(&karg.v2crt, up); | 1090 | err = bufsize_v4l2_create(up, &aux_space); |
1091 | if (!err) | ||
1092 | err = alloc_userspace(sizeof(struct v4l2_create_buffers), | ||
1093 | aux_space, &up_native); | ||
1094 | if (!err) { | ||
1095 | aux_buf = up_native + sizeof(struct v4l2_create_buffers); | ||
1096 | err = get_v4l2_create32(up_native, up, | ||
1097 | aux_buf, aux_space); | ||
1098 | } | ||
927 | compatible_arg = 0; | 1099 | compatible_arg = 0; |
928 | break; | 1100 | break; |
929 | 1101 | ||
@@ -931,36 +1103,63 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar | |||
931 | case VIDIOC_QUERYBUF: | 1103 | case VIDIOC_QUERYBUF: |
932 | case VIDIOC_QBUF: | 1104 | case VIDIOC_QBUF: |
933 | case VIDIOC_DQBUF: | 1105 | case VIDIOC_DQBUF: |
934 | err = get_v4l2_buffer32(&karg.v2b, up); | 1106 | err = bufsize_v4l2_buffer(up, &aux_space); |
1107 | if (!err) | ||
1108 | err = alloc_userspace(sizeof(struct v4l2_buffer), | ||
1109 | aux_space, &up_native); | ||
1110 | if (!err) { | ||
1111 | aux_buf = up_native + sizeof(struct v4l2_buffer); | ||
1112 | err = get_v4l2_buffer32(up_native, up, | ||
1113 | aux_buf, aux_space); | ||
1114 | } | ||
935 | compatible_arg = 0; | 1115 | compatible_arg = 0; |
936 | break; | 1116 | break; |
937 | 1117 | ||
938 | case VIDIOC_S_FBUF: | 1118 | case VIDIOC_S_FBUF: |
939 | err = get_v4l2_framebuffer32(&karg.v2fb, up); | 1119 | err = alloc_userspace(sizeof(struct v4l2_framebuffer), 0, |
1120 | &up_native); | ||
1121 | if (!err) | ||
1122 | err = get_v4l2_framebuffer32(up_native, up); | ||
940 | compatible_arg = 0; | 1123 | compatible_arg = 0; |
941 | break; | 1124 | break; |
942 | 1125 | ||
943 | case VIDIOC_G_FBUF: | 1126 | case VIDIOC_G_FBUF: |
1127 | err = alloc_userspace(sizeof(struct v4l2_framebuffer), 0, | ||
1128 | &up_native); | ||
944 | compatible_arg = 0; | 1129 | compatible_arg = 0; |
945 | break; | 1130 | break; |
946 | 1131 | ||
947 | case VIDIOC_ENUMSTD: | 1132 | case VIDIOC_ENUMSTD: |
948 | err = get_v4l2_standard32(&karg.v2s, up); | 1133 | err = alloc_userspace(sizeof(struct v4l2_standard), 0, |
1134 | &up_native); | ||
1135 | if (!err) | ||
1136 | err = get_v4l2_standard32(up_native, up); | ||
949 | compatible_arg = 0; | 1137 | compatible_arg = 0; |
950 | break; | 1138 | break; |
951 | 1139 | ||
952 | case VIDIOC_ENUMINPUT: | 1140 | case VIDIOC_ENUMINPUT: |
953 | err = get_v4l2_input32(&karg.v2i, up); | 1141 | err = alloc_userspace(sizeof(struct v4l2_input), 0, &up_native); |
1142 | if (!err) | ||
1143 | err = get_v4l2_input32(up_native, up); | ||
954 | compatible_arg = 0; | 1144 | compatible_arg = 0; |
955 | break; | 1145 | break; |
956 | 1146 | ||
957 | case VIDIOC_G_EXT_CTRLS: | 1147 | case VIDIOC_G_EXT_CTRLS: |
958 | case VIDIOC_S_EXT_CTRLS: | 1148 | case VIDIOC_S_EXT_CTRLS: |
959 | case VIDIOC_TRY_EXT_CTRLS: | 1149 | case VIDIOC_TRY_EXT_CTRLS: |
960 | err = get_v4l2_ext_controls32(&karg.v2ecs, up); | 1150 | err = bufsize_v4l2_ext_controls(up, &aux_space); |
1151 | if (!err) | ||
1152 | err = alloc_userspace(sizeof(struct v4l2_ext_controls), | ||
1153 | aux_space, &up_native); | ||
1154 | if (!err) { | ||
1155 | aux_buf = up_native + sizeof(struct v4l2_ext_controls); | ||
1156 | err = get_v4l2_ext_controls32(file, up_native, up, | ||
1157 | aux_buf, aux_space); | ||
1158 | } | ||
961 | compatible_arg = 0; | 1159 | compatible_arg = 0; |
962 | break; | 1160 | break; |
963 | case VIDIOC_DQEVENT: | 1161 | case VIDIOC_DQEVENT: |
1162 | err = alloc_userspace(sizeof(struct v4l2_event), 0, &up_native); | ||
964 | compatible_arg = 0; | 1163 | compatible_arg = 0; |
965 | break; | 1164 | break; |
966 | } | 1165 | } |
@@ -969,22 +1168,26 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar | |||
969 | 1168 | ||
970 | if (compatible_arg) | 1169 | if (compatible_arg) |
971 | err = native_ioctl(file, cmd, (unsigned long)up); | 1170 | err = native_ioctl(file, cmd, (unsigned long)up); |
972 | else { | 1171 | else |
973 | mm_segment_t old_fs = get_fs(); | 1172 | err = native_ioctl(file, cmd, (unsigned long)up_native); |
974 | 1173 | ||
975 | set_fs(KERNEL_DS); | 1174 | if (err == -ENOTTY) |
976 | err = native_ioctl(file, cmd, (unsigned long)&karg); | 1175 | return err; |
977 | set_fs(old_fs); | ||
978 | } | ||
979 | 1176 | ||
980 | /* Special case: even after an error we need to put the | 1177 | /* |
981 | results back for these ioctls since the error_idx will | 1178 | * Special case: even after an error we need to put the |
982 | contain information on which control failed. */ | 1179 | * results back for these ioctls since the error_idx will |
1180 | * contain information on which control failed. | ||
1181 | */ | ||
983 | switch (cmd) { | 1182 | switch (cmd) { |
984 | case VIDIOC_G_EXT_CTRLS: | 1183 | case VIDIOC_G_EXT_CTRLS: |
985 | case VIDIOC_S_EXT_CTRLS: | 1184 | case VIDIOC_S_EXT_CTRLS: |
986 | case VIDIOC_TRY_EXT_CTRLS: | 1185 | case VIDIOC_TRY_EXT_CTRLS: |
987 | if (put_v4l2_ext_controls32(&karg.v2ecs, up)) | 1186 | if (put_v4l2_ext_controls32(file, up_native, up)) |
1187 | err = -EFAULT; | ||
1188 | break; | ||
1189 | case VIDIOC_S_EDID: | ||
1190 | if (put_v4l2_edid32(up_native, up)) | ||
988 | err = -EFAULT; | 1191 | err = -EFAULT; |
989 | break; | 1192 | break; |
990 | } | 1193 | } |
@@ -996,44 +1199,46 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar | |||
996 | case VIDIOC_S_OUTPUT: | 1199 | case VIDIOC_S_OUTPUT: |
997 | case VIDIOC_G_INPUT: | 1200 | case VIDIOC_G_INPUT: |
998 | case VIDIOC_G_OUTPUT: | 1201 | case VIDIOC_G_OUTPUT: |
999 | err = put_user(((s32)karg.vi), (s32 __user *)up); | 1202 | if (assign_in_user((compat_uint_t __user *)up, |
1203 | ((unsigned int __user *)up_native))) | ||
1204 | err = -EFAULT; | ||
1000 | break; | 1205 | break; |
1001 | 1206 | ||
1002 | case VIDIOC_G_FBUF: | 1207 | case VIDIOC_G_FBUF: |
1003 | err = put_v4l2_framebuffer32(&karg.v2fb, up); | 1208 | err = put_v4l2_framebuffer32(up_native, up); |
1004 | break; | 1209 | break; |
1005 | 1210 | ||
1006 | case VIDIOC_DQEVENT: | 1211 | case VIDIOC_DQEVENT: |
1007 | err = put_v4l2_event32(&karg.v2ev, up); | 1212 | err = put_v4l2_event32(up_native, up); |
1008 | break; | 1213 | break; |
1009 | 1214 | ||
1010 | case VIDIOC_G_EDID: | 1215 | case VIDIOC_G_EDID: |
1011 | case VIDIOC_S_EDID: | 1216 | err = put_v4l2_edid32(up_native, up); |
1012 | err = put_v4l2_edid32(&karg.v2edid, up); | ||
1013 | break; | 1217 | break; |
1014 | 1218 | ||
1015 | case VIDIOC_G_FMT: | 1219 | case VIDIOC_G_FMT: |
1016 | case VIDIOC_S_FMT: | 1220 | case VIDIOC_S_FMT: |
1017 | case VIDIOC_TRY_FMT: | 1221 | case VIDIOC_TRY_FMT: |
1018 | err = put_v4l2_format32(&karg.v2f, up); | 1222 | err = put_v4l2_format32(up_native, up); |
1019 | break; | 1223 | break; |
1020 | 1224 | ||
1021 | case VIDIOC_CREATE_BUFS: | 1225 | case VIDIOC_CREATE_BUFS: |
1022 | err = put_v4l2_create32(&karg.v2crt, up); | 1226 | err = put_v4l2_create32(up_native, up); |
1023 | break; | 1227 | break; |
1024 | 1228 | ||
1229 | case VIDIOC_PREPARE_BUF: | ||
1025 | case VIDIOC_QUERYBUF: | 1230 | case VIDIOC_QUERYBUF: |
1026 | case VIDIOC_QBUF: | 1231 | case VIDIOC_QBUF: |
1027 | case VIDIOC_DQBUF: | 1232 | case VIDIOC_DQBUF: |
1028 | err = put_v4l2_buffer32(&karg.v2b, up); | 1233 | err = put_v4l2_buffer32(up_native, up); |
1029 | break; | 1234 | break; |
1030 | 1235 | ||
1031 | case VIDIOC_ENUMSTD: | 1236 | case VIDIOC_ENUMSTD: |
1032 | err = put_v4l2_standard32(&karg.v2s, up); | 1237 | err = put_v4l2_standard32(up_native, up); |
1033 | break; | 1238 | break; |
1034 | 1239 | ||
1035 | case VIDIOC_ENUMINPUT: | 1240 | case VIDIOC_ENUMINPUT: |
1036 | err = put_v4l2_input32(&karg.v2i, up); | 1241 | err = put_v4l2_input32(up_native, up); |
1037 | break; | 1242 | break; |
1038 | } | 1243 | } |
1039 | return err; | 1244 | return err; |
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index 7486af2c8ae4..5e2a7e59f578 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c | |||
@@ -2783,8 +2783,11 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg, | |||
2783 | 2783 | ||
2784 | /* Handles IOCTL */ | 2784 | /* Handles IOCTL */ |
2785 | err = func(file, cmd, parg); | 2785 | err = func(file, cmd, parg); |
2786 | if (err == -ENOIOCTLCMD) | 2786 | if (err == -ENOTTY || err == -ENOIOCTLCMD) { |
2787 | err = -ENOTTY; | 2787 | err = -ENOTTY; |
2788 | goto out; | ||
2789 | } | ||
2790 | |||
2788 | if (err == 0) { | 2791 | if (err == 0) { |
2789 | if (cmd == VIDIOC_DQBUF) | 2792 | if (cmd == VIDIOC_DQBUF) |
2790 | trace_v4l2_dqbuf(video_devdata(file)->minor, parg); | 2793 | trace_v4l2_dqbuf(video_devdata(file)->minor, parg); |
diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c index 23886e8fbfd8..df966509ebb2 100644 --- a/drivers/media/v4l2-core/videobuf2-v4l2.c +++ b/drivers/media/v4l2-core/videobuf2-v4l2.c | |||
@@ -594,6 +594,12 @@ static int vb2_internal_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, | |||
594 | b->flags & V4L2_BUF_FLAG_LAST) | 594 | b->flags & V4L2_BUF_FLAG_LAST) |
595 | q->last_buffer_dequeued = true; | 595 | q->last_buffer_dequeued = true; |
596 | 596 | ||
597 | /* | ||
598 | * After calling the VIDIOC_DQBUF V4L2_BUF_FLAG_DONE must be | ||
599 | * cleared. | ||
600 | */ | ||
601 | b->flags &= ~V4L2_BUF_FLAG_DONE; | ||
602 | |||
597 | return ret; | 603 | return ret; |
598 | } | 604 | } |
599 | 605 | ||
diff --git a/drivers/mfd/cros_ec_spi.c b/drivers/mfd/cros_ec_spi.c index 6a0f6ec67c6b..ee7847a1ca06 100644 --- a/drivers/mfd/cros_ec_spi.c +++ b/drivers/mfd/cros_ec_spi.c | |||
@@ -660,6 +660,7 @@ static int cros_ec_spi_probe(struct spi_device *spi) | |||
660 | sizeof(struct ec_response_get_protocol_info); | 660 | sizeof(struct ec_response_get_protocol_info); |
661 | ec_dev->dout_size = sizeof(struct ec_host_request); | 661 | ec_dev->dout_size = sizeof(struct ec_host_request); |
662 | 662 | ||
663 | ec_spi->last_transfer_ns = ktime_get_ns(); | ||
663 | 664 | ||
664 | err = cros_ec_register(ec_dev); | 665 | err = cros_ec_register(ec_dev); |
665 | if (err) { | 666 | if (err) { |
diff --git a/drivers/mfd/palmas.c b/drivers/mfd/palmas.c index 1365eeefcd1e..321e734b3d9b 100644 --- a/drivers/mfd/palmas.c +++ b/drivers/mfd/palmas.c | |||
@@ -441,12 +441,19 @@ static void palmas_power_off(void) | |||
441 | of_property_read_bool(node, "ti,palmas-override-powerhold"); | 441 | of_property_read_bool(node, "ti,palmas-override-powerhold"); |
442 | 442 | ||
443 | if (override_powerhold) { | 443 | if (override_powerhold) { |
444 | u32 powerhold_mask; | ||
445 | |||
446 | if (of_device_is_compatible(node, "ti,tps65917")) | ||
447 | powerhold_mask = TPS65917_PRIMARY_SECONDARY_PAD2_GPIO_5_MASK; | ||
448 | else | ||
449 | powerhold_mask = PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_7_MASK; | ||
450 | |||
444 | addr = PALMAS_BASE_TO_REG(PALMAS_PU_PD_OD_BASE, | 451 | addr = PALMAS_BASE_TO_REG(PALMAS_PU_PD_OD_BASE, |
445 | PALMAS_PRIMARY_SECONDARY_PAD2); | 452 | PALMAS_PRIMARY_SECONDARY_PAD2); |
446 | slave = PALMAS_BASE_TO_SLAVE(PALMAS_PU_PD_OD_BASE); | 453 | slave = PALMAS_BASE_TO_SLAVE(PALMAS_PU_PD_OD_BASE); |
447 | 454 | ||
448 | ret = regmap_update_bits(palmas_dev->regmap[slave], addr, | 455 | ret = regmap_update_bits(palmas_dev->regmap[slave], addr, |
449 | PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_7_MASK, 0); | 456 | powerhold_mask, 0); |
450 | if (ret) | 457 | if (ret) |
451 | pr_err("%s: Unable to write PALMAS_PRIMARY_SECONDARY_PAD2 %d\n", | 458 | pr_err("%s: Unable to write PALMAS_PRIMARY_SECONDARY_PAD2 %d\n", |
452 | __func__, ret); | 459 | __func__, ret); |
diff --git a/drivers/mfd/twl4030-audio.c b/drivers/mfd/twl4030-audio.c index 0a1606480023..cc832d309599 100644 --- a/drivers/mfd/twl4030-audio.c +++ b/drivers/mfd/twl4030-audio.c | |||
@@ -159,13 +159,18 @@ unsigned int twl4030_audio_get_mclk(void) | |||
159 | EXPORT_SYMBOL_GPL(twl4030_audio_get_mclk); | 159 | EXPORT_SYMBOL_GPL(twl4030_audio_get_mclk); |
160 | 160 | ||
161 | static bool twl4030_audio_has_codec(struct twl4030_audio_data *pdata, | 161 | static bool twl4030_audio_has_codec(struct twl4030_audio_data *pdata, |
162 | struct device_node *node) | 162 | struct device_node *parent) |
163 | { | 163 | { |
164 | struct device_node *node; | ||
165 | |||
164 | if (pdata && pdata->codec) | 166 | if (pdata && pdata->codec) |
165 | return true; | 167 | return true; |
166 | 168 | ||
167 | if (of_find_node_by_name(node, "codec")) | 169 | node = of_get_child_by_name(parent, "codec"); |
170 | if (node) { | ||
171 | of_node_put(node); | ||
168 | return true; | 172 | return true; |
173 | } | ||
169 | 174 | ||
170 | return false; | 175 | return false; |
171 | } | 176 | } |
diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c index 08a693cd38cc..72aab60ae846 100644 --- a/drivers/mfd/twl6040.c +++ b/drivers/mfd/twl6040.c | |||
@@ -97,12 +97,16 @@ static struct reg_sequence twl6040_patch[] = { | |||
97 | }; | 97 | }; |
98 | 98 | ||
99 | 99 | ||
100 | static bool twl6040_has_vibra(struct device_node *node) | 100 | static bool twl6040_has_vibra(struct device_node *parent) |
101 | { | 101 | { |
102 | #ifdef CONFIG_OF | 102 | struct device_node *node; |
103 | if (of_find_node_by_name(node, "vibra")) | 103 | |
104 | node = of_get_child_by_name(parent, "vibra"); | ||
105 | if (node) { | ||
106 | of_node_put(node); | ||
104 | return true; | 107 | return true; |
105 | #endif | 108 | } |
109 | |||
106 | return false; | 110 | return false; |
107 | } | 111 | } |
108 | 112 | ||
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index 0c6c17a1c59e..ba2f6d1d7db7 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c | |||
@@ -1329,6 +1329,9 @@ static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu, | |||
1329 | /* There should only be one entry, but go through the list | 1329 | /* There should only be one entry, but go through the list |
1330 | * anyway | 1330 | * anyway |
1331 | */ | 1331 | */ |
1332 | if (afu->phb == NULL) | ||
1333 | return result; | ||
1334 | |||
1332 | list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) { | 1335 | list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) { |
1333 | if (!afu_dev->driver) | 1336 | if (!afu_dev->driver) |
1334 | continue; | 1337 | continue; |
@@ -1369,6 +1372,10 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev, | |||
1369 | */ | 1372 | */ |
1370 | for (i = 0; i < adapter->slices; i++) { | 1373 | for (i = 0; i < adapter->slices; i++) { |
1371 | afu = adapter->afu[i]; | 1374 | afu = adapter->afu[i]; |
1375 | /* | ||
1376 | * Tell the AFU drivers; but we don't care what they | ||
1377 | * say, we're going away. | ||
1378 | */ | ||
1372 | cxl_vphb_error_detected(afu, state); | 1379 | cxl_vphb_error_detected(afu, state); |
1373 | } | 1380 | } |
1374 | return PCI_ERS_RESULT_DISCONNECT; | 1381 | return PCI_ERS_RESULT_DISCONNECT; |
@@ -1492,6 +1499,9 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev) | |||
1492 | if (cxl_afu_select_best_mode(afu)) | 1499 | if (cxl_afu_select_best_mode(afu)) |
1493 | goto err; | 1500 | goto err; |
1494 | 1501 | ||
1502 | if (afu->phb == NULL) | ||
1503 | continue; | ||
1504 | |||
1495 | cxl_pci_vphb_reconfigure(afu); | 1505 | cxl_pci_vphb_reconfigure(afu); |
1496 | 1506 | ||
1497 | list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) { | 1507 | list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) { |
@@ -1556,6 +1566,9 @@ static void cxl_pci_resume(struct pci_dev *pdev) | |||
1556 | for (i = 0; i < adapter->slices; i++) { | 1566 | for (i = 0; i < adapter->slices; i++) { |
1557 | afu = adapter->afu[i]; | 1567 | afu = adapter->afu[i]; |
1558 | 1568 | ||
1569 | if (afu->phb == NULL) | ||
1570 | continue; | ||
1571 | |||
1559 | list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) { | 1572 | list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) { |
1560 | if (afu_dev->driver && afu_dev->driver->err_handler && | 1573 | if (afu_dev->driver && afu_dev->driver->err_handler && |
1561 | afu_dev->driver->err_handler->resume) | 1574 | afu_dev->driver->err_handler->resume) |
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index 90e94a028a49..83b1226471c1 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c | |||
@@ -584,6 +584,8 @@ static int sdhci_esdhc_probe(struct platform_device *pdev) | |||
584 | { | 584 | { |
585 | struct sdhci_host *host; | 585 | struct sdhci_host *host; |
586 | struct device_node *np; | 586 | struct device_node *np; |
587 | struct sdhci_pltfm_host *pltfm_host; | ||
588 | struct sdhci_esdhc *esdhc; | ||
587 | int ret; | 589 | int ret; |
588 | 590 | ||
589 | np = pdev->dev.of_node; | 591 | np = pdev->dev.of_node; |
@@ -600,6 +602,14 @@ static int sdhci_esdhc_probe(struct platform_device *pdev) | |||
600 | 602 | ||
601 | sdhci_get_of_property(pdev); | 603 | sdhci_get_of_property(pdev); |
602 | 604 | ||
605 | pltfm_host = sdhci_priv(host); | ||
606 | esdhc = pltfm_host->priv; | ||
607 | if (esdhc->vendor_ver == VENDOR_V_22) | ||
608 | host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23; | ||
609 | |||
610 | if (esdhc->vendor_ver > VENDOR_V_22) | ||
611 | host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ; | ||
612 | |||
603 | if (of_device_is_compatible(np, "fsl,p5040-esdhc") || | 613 | if (of_device_is_compatible(np, "fsl,p5040-esdhc") || |
604 | of_device_is_compatible(np, "fsl,p5020-esdhc") || | 614 | of_device_is_compatible(np, "fsl,p5020-esdhc") || |
605 | of_device_is_compatible(np, "fsl,p4080-esdhc") || | 615 | of_device_is_compatible(np, "fsl,p4080-esdhc") || |
diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c index 4a07ba1195b5..d125d19a35e4 100644 --- a/drivers/mtd/nand/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/brcmnand/brcmnand.c | |||
@@ -1922,16 +1922,9 @@ static int brcmnand_setup_dev(struct brcmnand_host *host) | |||
1922 | tmp &= ~ACC_CONTROL_PARTIAL_PAGE; | 1922 | tmp &= ~ACC_CONTROL_PARTIAL_PAGE; |
1923 | tmp &= ~ACC_CONTROL_RD_ERASED; | 1923 | tmp &= ~ACC_CONTROL_RD_ERASED; |
1924 | tmp &= ~ACC_CONTROL_FAST_PGM_RDIN; | 1924 | tmp &= ~ACC_CONTROL_FAST_PGM_RDIN; |
1925 | if (ctrl->features & BRCMNAND_HAS_PREFETCH) { | 1925 | if (ctrl->features & BRCMNAND_HAS_PREFETCH) |
1926 | /* | 1926 | tmp &= ~ACC_CONTROL_PREFETCH; |
1927 | * FIXME: Flash DMA + prefetch may see spurious erased-page ECC | 1927 | |
1928 | * errors | ||
1929 | */ | ||
1930 | if (has_flash_dma(ctrl)) | ||
1931 | tmp &= ~ACC_CONTROL_PREFETCH; | ||
1932 | else | ||
1933 | tmp |= ACC_CONTROL_PREFETCH; | ||
1934 | } | ||
1935 | nand_writereg(ctrl, offs, tmp); | 1928 | nand_writereg(ctrl, offs, tmp); |
1936 | 1929 | ||
1937 | return 0; | 1930 | return 0; |
diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c index de31514df282..d38527e0a2f2 100644 --- a/drivers/mtd/nand/denali_pci.c +++ b/drivers/mtd/nand/denali_pci.c | |||
@@ -119,3 +119,7 @@ static struct pci_driver denali_pci_driver = { | |||
119 | }; | 119 | }; |
120 | 120 | ||
121 | module_pci_driver(denali_pci_driver); | 121 | module_pci_driver(denali_pci_driver); |
122 | |||
123 | MODULE_DESCRIPTION("PCI driver for Denali NAND controller"); | ||
124 | MODULE_AUTHOR("Intel Corporation and its suppliers"); | ||
125 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index e561fbcb93b3..a21b2e48d35b 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c | |||
@@ -2062,6 +2062,7 @@ static int nand_write_oob_syndrome(struct mtd_info *mtd, | |||
2062 | static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, | 2062 | static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, |
2063 | struct mtd_oob_ops *ops) | 2063 | struct mtd_oob_ops *ops) |
2064 | { | 2064 | { |
2065 | unsigned int max_bitflips = 0; | ||
2065 | int page, realpage, chipnr; | 2066 | int page, realpage, chipnr; |
2066 | struct nand_chip *chip = mtd->priv; | 2067 | struct nand_chip *chip = mtd->priv; |
2067 | struct mtd_ecc_stats stats; | 2068 | struct mtd_ecc_stats stats; |
@@ -2122,6 +2123,8 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, | |||
2122 | nand_wait_ready(mtd); | 2123 | nand_wait_ready(mtd); |
2123 | } | 2124 | } |
2124 | 2125 | ||
2126 | max_bitflips = max_t(unsigned int, max_bitflips, ret); | ||
2127 | |||
2125 | readlen -= len; | 2128 | readlen -= len; |
2126 | if (!readlen) | 2129 | if (!readlen) |
2127 | break; | 2130 | break; |
@@ -2147,7 +2150,7 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, | |||
2147 | if (mtd->ecc_stats.failed - stats.failed) | 2150 | if (mtd->ecc_stats.failed - stats.failed) |
2148 | return -EBADMSG; | 2151 | return -EBADMSG; |
2149 | 2152 | ||
2150 | return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0; | 2153 | return max_bitflips; |
2151 | } | 2154 | } |
2152 | 2155 | ||
2153 | /** | 2156 | /** |
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c index 824711845c44..3bb9b34d9e77 100644 --- a/drivers/mtd/nand/sunxi_nand.c +++ b/drivers/mtd/nand/sunxi_nand.c | |||
@@ -1046,8 +1046,14 @@ static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd, | |||
1046 | 1046 | ||
1047 | /* Add ECC info retrieval from DT */ | 1047 | /* Add ECC info retrieval from DT */ |
1048 | for (i = 0; i < ARRAY_SIZE(strengths); i++) { | 1048 | for (i = 0; i < ARRAY_SIZE(strengths); i++) { |
1049 | if (ecc->strength <= strengths[i]) | 1049 | if (ecc->strength <= strengths[i]) { |
1050 | /* | ||
1051 | * Update ecc->strength value with the actual strength | ||
1052 | * that will be used by the ECC engine. | ||
1053 | */ | ||
1054 | ecc->strength = strengths[i]; | ||
1050 | break; | 1055 | break; |
1056 | } | ||
1051 | } | 1057 | } |
1052 | 1058 | ||
1053 | if (i >= ARRAY_SIZE(strengths)) { | 1059 | if (i >= ARRAY_SIZE(strengths)) { |
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c index ebf46ad2d513..b2fb0528c092 100644 --- a/drivers/mtd/ubi/block.c +++ b/drivers/mtd/ubi/block.c | |||
@@ -99,6 +99,8 @@ struct ubiblock { | |||
99 | 99 | ||
100 | /* Linked list of all ubiblock instances */ | 100 | /* Linked list of all ubiblock instances */ |
101 | static LIST_HEAD(ubiblock_devices); | 101 | static LIST_HEAD(ubiblock_devices); |
102 | static DEFINE_IDR(ubiblock_minor_idr); | ||
103 | /* Protects ubiblock_devices and ubiblock_minor_idr */ | ||
102 | static DEFINE_MUTEX(devices_mutex); | 104 | static DEFINE_MUTEX(devices_mutex); |
103 | static int ubiblock_major; | 105 | static int ubiblock_major; |
104 | 106 | ||
@@ -354,8 +356,6 @@ static struct blk_mq_ops ubiblock_mq_ops = { | |||
354 | .map_queue = blk_mq_map_queue, | 356 | .map_queue = blk_mq_map_queue, |
355 | }; | 357 | }; |
356 | 358 | ||
357 | static DEFINE_IDR(ubiblock_minor_idr); | ||
358 | |||
359 | int ubiblock_create(struct ubi_volume_info *vi) | 359 | int ubiblock_create(struct ubi_volume_info *vi) |
360 | { | 360 | { |
361 | struct ubiblock *dev; | 361 | struct ubiblock *dev; |
@@ -368,14 +368,15 @@ int ubiblock_create(struct ubi_volume_info *vi) | |||
368 | /* Check that the volume isn't already handled */ | 368 | /* Check that the volume isn't already handled */ |
369 | mutex_lock(&devices_mutex); | 369 | mutex_lock(&devices_mutex); |
370 | if (find_dev_nolock(vi->ubi_num, vi->vol_id)) { | 370 | if (find_dev_nolock(vi->ubi_num, vi->vol_id)) { |
371 | mutex_unlock(&devices_mutex); | 371 | ret = -EEXIST; |
372 | return -EEXIST; | 372 | goto out_unlock; |
373 | } | 373 | } |
374 | mutex_unlock(&devices_mutex); | ||
375 | 374 | ||
376 | dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL); | 375 | dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL); |
377 | if (!dev) | 376 | if (!dev) { |
378 | return -ENOMEM; | 377 | ret = -ENOMEM; |
378 | goto out_unlock; | ||
379 | } | ||
379 | 380 | ||
380 | mutex_init(&dev->dev_mutex); | 381 | mutex_init(&dev->dev_mutex); |
381 | 382 | ||
@@ -440,14 +441,13 @@ int ubiblock_create(struct ubi_volume_info *vi) | |||
440 | goto out_free_queue; | 441 | goto out_free_queue; |
441 | } | 442 | } |
442 | 443 | ||
443 | mutex_lock(&devices_mutex); | ||
444 | list_add_tail(&dev->list, &ubiblock_devices); | 444 | list_add_tail(&dev->list, &ubiblock_devices); |
445 | mutex_unlock(&devices_mutex); | ||
446 | 445 | ||
447 | /* Must be the last step: anyone can call file ops from now on */ | 446 | /* Must be the last step: anyone can call file ops from now on */ |
448 | add_disk(dev->gd); | 447 | add_disk(dev->gd); |
449 | dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)", | 448 | dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)", |
450 | dev->ubi_num, dev->vol_id, vi->name); | 449 | dev->ubi_num, dev->vol_id, vi->name); |
450 | mutex_unlock(&devices_mutex); | ||
451 | return 0; | 451 | return 0; |
452 | 452 | ||
453 | out_free_queue: | 453 | out_free_queue: |
@@ -460,6 +460,8 @@ out_put_disk: | |||
460 | put_disk(dev->gd); | 460 | put_disk(dev->gd); |
461 | out_free_dev: | 461 | out_free_dev: |
462 | kfree(dev); | 462 | kfree(dev); |
463 | out_unlock: | ||
464 | mutex_unlock(&devices_mutex); | ||
463 | 465 | ||
464 | return ret; | 466 | return ret; |
465 | } | 467 | } |
@@ -481,30 +483,36 @@ static void ubiblock_cleanup(struct ubiblock *dev) | |||
481 | int ubiblock_remove(struct ubi_volume_info *vi) | 483 | int ubiblock_remove(struct ubi_volume_info *vi) |
482 | { | 484 | { |
483 | struct ubiblock *dev; | 485 | struct ubiblock *dev; |
486 | int ret; | ||
484 | 487 | ||
485 | mutex_lock(&devices_mutex); | 488 | mutex_lock(&devices_mutex); |
486 | dev = find_dev_nolock(vi->ubi_num, vi->vol_id); | 489 | dev = find_dev_nolock(vi->ubi_num, vi->vol_id); |
487 | if (!dev) { | 490 | if (!dev) { |
488 | mutex_unlock(&devices_mutex); | 491 | ret = -ENODEV; |
489 | return -ENODEV; | 492 | goto out_unlock; |
490 | } | 493 | } |
491 | 494 | ||
492 | /* Found a device, let's lock it so we can check if it's busy */ | 495 | /* Found a device, let's lock it so we can check if it's busy */ |
493 | mutex_lock(&dev->dev_mutex); | 496 | mutex_lock(&dev->dev_mutex); |
494 | if (dev->refcnt > 0) { | 497 | if (dev->refcnt > 0) { |
495 | mutex_unlock(&dev->dev_mutex); | 498 | ret = -EBUSY; |
496 | mutex_unlock(&devices_mutex); | 499 | goto out_unlock_dev; |
497 | return -EBUSY; | ||
498 | } | 500 | } |
499 | 501 | ||
500 | /* Remove from device list */ | 502 | /* Remove from device list */ |
501 | list_del(&dev->list); | 503 | list_del(&dev->list); |
502 | mutex_unlock(&devices_mutex); | ||
503 | |||
504 | ubiblock_cleanup(dev); | 504 | ubiblock_cleanup(dev); |
505 | mutex_unlock(&dev->dev_mutex); | 505 | mutex_unlock(&dev->dev_mutex); |
506 | mutex_unlock(&devices_mutex); | ||
507 | |||
506 | kfree(dev); | 508 | kfree(dev); |
507 | return 0; | 509 | return 0; |
510 | |||
511 | out_unlock_dev: | ||
512 | mutex_unlock(&dev->dev_mutex); | ||
513 | out_unlock: | ||
514 | mutex_unlock(&devices_mutex); | ||
515 | return ret; | ||
508 | } | 516 | } |
509 | 517 | ||
510 | static int ubiblock_resize(struct ubi_volume_info *vi) | 518 | static int ubiblock_resize(struct ubi_volume_info *vi) |
@@ -633,6 +641,7 @@ static void ubiblock_remove_all(void) | |||
633 | struct ubiblock *next; | 641 | struct ubiblock *next; |
634 | struct ubiblock *dev; | 642 | struct ubiblock *dev; |
635 | 643 | ||
644 | mutex_lock(&devices_mutex); | ||
636 | list_for_each_entry_safe(dev, next, &ubiblock_devices, list) { | 645 | list_for_each_entry_safe(dev, next, &ubiblock_devices, list) { |
637 | /* The module is being forcefully removed */ | 646 | /* The module is being forcefully removed */ |
638 | WARN_ON(dev->desc); | 647 | WARN_ON(dev->desc); |
@@ -641,6 +650,7 @@ static void ubiblock_remove_all(void) | |||
641 | ubiblock_cleanup(dev); | 650 | ubiblock_cleanup(dev); |
642 | kfree(dev); | 651 | kfree(dev); |
643 | } | 652 | } |
653 | mutex_unlock(&devices_mutex); | ||
644 | } | 654 | } |
645 | 655 | ||
646 | int __init ubiblock_init(void) | 656 | int __init ubiblock_init(void) |
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 27e2352fcc42..b227f81e4a7e 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c | |||
@@ -430,7 +430,7 @@ static int gs_usb_set_bittiming(struct net_device *netdev) | |||
430 | dev_err(netdev->dev.parent, "Couldn't set bittimings (err=%d)", | 430 | dev_err(netdev->dev.parent, "Couldn't set bittimings (err=%d)", |
431 | rc); | 431 | rc); |
432 | 432 | ||
433 | return rc; | 433 | return (rc > 0) ? 0 : rc; |
434 | } | 434 | } |
435 | 435 | ||
436 | static void gs_usb_xmit_callback(struct urb *urb) | 436 | static void gs_usb_xmit_callback(struct urb *urb) |
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c index ce44a033f63b..64cc86a82b2d 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c | |||
@@ -184,7 +184,7 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail) | |||
184 | void *cmd_head = pcan_usb_fd_cmd_buffer(dev); | 184 | void *cmd_head = pcan_usb_fd_cmd_buffer(dev); |
185 | int err = 0; | 185 | int err = 0; |
186 | u8 *packet_ptr; | 186 | u8 *packet_ptr; |
187 | int i, n = 1, packet_len; | 187 | int packet_len; |
188 | ptrdiff_t cmd_len; | 188 | ptrdiff_t cmd_len; |
189 | 189 | ||
190 | /* usb device unregistered? */ | 190 | /* usb device unregistered? */ |
@@ -201,17 +201,13 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail) | |||
201 | } | 201 | } |
202 | 202 | ||
203 | packet_ptr = cmd_head; | 203 | packet_ptr = cmd_head; |
204 | packet_len = cmd_len; | ||
204 | 205 | ||
205 | /* firmware is not able to re-assemble 512 bytes buffer in full-speed */ | 206 | /* firmware is not able to re-assemble 512 bytes buffer in full-speed */ |
206 | if ((dev->udev->speed != USB_SPEED_HIGH) && | 207 | if (unlikely(dev->udev->speed != USB_SPEED_HIGH)) |
207 | (cmd_len > PCAN_UFD_LOSPD_PKT_SIZE)) { | 208 | packet_len = min(packet_len, PCAN_UFD_LOSPD_PKT_SIZE); |
208 | packet_len = PCAN_UFD_LOSPD_PKT_SIZE; | ||
209 | n += cmd_len / packet_len; | ||
210 | } else { | ||
211 | packet_len = cmd_len; | ||
212 | } | ||
213 | 209 | ||
214 | for (i = 0; i < n; i++) { | 210 | do { |
215 | err = usb_bulk_msg(dev->udev, | 211 | err = usb_bulk_msg(dev->udev, |
216 | usb_sndbulkpipe(dev->udev, | 212 | usb_sndbulkpipe(dev->udev, |
217 | PCAN_USBPRO_EP_CMDOUT), | 213 | PCAN_USBPRO_EP_CMDOUT), |
@@ -224,7 +220,12 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail) | |||
224 | } | 220 | } |
225 | 221 | ||
226 | packet_ptr += packet_len; | 222 | packet_ptr += packet_len; |
227 | } | 223 | cmd_len -= packet_len; |
224 | |||
225 | if (cmd_len < PCAN_UFD_LOSPD_PKT_SIZE) | ||
226 | packet_len = cmd_len; | ||
227 | |||
228 | } while (packet_len > 0); | ||
228 | 229 | ||
229 | return err; | 230 | return err; |
230 | } | 231 | } |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 4744919440e0..a38a9cb3d544 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c | |||
@@ -2014,6 +2014,18 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) | |||
2014 | return 0; | 2014 | return 0; |
2015 | } | 2015 | } |
2016 | 2016 | ||
2017 | static void bnxt_init_cp_rings(struct bnxt *bp) | ||
2018 | { | ||
2019 | int i; | ||
2020 | |||
2021 | for (i = 0; i < bp->cp_nr_rings; i++) { | ||
2022 | struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; | ||
2023 | struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; | ||
2024 | |||
2025 | ring->fw_ring_id = INVALID_HW_RING_ID; | ||
2026 | } | ||
2027 | } | ||
2028 | |||
2017 | static int bnxt_init_rx_rings(struct bnxt *bp) | 2029 | static int bnxt_init_rx_rings(struct bnxt *bp) |
2018 | { | 2030 | { |
2019 | int i, rc = 0; | 2031 | int i, rc = 0; |
@@ -3977,6 +3989,7 @@ static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init) | |||
3977 | 3989 | ||
3978 | static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init) | 3990 | static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init) |
3979 | { | 3991 | { |
3992 | bnxt_init_cp_rings(bp); | ||
3980 | bnxt_init_rx_rings(bp); | 3993 | bnxt_init_rx_rings(bp); |
3981 | bnxt_init_tx_rings(bp); | 3994 | bnxt_init_tx_rings(bp); |
3982 | bnxt_init_ring_grps(bp, irq_re_init); | 3995 | bnxt_init_ring_grps(bp, irq_re_init); |
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 3613469dc5c6..ab53e0cfb4dc 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
@@ -14228,7 +14228,9 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu) | |||
14228 | /* Reset PHY, otherwise the read DMA engine will be in a mode that | 14228 | /* Reset PHY, otherwise the read DMA engine will be in a mode that |
14229 | * breaks all requests to 256 bytes. | 14229 | * breaks all requests to 256 bytes. |
14230 | */ | 14230 | */ |
14231 | if (tg3_asic_rev(tp) == ASIC_REV_57766) | 14231 | if (tg3_asic_rev(tp) == ASIC_REV_57766 || |
14232 | tg3_asic_rev(tp) == ASIC_REV_5717 || | ||
14233 | tg3_asic_rev(tp) == ASIC_REV_5719) | ||
14232 | reset_phy = true; | 14234 | reset_phy = true; |
14233 | 14235 | ||
14234 | err = tg3_restart_hw(tp, reset_phy); | 14236 | err = tg3_restart_hw(tp, reset_phy); |
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c index 9e59663a6ead..0f6811860ad5 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c | |||
@@ -1930,13 +1930,13 @@ static void | |||
1930 | bfa_ioc_send_enable(struct bfa_ioc *ioc) | 1930 | bfa_ioc_send_enable(struct bfa_ioc *ioc) |
1931 | { | 1931 | { |
1932 | struct bfi_ioc_ctrl_req enable_req; | 1932 | struct bfi_ioc_ctrl_req enable_req; |
1933 | struct timeval tv; | ||
1934 | 1933 | ||
1935 | bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ, | 1934 | bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ, |
1936 | bfa_ioc_portid(ioc)); | 1935 | bfa_ioc_portid(ioc)); |
1937 | enable_req.clscode = htons(ioc->clscode); | 1936 | enable_req.clscode = htons(ioc->clscode); |
1938 | do_gettimeofday(&tv); | 1937 | enable_req.rsvd = htons(0); |
1939 | enable_req.tv_sec = ntohl(tv.tv_sec); | 1938 | /* overflow in 2106 */ |
1939 | enable_req.tv_sec = ntohl(ktime_get_real_seconds()); | ||
1940 | bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req)); | 1940 | bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req)); |
1941 | } | 1941 | } |
1942 | 1942 | ||
@@ -1947,6 +1947,10 @@ bfa_ioc_send_disable(struct bfa_ioc *ioc) | |||
1947 | 1947 | ||
1948 | bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ, | 1948 | bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ, |
1949 | bfa_ioc_portid(ioc)); | 1949 | bfa_ioc_portid(ioc)); |
1950 | disable_req.clscode = htons(ioc->clscode); | ||
1951 | disable_req.rsvd = htons(0); | ||
1952 | /* overflow in 2106 */ | ||
1953 | disable_req.tv_sec = ntohl(ktime_get_real_seconds()); | ||
1950 | bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req)); | 1954 | bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req)); |
1951 | } | 1955 | } |
1952 | 1956 | ||
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c index 8fc246ea1fb8..a4ad782007ce 100644 --- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c +++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c | |||
@@ -324,7 +324,7 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf, | |||
324 | return PTR_ERR(kern_buf); | 324 | return PTR_ERR(kern_buf); |
325 | 325 | ||
326 | rc = sscanf(kern_buf, "%x:%x", &addr, &len); | 326 | rc = sscanf(kern_buf, "%x:%x", &addr, &len); |
327 | if (rc < 2) { | 327 | if (rc < 2 || len > UINT_MAX >> 2) { |
328 | netdev_warn(bnad->netdev, "failed to read user buffer\n"); | 328 | netdev_warn(bnad->netdev, "failed to read user buffer\n"); |
329 | kfree(kern_buf); | 329 | kfree(kern_buf); |
330 | return -EINVAL; | 330 | return -EINVAL; |
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 91a5a0ae9cd7..1908a38e7f31 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c | |||
@@ -1362,6 +1362,9 @@ out: | |||
1362 | * Checks to see of the link status of the hardware has changed. If a | 1362 | * Checks to see of the link status of the hardware has changed. If a |
1363 | * change in link status has been detected, then we read the PHY registers | 1363 | * change in link status has been detected, then we read the PHY registers |
1364 | * to get the current speed/duplex if link exists. | 1364 | * to get the current speed/duplex if link exists. |
1365 | * | ||
1366 | * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link | ||
1367 | * up). | ||
1365 | **/ | 1368 | **/ |
1366 | static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) | 1369 | static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) |
1367 | { | 1370 | { |
@@ -1377,7 +1380,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) | |||
1377 | * Change or Rx Sequence Error interrupt. | 1380 | * Change or Rx Sequence Error interrupt. |
1378 | */ | 1381 | */ |
1379 | if (!mac->get_link_status) | 1382 | if (!mac->get_link_status) |
1380 | return 0; | 1383 | return 1; |
1381 | 1384 | ||
1382 | /* First we want to see if the MII Status Register reports | 1385 | /* First we want to see if the MII Status Register reports |
1383 | * link. If so, then we want to get the current speed/duplex | 1386 | * link. If so, then we want to get the current speed/duplex |
@@ -1585,10 +1588,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) | |||
1585 | * different link partner. | 1588 | * different link partner. |
1586 | */ | 1589 | */ |
1587 | ret_val = e1000e_config_fc_after_link_up(hw); | 1590 | ret_val = e1000e_config_fc_after_link_up(hw); |
1588 | if (ret_val) | 1591 | if (ret_val) { |
1589 | e_dbg("Error configuring flow control\n"); | 1592 | e_dbg("Error configuring flow control\n"); |
1593 | return ret_val; | ||
1594 | } | ||
1590 | 1595 | ||
1591 | return ret_val; | 1596 | return 1; |
1592 | } | 1597 | } |
1593 | 1598 | ||
1594 | static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) | 1599 | static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) |
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c index acfb8b1f88a7..a8f9d0012d82 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c | |||
@@ -126,6 +126,9 @@ process_mbx: | |||
126 | struct fm10k_mbx_info *mbx = &vf_info->mbx; | 126 | struct fm10k_mbx_info *mbx = &vf_info->mbx; |
127 | u16 glort = vf_info->glort; | 127 | u16 glort = vf_info->glort; |
128 | 128 | ||
129 | /* process the SM mailbox first to drain outgoing messages */ | ||
130 | hw->mbx.ops.process(hw, &hw->mbx); | ||
131 | |||
129 | /* verify port mapping is valid, if not reset port */ | 132 | /* verify port mapping is valid, if not reset port */ |
130 | if (vf_info->vf_flags && !fm10k_glort_valid_pf(hw, glort)) | 133 | if (vf_info->vf_flags && !fm10k_glort_valid_pf(hw, glort)) |
131 | hw->iov.ops.reset_lport(hw, vf_info); | 134 | hw->iov.ops.reset_lport(hw, vf_info); |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index b5b228c9a030..06b38f50980c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c | |||
@@ -4201,8 +4201,12 @@ static void i40e_napi_enable_all(struct i40e_vsi *vsi) | |||
4201 | if (!vsi->netdev) | 4201 | if (!vsi->netdev) |
4202 | return; | 4202 | return; |
4203 | 4203 | ||
4204 | for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) | 4204 | for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) { |
4205 | napi_enable(&vsi->q_vectors[q_idx]->napi); | 4205 | struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx]; |
4206 | |||
4207 | if (q_vector->rx.ring || q_vector->tx.ring) | ||
4208 | napi_enable(&q_vector->napi); | ||
4209 | } | ||
4206 | } | 4210 | } |
4207 | 4211 | ||
4208 | /** | 4212 | /** |
@@ -4216,8 +4220,12 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi) | |||
4216 | if (!vsi->netdev) | 4220 | if (!vsi->netdev) |
4217 | return; | 4221 | return; |
4218 | 4222 | ||
4219 | for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) | 4223 | for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) { |
4220 | napi_disable(&vsi->q_vectors[q_idx]->napi); | 4224 | struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx]; |
4225 | |||
4226 | if (q_vector->rx.ring || q_vector->tx.ring) | ||
4227 | napi_disable(&q_vector->napi); | ||
4228 | } | ||
4221 | } | 4229 | } |
4222 | 4230 | ||
4223 | /** | 4231 | /** |
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index c55552c3d2f9..02b23f6277fb 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c | |||
@@ -3005,6 +3005,8 @@ static int igb_sw_init(struct igb_adapter *adapter) | |||
3005 | /* Setup and initialize a copy of the hw vlan table array */ | 3005 | /* Setup and initialize a copy of the hw vlan table array */ |
3006 | adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32), | 3006 | adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32), |
3007 | GFP_ATOMIC); | 3007 | GFP_ATOMIC); |
3008 | if (!adapter->shadow_vfta) | ||
3009 | return -ENOMEM; | ||
3008 | 3010 | ||
3009 | /* This call may decrease the number of queues */ | 3011 | /* This call may decrease the number of queues */ |
3010 | if (igb_init_interrupt_scheme(adapter, true)) { | 3012 | if (igb_init_interrupt_scheme(adapter, true)) { |
@@ -3172,7 +3174,7 @@ static int __igb_close(struct net_device *netdev, bool suspending) | |||
3172 | 3174 | ||
3173 | static int igb_close(struct net_device *netdev) | 3175 | static int igb_close(struct net_device *netdev) |
3174 | { | 3176 | { |
3175 | if (netif_device_present(netdev)) | 3177 | if (netif_device_present(netdev) || netdev->dismantle) |
3176 | return __igb_close(netdev, false); | 3178 | return __igb_close(netdev, false); |
3177 | return 0; | 3179 | return 0; |
3178 | } | 3180 | } |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index ce61b36b94f1..105dd00ddc1a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | |||
@@ -3620,10 +3620,10 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, | |||
3620 | fw_cmd.ver_build = build; | 3620 | fw_cmd.ver_build = build; |
3621 | fw_cmd.ver_sub = sub; | 3621 | fw_cmd.ver_sub = sub; |
3622 | fw_cmd.hdr.checksum = 0; | 3622 | fw_cmd.hdr.checksum = 0; |
3623 | fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, | ||
3624 | (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); | ||
3625 | fw_cmd.pad = 0; | 3623 | fw_cmd.pad = 0; |
3626 | fw_cmd.pad2 = 0; | 3624 | fw_cmd.pad2 = 0; |
3625 | fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, | ||
3626 | (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); | ||
3627 | 3627 | ||
3628 | for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { | 3628 | for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { |
3629 | ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, | 3629 | ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 31f864fb30c1..a75f2e3ce86f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | |||
@@ -564,6 +564,8 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, | |||
564 | /* convert offset from words to bytes */ | 564 | /* convert offset from words to bytes */ |
565 | buffer.address = cpu_to_be32((offset + current_word) * 2); | 565 | buffer.address = cpu_to_be32((offset + current_word) * 2); |
566 | buffer.length = cpu_to_be16(words_to_read * 2); | 566 | buffer.length = cpu_to_be16(words_to_read * 2); |
567 | buffer.pad2 = 0; | ||
568 | buffer.pad3 = 0; | ||
567 | 569 | ||
568 | status = ixgbe_host_interface_command(hw, (u32 *)&buffer, | 570 | status = ixgbe_host_interface_command(hw, (u32 *)&buffer, |
569 | sizeof(buffer), | 571 | sizeof(buffer), |
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c index fc2fb25343f4..c122b3b99cd8 100644 --- a/drivers/net/ethernet/marvell/mvmdio.c +++ b/drivers/net/ethernet/marvell/mvmdio.c | |||
@@ -241,7 +241,8 @@ static int orion_mdio_probe(struct platform_device *pdev) | |||
241 | dev->regs + MVMDIO_ERR_INT_MASK); | 241 | dev->regs + MVMDIO_ERR_INT_MASK); |
242 | 242 | ||
243 | } else if (dev->err_interrupt == -EPROBE_DEFER) { | 243 | } else if (dev->err_interrupt == -EPROBE_DEFER) { |
244 | return -EPROBE_DEFER; | 244 | ret = -EPROBE_DEFER; |
245 | goto out_mdio; | ||
245 | } | 246 | } |
246 | 247 | ||
247 | mutex_init(&dev->lock); | 248 | mutex_init(&dev->lock); |
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 15056f06754a..7430dd44019e 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c | |||
@@ -914,6 +914,10 @@ static void mvneta_port_disable(struct mvneta_port *pp) | |||
914 | val &= ~MVNETA_GMAC0_PORT_ENABLE; | 914 | val &= ~MVNETA_GMAC0_PORT_ENABLE; |
915 | mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); | 915 | mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); |
916 | 916 | ||
917 | pp->link = 0; | ||
918 | pp->duplex = -1; | ||
919 | pp->speed = 0; | ||
920 | |||
917 | udelay(200); | 921 | udelay(200); |
918 | } | 922 | } |
919 | 923 | ||
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 4f34e1b79705..ac92685dd4e5 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c | |||
@@ -5666,6 +5666,7 @@ static void mvpp2_set_rx_mode(struct net_device *dev) | |||
5666 | int id = port->id; | 5666 | int id = port->id; |
5667 | bool allmulti = dev->flags & IFF_ALLMULTI; | 5667 | bool allmulti = dev->flags & IFF_ALLMULTI; |
5668 | 5668 | ||
5669 | retry: | ||
5669 | mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC); | 5670 | mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC); |
5670 | mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti); | 5671 | mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti); |
5671 | mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti); | 5672 | mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti); |
@@ -5673,9 +5674,13 @@ static void mvpp2_set_rx_mode(struct net_device *dev) | |||
5673 | /* Remove all port->id's mcast enries */ | 5674 | /* Remove all port->id's mcast enries */ |
5674 | mvpp2_prs_mcast_del_all(priv, id); | 5675 | mvpp2_prs_mcast_del_all(priv, id); |
5675 | 5676 | ||
5676 | if (allmulti && !netdev_mc_empty(dev)) { | 5677 | if (!allmulti) { |
5677 | netdev_for_each_mc_addr(ha, dev) | 5678 | netdev_for_each_mc_addr(ha, dev) { |
5678 | mvpp2_prs_mac_da_accept(priv, id, ha->addr, true); | 5679 | if (mvpp2_prs_mac_da_accept(priv, id, ha->addr, true)) { |
5680 | allmulti = true; | ||
5681 | goto retry; | ||
5682 | } | ||
5683 | } | ||
5679 | } | 5684 | } |
5680 | } | 5685 | } |
5681 | 5686 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 168823dde79f..d8359ffba026 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c | |||
@@ -280,6 +280,9 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) | |||
280 | u64 in_param = 0; | 280 | u64 in_param = 0; |
281 | int err; | 281 | int err; |
282 | 282 | ||
283 | if (!cnt) | ||
284 | return; | ||
285 | |||
283 | if (mlx4_is_mfunc(dev)) { | 286 | if (mlx4_is_mfunc(dev)) { |
284 | set_param_l(&in_param, base_qpn); | 287 | set_param_l(&in_param, base_qpn); |
285 | set_param_h(&in_param, cnt); | 288 | set_param_h(&in_param, cnt); |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index f9640d5ce6ba..b4f3cb55605e 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | |||
@@ -3850,7 +3850,7 @@ static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter) | |||
3850 | struct list_head *head = &mbx->cmd_q; | 3850 | struct list_head *head = &mbx->cmd_q; |
3851 | struct qlcnic_cmd_args *cmd = NULL; | 3851 | struct qlcnic_cmd_args *cmd = NULL; |
3852 | 3852 | ||
3853 | spin_lock(&mbx->queue_lock); | 3853 | spin_lock_bh(&mbx->queue_lock); |
3854 | 3854 | ||
3855 | while (!list_empty(head)) { | 3855 | while (!list_empty(head)) { |
3856 | cmd = list_entry(head->next, struct qlcnic_cmd_args, list); | 3856 | cmd = list_entry(head->next, struct qlcnic_cmd_args, list); |
@@ -3861,7 +3861,7 @@ static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter) | |||
3861 | qlcnic_83xx_notify_cmd_completion(adapter, cmd); | 3861 | qlcnic_83xx_notify_cmd_completion(adapter, cmd); |
3862 | } | 3862 | } |
3863 | 3863 | ||
3864 | spin_unlock(&mbx->queue_lock); | 3864 | spin_unlock_bh(&mbx->queue_lock); |
3865 | } | 3865 | } |
3866 | 3866 | ||
3867 | static int qlcnic_83xx_check_mbx_status(struct qlcnic_adapter *adapter) | 3867 | static int qlcnic_83xx_check_mbx_status(struct qlcnic_adapter *adapter) |
@@ -3897,12 +3897,12 @@ static void qlcnic_83xx_dequeue_mbx_cmd(struct qlcnic_adapter *adapter, | |||
3897 | { | 3897 | { |
3898 | struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; | 3898 | struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; |
3899 | 3899 | ||
3900 | spin_lock(&mbx->queue_lock); | 3900 | spin_lock_bh(&mbx->queue_lock); |
3901 | 3901 | ||
3902 | list_del(&cmd->list); | 3902 | list_del(&cmd->list); |
3903 | mbx->num_cmds--; | 3903 | mbx->num_cmds--; |
3904 | 3904 | ||
3905 | spin_unlock(&mbx->queue_lock); | 3905 | spin_unlock_bh(&mbx->queue_lock); |
3906 | 3906 | ||
3907 | qlcnic_83xx_notify_cmd_completion(adapter, cmd); | 3907 | qlcnic_83xx_notify_cmd_completion(adapter, cmd); |
3908 | } | 3908 | } |
@@ -3967,7 +3967,7 @@ static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter, | |||
3967 | init_completion(&cmd->completion); | 3967 | init_completion(&cmd->completion); |
3968 | cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_UNKNOWN; | 3968 | cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_UNKNOWN; |
3969 | 3969 | ||
3970 | spin_lock(&mbx->queue_lock); | 3970 | spin_lock_bh(&mbx->queue_lock); |
3971 | 3971 | ||
3972 | list_add_tail(&cmd->list, &mbx->cmd_q); | 3972 | list_add_tail(&cmd->list, &mbx->cmd_q); |
3973 | mbx->num_cmds++; | 3973 | mbx->num_cmds++; |
@@ -3975,7 +3975,7 @@ static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter, | |||
3975 | *timeout = cmd->total_cmds * QLC_83XX_MBX_TIMEOUT; | 3975 | *timeout = cmd->total_cmds * QLC_83XX_MBX_TIMEOUT; |
3976 | queue_work(mbx->work_q, &mbx->work); | 3976 | queue_work(mbx->work_q, &mbx->work); |
3977 | 3977 | ||
3978 | spin_unlock(&mbx->queue_lock); | 3978 | spin_unlock_bh(&mbx->queue_lock); |
3979 | 3979 | ||
3980 | return 0; | 3980 | return 0; |
3981 | } | 3981 | } |
@@ -4071,15 +4071,15 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work) | |||
4071 | mbx->rsp_status = QLC_83XX_MBX_RESPONSE_WAIT; | 4071 | mbx->rsp_status = QLC_83XX_MBX_RESPONSE_WAIT; |
4072 | spin_unlock_irqrestore(&mbx->aen_lock, flags); | 4072 | spin_unlock_irqrestore(&mbx->aen_lock, flags); |
4073 | 4073 | ||
4074 | spin_lock(&mbx->queue_lock); | 4074 | spin_lock_bh(&mbx->queue_lock); |
4075 | 4075 | ||
4076 | if (list_empty(head)) { | 4076 | if (list_empty(head)) { |
4077 | spin_unlock(&mbx->queue_lock); | 4077 | spin_unlock_bh(&mbx->queue_lock); |
4078 | return; | 4078 | return; |
4079 | } | 4079 | } |
4080 | cmd = list_entry(head->next, struct qlcnic_cmd_args, list); | 4080 | cmd = list_entry(head->next, struct qlcnic_cmd_args, list); |
4081 | 4081 | ||
4082 | spin_unlock(&mbx->queue_lock); | 4082 | spin_unlock_bh(&mbx->queue_lock); |
4083 | 4083 | ||
4084 | mbx_ops->encode_cmd(adapter, cmd); | 4084 | mbx_ops->encode_cmd(adapter, cmd); |
4085 | mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_REQUEST); | 4085 | mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_REQUEST); |
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index c5ea1018cb47..3783c40f568b 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
@@ -1387,7 +1387,7 @@ DECLARE_RTL_COND(rtl_ocp_tx_cond) | |||
1387 | { | 1387 | { |
1388 | void __iomem *ioaddr = tp->mmio_addr; | 1388 | void __iomem *ioaddr = tp->mmio_addr; |
1389 | 1389 | ||
1390 | return RTL_R8(IBISR0) & 0x02; | 1390 | return RTL_R8(IBISR0) & 0x20; |
1391 | } | 1391 | } |
1392 | 1392 | ||
1393 | static void rtl8168ep_stop_cmac(struct rtl8169_private *tp) | 1393 | static void rtl8168ep_stop_cmac(struct rtl8169_private *tp) |
@@ -1395,7 +1395,7 @@ static void rtl8168ep_stop_cmac(struct rtl8169_private *tp) | |||
1395 | void __iomem *ioaddr = tp->mmio_addr; | 1395 | void __iomem *ioaddr = tp->mmio_addr; |
1396 | 1396 | ||
1397 | RTL_W8(IBCR2, RTL_R8(IBCR2) & ~0x01); | 1397 | RTL_W8(IBCR2, RTL_R8(IBCR2) & ~0x01); |
1398 | rtl_msleep_loop_wait_low(tp, &rtl_ocp_tx_cond, 50, 2000); | 1398 | rtl_msleep_loop_wait_high(tp, &rtl_ocp_tx_cond, 50, 2000); |
1399 | RTL_W8(IBISR0, RTL_R8(IBISR0) | 0x20); | 1399 | RTL_W8(IBISR0, RTL_R8(IBISR0) | 0x20); |
1400 | RTL_W8(IBCR0, RTL_R8(IBCR0) & ~0x01); | 1400 | RTL_W8(IBCR0, RTL_R8(IBCR0) & ~0x01); |
1401 | } | 1401 | } |
@@ -2205,19 +2205,14 @@ static bool rtl8169_do_counters(struct net_device *dev, u32 counter_cmd) | |||
2205 | void __iomem *ioaddr = tp->mmio_addr; | 2205 | void __iomem *ioaddr = tp->mmio_addr; |
2206 | dma_addr_t paddr = tp->counters_phys_addr; | 2206 | dma_addr_t paddr = tp->counters_phys_addr; |
2207 | u32 cmd; | 2207 | u32 cmd; |
2208 | bool ret; | ||
2209 | 2208 | ||
2210 | RTL_W32(CounterAddrHigh, (u64)paddr >> 32); | 2209 | RTL_W32(CounterAddrHigh, (u64)paddr >> 32); |
2210 | RTL_R32(CounterAddrHigh); | ||
2211 | cmd = (u64)paddr & DMA_BIT_MASK(32); | 2211 | cmd = (u64)paddr & DMA_BIT_MASK(32); |
2212 | RTL_W32(CounterAddrLow, cmd); | 2212 | RTL_W32(CounterAddrLow, cmd); |
2213 | RTL_W32(CounterAddrLow, cmd | counter_cmd); | 2213 | RTL_W32(CounterAddrLow, cmd | counter_cmd); |
2214 | 2214 | ||
2215 | ret = rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000); | 2215 | return rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000); |
2216 | |||
2217 | RTL_W32(CounterAddrLow, 0); | ||
2218 | RTL_W32(CounterAddrHigh, 0); | ||
2219 | |||
2220 | return ret; | ||
2221 | } | 2216 | } |
2222 | 2217 | ||
2223 | static bool rtl8169_reset_counters(struct net_device *dev) | 2218 | static bool rtl8169_reset_counters(struct net_device *dev) |
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 479af106aaeb..424d1dee55c9 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c | |||
@@ -3176,18 +3176,37 @@ static int sh_eth_drv_probe(struct platform_device *pdev) | |||
3176 | /* ioremap the TSU registers */ | 3176 | /* ioremap the TSU registers */ |
3177 | if (mdp->cd->tsu) { | 3177 | if (mdp->cd->tsu) { |
3178 | struct resource *rtsu; | 3178 | struct resource *rtsu; |
3179 | |||
3179 | rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1); | 3180 | rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
3180 | mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu); | 3181 | if (!rtsu) { |
3181 | if (IS_ERR(mdp->tsu_addr)) { | 3182 | dev_err(&pdev->dev, "no TSU resource\n"); |
3182 | ret = PTR_ERR(mdp->tsu_addr); | 3183 | ret = -ENODEV; |
3184 | goto out_release; | ||
3185 | } | ||
3186 | /* We can only request the TSU region for the first port | ||
3187 | * of the two sharing this TSU for the probe to succeed... | ||
3188 | */ | ||
3189 | if (devno % 2 == 0 && | ||
3190 | !devm_request_mem_region(&pdev->dev, rtsu->start, | ||
3191 | resource_size(rtsu), | ||
3192 | dev_name(&pdev->dev))) { | ||
3193 | dev_err(&pdev->dev, "can't request TSU resource.\n"); | ||
3194 | ret = -EBUSY; | ||
3195 | goto out_release; | ||
3196 | } | ||
3197 | mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start, | ||
3198 | resource_size(rtsu)); | ||
3199 | if (!mdp->tsu_addr) { | ||
3200 | dev_err(&pdev->dev, "TSU region ioremap() failed.\n"); | ||
3201 | ret = -ENOMEM; | ||
3183 | goto out_release; | 3202 | goto out_release; |
3184 | } | 3203 | } |
3185 | mdp->port = devno % 2; | 3204 | mdp->port = devno % 2; |
3186 | ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER; | 3205 | ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER; |
3187 | } | 3206 | } |
3188 | 3207 | ||
3189 | /* initialize first or needed device */ | 3208 | /* Need to init only the first port of the two sharing a TSU */ |
3190 | if (!devno || pd->needs_init) { | 3209 | if (devno % 2 == 0) { |
3191 | if (mdp->cd->chip_reset) | 3210 | if (mdp->cd->chip_reset) |
3192 | mdp->cd->chip_reset(ndev); | 3211 | mdp->cd->chip_reset(ndev); |
3193 | 3212 | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 4b100ef4af9f..5adaf537513b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -272,8 +272,14 @@ bool stmmac_eee_init(struct stmmac_priv *priv) | |||
272 | { | 272 | { |
273 | char *phy_bus_name = priv->plat->phy_bus_name; | 273 | char *phy_bus_name = priv->plat->phy_bus_name; |
274 | unsigned long flags; | 274 | unsigned long flags; |
275 | int interface = priv->plat->interface; | ||
275 | bool ret = false; | 276 | bool ret = false; |
276 | 277 | ||
278 | if ((interface != PHY_INTERFACE_MODE_MII) && | ||
279 | (interface != PHY_INTERFACE_MODE_GMII) && | ||
280 | !phy_interface_mode_is_rgmii(interface)) | ||
281 | goto out; | ||
282 | |||
277 | /* Using PCS we cannot dial with the phy registers at this stage | 283 | /* Using PCS we cannot dial with the phy registers at this stage |
278 | * so we do not support extra feature like EEE. | 284 | * so we do not support extra feature like EEE. |
279 | */ | 285 | */ |
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig index 4f5c024c6192..5d5c0c433f3e 100644 --- a/drivers/net/ethernet/xilinx/Kconfig +++ b/drivers/net/ethernet/xilinx/Kconfig | |||
@@ -34,6 +34,7 @@ config XILINX_AXI_EMAC | |||
34 | config XILINX_LL_TEMAC | 34 | config XILINX_LL_TEMAC |
35 | tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver" | 35 | tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver" |
36 | depends on (PPC || MICROBLAZE) | 36 | depends on (PPC || MICROBLAZE) |
37 | depends on !64BIT || BROKEN | ||
37 | select PHYLIB | 38 | select PHYLIB |
38 | ---help--- | 39 | ---help--- |
39 | This driver supports the Xilinx 10/100/1000 LocalLink TEMAC | 40 | This driver supports the Xilinx 10/100/1000 LocalLink TEMAC |
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c index a0849f49bbec..c0192f97ecc8 100644 --- a/drivers/net/irda/vlsi_ir.c +++ b/drivers/net/irda/vlsi_ir.c | |||
@@ -418,8 +418,9 @@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr | |||
418 | memset(rd, 0, sizeof(*rd)); | 418 | memset(rd, 0, sizeof(*rd)); |
419 | rd->hw = hwmap + i; | 419 | rd->hw = hwmap + i; |
420 | rd->buf = kmalloc(len, GFP_KERNEL|GFP_DMA); | 420 | rd->buf = kmalloc(len, GFP_KERNEL|GFP_DMA); |
421 | if (rd->buf == NULL || | 421 | if (rd->buf) |
422 | !(busaddr = pci_map_single(pdev, rd->buf, len, dir))) { | 422 | busaddr = pci_map_single(pdev, rd->buf, len, dir); |
423 | if (rd->buf == NULL || pci_dma_mapping_error(pdev, busaddr)) { | ||
423 | if (rd->buf) { | 424 | if (rd->buf) { |
424 | net_err_ratelimited("%s: failed to create PCI-MAP for %p\n", | 425 | net_err_ratelimited("%s: failed to create PCI-MAP for %p\n", |
425 | __func__, rd->buf); | 426 | __func__, rd->buf); |
@@ -430,8 +431,7 @@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr | |||
430 | rd = r->rd + j; | 431 | rd = r->rd + j; |
431 | busaddr = rd_get_addr(rd); | 432 | busaddr = rd_get_addr(rd); |
432 | rd_set_addr_status(rd, 0, 0); | 433 | rd_set_addr_status(rd, 0, 0); |
433 | if (busaddr) | 434 | pci_unmap_single(pdev, busaddr, len, dir); |
434 | pci_unmap_single(pdev, busaddr, len, dir); | ||
435 | kfree(rd->buf); | 435 | kfree(rd->buf); |
436 | rd->buf = NULL; | 436 | rd->buf = NULL; |
437 | } | 437 | } |
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index 2d020a3ec0b5..37333d38b576 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c | |||
@@ -105,7 +105,7 @@ static int at803x_set_wol(struct phy_device *phydev, | |||
105 | mac = (const u8 *) ndev->dev_addr; | 105 | mac = (const u8 *) ndev->dev_addr; |
106 | 106 | ||
107 | if (!is_valid_ether_addr(mac)) | 107 | if (!is_valid_ether_addr(mac)) |
108 | return -EFAULT; | 108 | return -EINVAL; |
109 | 109 | ||
110 | for (i = 0; i < 3; i++) { | 110 | for (i = 0; i < 3; i++) { |
111 | phy_write(phydev, AT803X_MMD_ACCESS_CONTROL, | 111 | phy_write(phydev, AT803X_MMD_ACCESS_CONTROL, |
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index c8b85f1069ff..920391165f18 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c | |||
@@ -541,6 +541,7 @@ static int ksz9031_read_status(struct phy_device *phydev) | |||
541 | phydev->link = 0; | 541 | phydev->link = 0; |
542 | if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev)) | 542 | if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev)) |
543 | phydev->drv->config_intr(phydev); | 543 | phydev->drv->config_intr(phydev); |
544 | return genphy_config_aneg(phydev); | ||
544 | } | 545 | } |
545 | 546 | ||
546 | return 0; | 547 | return 0; |
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index 4e0068e775f9..b7b859c3a0c7 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c | |||
@@ -860,6 +860,7 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m, | |||
860 | struct pppoe_hdr *ph; | 860 | struct pppoe_hdr *ph; |
861 | struct net_device *dev; | 861 | struct net_device *dev; |
862 | char *start; | 862 | char *start; |
863 | int hlen; | ||
863 | 864 | ||
864 | lock_sock(sk); | 865 | lock_sock(sk); |
865 | if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) { | 866 | if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) { |
@@ -878,16 +879,16 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m, | |||
878 | if (total_len > (dev->mtu + dev->hard_header_len)) | 879 | if (total_len > (dev->mtu + dev->hard_header_len)) |
879 | goto end; | 880 | goto end; |
880 | 881 | ||
881 | 882 | hlen = LL_RESERVED_SPACE(dev); | |
882 | skb = sock_wmalloc(sk, total_len + dev->hard_header_len + 32, | 883 | skb = sock_wmalloc(sk, hlen + sizeof(*ph) + total_len + |
883 | 0, GFP_KERNEL); | 884 | dev->needed_tailroom, 0, GFP_KERNEL); |
884 | if (!skb) { | 885 | if (!skb) { |
885 | error = -ENOMEM; | 886 | error = -ENOMEM; |
886 | goto end; | 887 | goto end; |
887 | } | 888 | } |
888 | 889 | ||
889 | /* Reserve space for headers. */ | 890 | /* Reserve space for headers. */ |
890 | skb_reserve(skb, dev->hard_header_len); | 891 | skb_reserve(skb, hlen); |
891 | skb_reset_network_header(skb); | 892 | skb_reset_network_header(skb); |
892 | 893 | ||
893 | skb->dev = dev; | 894 | skb->dev = dev; |
@@ -948,7 +949,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb) | |||
948 | /* Copy the data if there is no space for the header or if it's | 949 | /* Copy the data if there is no space for the header or if it's |
949 | * read-only. | 950 | * read-only. |
950 | */ | 951 | */ |
951 | if (skb_cow_head(skb, sizeof(*ph) + dev->hard_header_len)) | 952 | if (skb_cow_head(skb, LL_RESERVED_SPACE(dev) + sizeof(*ph))) |
952 | goto abort; | 953 | goto abort; |
953 | 954 | ||
954 | __skb_push(skb, sizeof(*ph)); | 955 | __skb_push(skb, sizeof(*ph)); |
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 1228d0da4075..72cb30828a12 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c | |||
@@ -825,6 +825,9 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_ | |||
825 | goto error2; | 825 | goto error2; |
826 | } | 826 | } |
827 | 827 | ||
828 | /* Device-specific flags */ | ||
829 | ctx->drvflags = drvflags; | ||
830 | |||
828 | /* | 831 | /* |
829 | * Some Huawei devices have been observed to come out of reset in NDP32 mode. | 832 | * Some Huawei devices have been observed to come out of reset in NDP32 mode. |
830 | * Let's check if this is the case, and set the device to NDP16 mode again if | 833 | * Let's check if this is the case, and set the device to NDP16 mode again if |
@@ -873,9 +876,6 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_ | |||
873 | /* finish setting up the device specific data */ | 876 | /* finish setting up the device specific data */ |
874 | cdc_ncm_setup(dev); | 877 | cdc_ncm_setup(dev); |
875 | 878 | ||
876 | /* Device-specific flags */ | ||
877 | ctx->drvflags = drvflags; | ||
878 | |||
879 | /* Allocate the delayed NDP if needed. */ | 879 | /* Allocate the delayed NDP if needed. */ |
880 | if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) { | 880 | if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) { |
881 | ctx->delayed_ndp16 = kzalloc(ctx->max_ndp_size, GFP_KERNEL); | 881 | ctx->delayed_ndp16 = kzalloc(ctx->max_ndp_size, GFP_KERNEL); |
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c index e221bfcee76b..947bea81d924 100644 --- a/drivers/net/usb/cx82310_eth.c +++ b/drivers/net/usb/cx82310_eth.c | |||
@@ -293,12 +293,9 @@ static struct sk_buff *cx82310_tx_fixup(struct usbnet *dev, struct sk_buff *skb, | |||
293 | { | 293 | { |
294 | int len = skb->len; | 294 | int len = skb->len; |
295 | 295 | ||
296 | if (skb_headroom(skb) < 2) { | 296 | if (skb_cow_head(skb, 2)) { |
297 | struct sk_buff *skb2 = skb_copy_expand(skb, 2, 0, flags); | ||
298 | dev_kfree_skb_any(skb); | 297 | dev_kfree_skb_any(skb); |
299 | skb = skb2; | 298 | return NULL; |
300 | if (!skb) | ||
301 | return NULL; | ||
302 | } | 299 | } |
303 | skb_push(skb, 2); | 300 | skb_push(skb, 2); |
304 | 301 | ||
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 226668ead0d8..ebdee8f01f65 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c | |||
@@ -1859,6 +1859,7 @@ static int lan78xx_reset(struct lan78xx_net *dev) | |||
1859 | buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE; | 1859 | buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE; |
1860 | dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE; | 1860 | dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE; |
1861 | dev->rx_qlen = 4; | 1861 | dev->rx_qlen = 4; |
1862 | dev->tx_qlen = 4; | ||
1862 | } | 1863 | } |
1863 | 1864 | ||
1864 | ret = lan78xx_write_reg(dev, BURST_CAP, buf); | 1865 | ret = lan78xx_write_reg(dev, BURST_CAP, buf); |
@@ -2050,14 +2051,9 @@ static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev, | |||
2050 | { | 2051 | { |
2051 | u32 tx_cmd_a, tx_cmd_b; | 2052 | u32 tx_cmd_a, tx_cmd_b; |
2052 | 2053 | ||
2053 | if (skb_headroom(skb) < TX_OVERHEAD) { | 2054 | if (skb_cow_head(skb, TX_OVERHEAD)) { |
2054 | struct sk_buff *skb2; | ||
2055 | |||
2056 | skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags); | ||
2057 | dev_kfree_skb_any(skb); | 2055 | dev_kfree_skb_any(skb); |
2058 | skb = skb2; | 2056 | return NULL; |
2059 | if (!skb) | ||
2060 | return NULL; | ||
2061 | } | 2057 | } |
2062 | 2058 | ||
2063 | if (lan78xx_linearize(skb) < 0) | 2059 | if (lan78xx_linearize(skb) < 0) |
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index e325ca3ad565..b0ea8dee5f06 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
@@ -410,6 +410,10 @@ static const struct usb_device_id products[] = { | |||
410 | USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x69), | 410 | USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x69), |
411 | .driver_info = (unsigned long)&qmi_wwan_info, | 411 | .driver_info = (unsigned long)&qmi_wwan_info, |
412 | }, | 412 | }, |
413 | { /* Motorola Mapphone devices with MDM6600 */ | ||
414 | USB_VENDOR_AND_INTERFACE_INFO(0x22b8, USB_CLASS_VENDOR_SPEC, 0xfb, 0xff), | ||
415 | .driver_info = (unsigned long)&qmi_wwan_info, | ||
416 | }, | ||
413 | 417 | ||
414 | /* 2. Combined interface devices matching on class+protocol */ | 418 | /* 2. Combined interface devices matching on class+protocol */ |
415 | { /* Huawei E367 and possibly others in "Windows mode" */ | 419 | { /* Huawei E367 and possibly others in "Windows mode" */ |
@@ -733,6 +737,7 @@ static const struct usb_device_id products[] = { | |||
733 | {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */ | 737 | {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */ |
734 | {QMI_FIXED_INTF(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */ | 738 | {QMI_FIXED_INTF(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */ |
735 | {QMI_FIXED_INTF(0x1199, 0x907b, 10)}, /* Sierra Wireless EM74xx */ | 739 | {QMI_FIXED_INTF(0x1199, 0x907b, 10)}, /* Sierra Wireless EM74xx */ |
740 | {QMI_FIXED_INTF(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */ | ||
736 | {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ | 741 | {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ |
737 | {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ | 742 | {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ |
738 | {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ | 743 | {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ |
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 1c27e6fb99f9..89950f5cea71 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
@@ -25,12 +25,13 @@ | |||
25 | #include <uapi/linux/mdio.h> | 25 | #include <uapi/linux/mdio.h> |
26 | #include <linux/mdio.h> | 26 | #include <linux/mdio.h> |
27 | #include <linux/usb/cdc.h> | 27 | #include <linux/usb/cdc.h> |
28 | #include <linux/suspend.h> | ||
28 | 29 | ||
29 | /* Information for net-next */ | 30 | /* Information for net-next */ |
30 | #define NETNEXT_VERSION "08" | 31 | #define NETNEXT_VERSION "08" |
31 | 32 | ||
32 | /* Information for net */ | 33 | /* Information for net */ |
33 | #define NET_VERSION "2" | 34 | #define NET_VERSION "3" |
34 | 35 | ||
35 | #define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION | 36 | #define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION |
36 | #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" | 37 | #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" |
@@ -604,6 +605,9 @@ struct r8152 { | |||
604 | struct delayed_work schedule; | 605 | struct delayed_work schedule; |
605 | struct mii_if_info mii; | 606 | struct mii_if_info mii; |
606 | struct mutex control; /* use for hw setting */ | 607 | struct mutex control; /* use for hw setting */ |
608 | #ifdef CONFIG_PM_SLEEP | ||
609 | struct notifier_block pm_notifier; | ||
610 | #endif | ||
607 | 611 | ||
608 | struct rtl_ops { | 612 | struct rtl_ops { |
609 | void (*init)(struct r8152 *); | 613 | void (*init)(struct r8152 *); |
@@ -1207,6 +1211,7 @@ static void intr_callback(struct urb *urb) | |||
1207 | } | 1211 | } |
1208 | } else { | 1212 | } else { |
1209 | if (netif_carrier_ok(tp->netdev)) { | 1213 | if (netif_carrier_ok(tp->netdev)) { |
1214 | netif_stop_queue(tp->netdev); | ||
1210 | set_bit(RTL8152_LINK_CHG, &tp->flags); | 1215 | set_bit(RTL8152_LINK_CHG, &tp->flags); |
1211 | schedule_delayed_work(&tp->schedule, 0); | 1216 | schedule_delayed_work(&tp->schedule, 0); |
1212 | } | 1217 | } |
@@ -1277,6 +1282,7 @@ static int alloc_all_mem(struct r8152 *tp) | |||
1277 | spin_lock_init(&tp->rx_lock); | 1282 | spin_lock_init(&tp->rx_lock); |
1278 | spin_lock_init(&tp->tx_lock); | 1283 | spin_lock_init(&tp->tx_lock); |
1279 | INIT_LIST_HEAD(&tp->tx_free); | 1284 | INIT_LIST_HEAD(&tp->tx_free); |
1285 | INIT_LIST_HEAD(&tp->rx_done); | ||
1280 | skb_queue_head_init(&tp->tx_queue); | 1286 | skb_queue_head_init(&tp->tx_queue); |
1281 | skb_queue_head_init(&tp->rx_queue); | 1287 | skb_queue_head_init(&tp->rx_queue); |
1282 | 1288 | ||
@@ -1941,7 +1947,6 @@ static void _rtl8152_set_rx_mode(struct net_device *netdev) | |||
1941 | __le32 tmp[2]; | 1947 | __le32 tmp[2]; |
1942 | u32 ocp_data; | 1948 | u32 ocp_data; |
1943 | 1949 | ||
1944 | clear_bit(RTL8152_SET_RX_MODE, &tp->flags); | ||
1945 | netif_stop_queue(netdev); | 1950 | netif_stop_queue(netdev); |
1946 | ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); | 1951 | ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); |
1947 | ocp_data &= ~RCR_ACPT_ALL; | 1952 | ocp_data &= ~RCR_ACPT_ALL; |
@@ -2427,8 +2432,6 @@ static void rtl_phy_reset(struct r8152 *tp) | |||
2427 | u16 data; | 2432 | u16 data; |
2428 | int i; | 2433 | int i; |
2429 | 2434 | ||
2430 | clear_bit(PHY_RESET, &tp->flags); | ||
2431 | |||
2432 | data = r8152_mdio_read(tp, MII_BMCR); | 2435 | data = r8152_mdio_read(tp, MII_BMCR); |
2433 | 2436 | ||
2434 | /* don't reset again before the previous one complete */ | 2437 | /* don't reset again before the previous one complete */ |
@@ -2458,23 +2461,23 @@ static void r8153_teredo_off(struct r8152 *tp) | |||
2458 | ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TEREDO_TIMER, 0); | 2461 | ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TEREDO_TIMER, 0); |
2459 | } | 2462 | } |
2460 | 2463 | ||
2461 | static void r8152b_disable_aldps(struct r8152 *tp) | 2464 | static void r8152_aldps_en(struct r8152 *tp, bool enable) |
2462 | { | ||
2463 | ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPDNPS | LINKENA | DIS_SDSAVE); | ||
2464 | msleep(20); | ||
2465 | } | ||
2466 | |||
2467 | static inline void r8152b_enable_aldps(struct r8152 *tp) | ||
2468 | { | 2465 | { |
2469 | ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPWRSAVE | ENPDNPS | | 2466 | if (enable) { |
2470 | LINKENA | DIS_SDSAVE); | 2467 | ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPWRSAVE | ENPDNPS | |
2468 | LINKENA | DIS_SDSAVE); | ||
2469 | } else { | ||
2470 | ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPDNPS | LINKENA | | ||
2471 | DIS_SDSAVE); | ||
2472 | msleep(20); | ||
2473 | } | ||
2471 | } | 2474 | } |
2472 | 2475 | ||
2473 | static void rtl8152_disable(struct r8152 *tp) | 2476 | static void rtl8152_disable(struct r8152 *tp) |
2474 | { | 2477 | { |
2475 | r8152b_disable_aldps(tp); | 2478 | r8152_aldps_en(tp, false); |
2476 | rtl_disable(tp); | 2479 | rtl_disable(tp); |
2477 | r8152b_enable_aldps(tp); | 2480 | r8152_aldps_en(tp, true); |
2478 | } | 2481 | } |
2479 | 2482 | ||
2480 | static void r8152b_hw_phy_cfg(struct r8152 *tp) | 2483 | static void r8152b_hw_phy_cfg(struct r8152 *tp) |
@@ -2786,30 +2789,26 @@ static void r8153_enter_oob(struct r8152 *tp) | |||
2786 | ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); | 2789 | ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); |
2787 | } | 2790 | } |
2788 | 2791 | ||
2789 | static void r8153_disable_aldps(struct r8152 *tp) | 2792 | static void r8153_aldps_en(struct r8152 *tp, bool enable) |
2790 | { | 2793 | { |
2791 | u16 data; | 2794 | u16 data; |
2792 | 2795 | ||
2793 | data = ocp_reg_read(tp, OCP_POWER_CFG); | 2796 | data = ocp_reg_read(tp, OCP_POWER_CFG); |
2794 | data &= ~EN_ALDPS; | 2797 | if (enable) { |
2795 | ocp_reg_write(tp, OCP_POWER_CFG, data); | 2798 | data |= EN_ALDPS; |
2796 | msleep(20); | 2799 | ocp_reg_write(tp, OCP_POWER_CFG, data); |
2797 | } | 2800 | } else { |
2798 | 2801 | data &= ~EN_ALDPS; | |
2799 | static void r8153_enable_aldps(struct r8152 *tp) | 2802 | ocp_reg_write(tp, OCP_POWER_CFG, data); |
2800 | { | 2803 | msleep(20); |
2801 | u16 data; | 2804 | } |
2802 | |||
2803 | data = ocp_reg_read(tp, OCP_POWER_CFG); | ||
2804 | data |= EN_ALDPS; | ||
2805 | ocp_reg_write(tp, OCP_POWER_CFG, data); | ||
2806 | } | 2805 | } |
2807 | 2806 | ||
2808 | static void rtl8153_disable(struct r8152 *tp) | 2807 | static void rtl8153_disable(struct r8152 *tp) |
2809 | { | 2808 | { |
2810 | r8153_disable_aldps(tp); | 2809 | r8153_aldps_en(tp, false); |
2811 | rtl_disable(tp); | 2810 | rtl_disable(tp); |
2812 | r8153_enable_aldps(tp); | 2811 | r8153_aldps_en(tp, true); |
2813 | usb_enable_lpm(tp->udev); | 2812 | usb_enable_lpm(tp->udev); |
2814 | } | 2813 | } |
2815 | 2814 | ||
@@ -2887,10 +2886,9 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex) | |||
2887 | r8152_mdio_write(tp, MII_ADVERTISE, anar); | 2886 | r8152_mdio_write(tp, MII_ADVERTISE, anar); |
2888 | r8152_mdio_write(tp, MII_BMCR, bmcr); | 2887 | r8152_mdio_write(tp, MII_BMCR, bmcr); |
2889 | 2888 | ||
2890 | if (test_bit(PHY_RESET, &tp->flags)) { | 2889 | if (test_and_clear_bit(PHY_RESET, &tp->flags)) { |
2891 | int i; | 2890 | int i; |
2892 | 2891 | ||
2893 | clear_bit(PHY_RESET, &tp->flags); | ||
2894 | for (i = 0; i < 50; i++) { | 2892 | for (i = 0; i < 50; i++) { |
2895 | msleep(20); | 2893 | msleep(20); |
2896 | if ((r8152_mdio_read(tp, MII_BMCR) & BMCR_RESET) == 0) | 2894 | if ((r8152_mdio_read(tp, MII_BMCR) & BMCR_RESET) == 0) |
@@ -2899,7 +2897,6 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex) | |||
2899 | } | 2897 | } |
2900 | 2898 | ||
2901 | out: | 2899 | out: |
2902 | |||
2903 | return ret; | 2900 | return ret; |
2904 | } | 2901 | } |
2905 | 2902 | ||
@@ -2908,9 +2905,9 @@ static void rtl8152_up(struct r8152 *tp) | |||
2908 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) | 2905 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) |
2909 | return; | 2906 | return; |
2910 | 2907 | ||
2911 | r8152b_disable_aldps(tp); | 2908 | r8152_aldps_en(tp, false); |
2912 | r8152b_exit_oob(tp); | 2909 | r8152b_exit_oob(tp); |
2913 | r8152b_enable_aldps(tp); | 2910 | r8152_aldps_en(tp, true); |
2914 | } | 2911 | } |
2915 | 2912 | ||
2916 | static void rtl8152_down(struct r8152 *tp) | 2913 | static void rtl8152_down(struct r8152 *tp) |
@@ -2921,9 +2918,9 @@ static void rtl8152_down(struct r8152 *tp) | |||
2921 | } | 2918 | } |
2922 | 2919 | ||
2923 | r8152_power_cut_en(tp, false); | 2920 | r8152_power_cut_en(tp, false); |
2924 | r8152b_disable_aldps(tp); | 2921 | r8152_aldps_en(tp, false); |
2925 | r8152b_enter_oob(tp); | 2922 | r8152b_enter_oob(tp); |
2926 | r8152b_enable_aldps(tp); | 2923 | r8152_aldps_en(tp, true); |
2927 | } | 2924 | } |
2928 | 2925 | ||
2929 | static void rtl8153_up(struct r8152 *tp) | 2926 | static void rtl8153_up(struct r8152 *tp) |
@@ -2932,9 +2929,9 @@ static void rtl8153_up(struct r8152 *tp) | |||
2932 | return; | 2929 | return; |
2933 | 2930 | ||
2934 | r8153_u1u2en(tp, false); | 2931 | r8153_u1u2en(tp, false); |
2935 | r8153_disable_aldps(tp); | 2932 | r8153_aldps_en(tp, false); |
2936 | r8153_first_init(tp); | 2933 | r8153_first_init(tp); |
2937 | r8153_enable_aldps(tp); | 2934 | r8153_aldps_en(tp, true); |
2938 | r8153_u2p3en(tp, true); | 2935 | r8153_u2p3en(tp, true); |
2939 | r8153_u1u2en(tp, true); | 2936 | r8153_u1u2en(tp, true); |
2940 | usb_enable_lpm(tp->udev); | 2937 | usb_enable_lpm(tp->udev); |
@@ -2950,9 +2947,9 @@ static void rtl8153_down(struct r8152 *tp) | |||
2950 | r8153_u1u2en(tp, false); | 2947 | r8153_u1u2en(tp, false); |
2951 | r8153_u2p3en(tp, false); | 2948 | r8153_u2p3en(tp, false); |
2952 | r8153_power_cut_en(tp, false); | 2949 | r8153_power_cut_en(tp, false); |
2953 | r8153_disable_aldps(tp); | 2950 | r8153_aldps_en(tp, false); |
2954 | r8153_enter_oob(tp); | 2951 | r8153_enter_oob(tp); |
2955 | r8153_enable_aldps(tp); | 2952 | r8153_aldps_en(tp, true); |
2956 | } | 2953 | } |
2957 | 2954 | ||
2958 | static bool rtl8152_in_nway(struct r8152 *tp) | 2955 | static bool rtl8152_in_nway(struct r8152 *tp) |
@@ -2986,7 +2983,6 @@ static void set_carrier(struct r8152 *tp) | |||
2986 | struct net_device *netdev = tp->netdev; | 2983 | struct net_device *netdev = tp->netdev; |
2987 | u8 speed; | 2984 | u8 speed; |
2988 | 2985 | ||
2989 | clear_bit(RTL8152_LINK_CHG, &tp->flags); | ||
2990 | speed = rtl8152_get_speed(tp); | 2986 | speed = rtl8152_get_speed(tp); |
2991 | 2987 | ||
2992 | if (speed & LINK_STATUS) { | 2988 | if (speed & LINK_STATUS) { |
@@ -3000,6 +2996,9 @@ static void set_carrier(struct r8152 *tp) | |||
3000 | napi_enable(&tp->napi); | 2996 | napi_enable(&tp->napi); |
3001 | netif_wake_queue(netdev); | 2997 | netif_wake_queue(netdev); |
3002 | netif_info(tp, link, netdev, "carrier on\n"); | 2998 | netif_info(tp, link, netdev, "carrier on\n"); |
2999 | } else if (netif_queue_stopped(netdev) && | ||
3000 | skb_queue_len(&tp->tx_queue) < tp->tx_qlen) { | ||
3001 | netif_wake_queue(netdev); | ||
3003 | } | 3002 | } |
3004 | } else { | 3003 | } else { |
3005 | if (netif_carrier_ok(netdev)) { | 3004 | if (netif_carrier_ok(netdev)) { |
@@ -3033,20 +3032,18 @@ static void rtl_work_func_t(struct work_struct *work) | |||
3033 | goto out1; | 3032 | goto out1; |
3034 | } | 3033 | } |
3035 | 3034 | ||
3036 | if (test_bit(RTL8152_LINK_CHG, &tp->flags)) | 3035 | if (test_and_clear_bit(RTL8152_LINK_CHG, &tp->flags)) |
3037 | set_carrier(tp); | 3036 | set_carrier(tp); |
3038 | 3037 | ||
3039 | if (test_bit(RTL8152_SET_RX_MODE, &tp->flags)) | 3038 | if (test_and_clear_bit(RTL8152_SET_RX_MODE, &tp->flags)) |
3040 | _rtl8152_set_rx_mode(tp->netdev); | 3039 | _rtl8152_set_rx_mode(tp->netdev); |
3041 | 3040 | ||
3042 | /* don't schedule napi before linking */ | 3041 | /* don't schedule napi before linking */ |
3043 | if (test_bit(SCHEDULE_NAPI, &tp->flags) && | 3042 | if (test_and_clear_bit(SCHEDULE_NAPI, &tp->flags) && |
3044 | netif_carrier_ok(tp->netdev)) { | 3043 | netif_carrier_ok(tp->netdev)) |
3045 | clear_bit(SCHEDULE_NAPI, &tp->flags); | ||
3046 | napi_schedule(&tp->napi); | 3044 | napi_schedule(&tp->napi); |
3047 | } | ||
3048 | 3045 | ||
3049 | if (test_bit(PHY_RESET, &tp->flags)) | 3046 | if (test_and_clear_bit(PHY_RESET, &tp->flags)) |
3050 | rtl_phy_reset(tp); | 3047 | rtl_phy_reset(tp); |
3051 | 3048 | ||
3052 | mutex_unlock(&tp->control); | 3049 | mutex_unlock(&tp->control); |
@@ -3055,6 +3052,33 @@ out1: | |||
3055 | usb_autopm_put_interface(tp->intf); | 3052 | usb_autopm_put_interface(tp->intf); |
3056 | } | 3053 | } |
3057 | 3054 | ||
3055 | #ifdef CONFIG_PM_SLEEP | ||
3056 | static int rtl_notifier(struct notifier_block *nb, unsigned long action, | ||
3057 | void *data) | ||
3058 | { | ||
3059 | struct r8152 *tp = container_of(nb, struct r8152, pm_notifier); | ||
3060 | |||
3061 | switch (action) { | ||
3062 | case PM_HIBERNATION_PREPARE: | ||
3063 | case PM_SUSPEND_PREPARE: | ||
3064 | usb_autopm_get_interface(tp->intf); | ||
3065 | break; | ||
3066 | |||
3067 | case PM_POST_HIBERNATION: | ||
3068 | case PM_POST_SUSPEND: | ||
3069 | usb_autopm_put_interface(tp->intf); | ||
3070 | break; | ||
3071 | |||
3072 | case PM_POST_RESTORE: | ||
3073 | case PM_RESTORE_PREPARE: | ||
3074 | default: | ||
3075 | break; | ||
3076 | } | ||
3077 | |||
3078 | return NOTIFY_DONE; | ||
3079 | } | ||
3080 | #endif | ||
3081 | |||
3058 | static int rtl8152_open(struct net_device *netdev) | 3082 | static int rtl8152_open(struct net_device *netdev) |
3059 | { | 3083 | { |
3060 | struct r8152 *tp = netdev_priv(netdev); | 3084 | struct r8152 *tp = netdev_priv(netdev); |
@@ -3097,6 +3121,10 @@ static int rtl8152_open(struct net_device *netdev) | |||
3097 | mutex_unlock(&tp->control); | 3121 | mutex_unlock(&tp->control); |
3098 | 3122 | ||
3099 | usb_autopm_put_interface(tp->intf); | 3123 | usb_autopm_put_interface(tp->intf); |
3124 | #ifdef CONFIG_PM_SLEEP | ||
3125 | tp->pm_notifier.notifier_call = rtl_notifier; | ||
3126 | register_pm_notifier(&tp->pm_notifier); | ||
3127 | #endif | ||
3100 | 3128 | ||
3101 | out: | 3129 | out: |
3102 | return res; | 3130 | return res; |
@@ -3107,6 +3135,9 @@ static int rtl8152_close(struct net_device *netdev) | |||
3107 | struct r8152 *tp = netdev_priv(netdev); | 3135 | struct r8152 *tp = netdev_priv(netdev); |
3108 | int res = 0; | 3136 | int res = 0; |
3109 | 3137 | ||
3138 | #ifdef CONFIG_PM_SLEEP | ||
3139 | unregister_pm_notifier(&tp->pm_notifier); | ||
3140 | #endif | ||
3110 | napi_disable(&tp->napi); | 3141 | napi_disable(&tp->napi); |
3111 | clear_bit(WORK_ENABLE, &tp->flags); | 3142 | clear_bit(WORK_ENABLE, &tp->flags); |
3112 | usb_kill_urb(tp->intr_urb); | 3143 | usb_kill_urb(tp->intr_urb); |
@@ -3245,7 +3276,7 @@ static void r8152b_init(struct r8152 *tp) | |||
3245 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) | 3276 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) |
3246 | return; | 3277 | return; |
3247 | 3278 | ||
3248 | r8152b_disable_aldps(tp); | 3279 | r8152_aldps_en(tp, false); |
3249 | 3280 | ||
3250 | if (tp->version == RTL_VER_01) { | 3281 | if (tp->version == RTL_VER_01) { |
3251 | ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE); | 3282 | ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE); |
@@ -3267,7 +3298,7 @@ static void r8152b_init(struct r8152 *tp) | |||
3267 | ocp_write_word(tp, MCU_TYPE_PLA, PLA_GPHY_INTR_IMR, ocp_data); | 3298 | ocp_write_word(tp, MCU_TYPE_PLA, PLA_GPHY_INTR_IMR, ocp_data); |
3268 | 3299 | ||
3269 | r8152b_enable_eee(tp); | 3300 | r8152b_enable_eee(tp); |
3270 | r8152b_enable_aldps(tp); | 3301 | r8152_aldps_en(tp, true); |
3271 | r8152b_enable_fc(tp); | 3302 | r8152b_enable_fc(tp); |
3272 | rtl_tally_reset(tp); | 3303 | rtl_tally_reset(tp); |
3273 | 3304 | ||
@@ -3285,7 +3316,7 @@ static void r8153_init(struct r8152 *tp) | |||
3285 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) | 3316 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) |
3286 | return; | 3317 | return; |
3287 | 3318 | ||
3288 | r8153_disable_aldps(tp); | 3319 | r8153_aldps_en(tp, false); |
3289 | r8153_u1u2en(tp, false); | 3320 | r8153_u1u2en(tp, false); |
3290 | 3321 | ||
3291 | for (i = 0; i < 500; i++) { | 3322 | for (i = 0; i < 500; i++) { |
@@ -3374,7 +3405,7 @@ static void r8153_init(struct r8152 *tp) | |||
3374 | EEE_SPDWN_EN); | 3405 | EEE_SPDWN_EN); |
3375 | 3406 | ||
3376 | r8153_enable_eee(tp); | 3407 | r8153_enable_eee(tp); |
3377 | r8153_enable_aldps(tp); | 3408 | r8153_aldps_en(tp, true); |
3378 | r8152b_enable_fc(tp); | 3409 | r8152b_enable_fc(tp); |
3379 | rtl_tally_reset(tp); | 3410 | rtl_tally_reset(tp); |
3380 | r8153_u2p3en(tp, true); | 3411 | r8153_u2p3en(tp, true); |
@@ -3560,8 +3591,18 @@ static int rtl8152_resume(struct usb_interface *intf) | |||
3560 | clear_bit(SELECTIVE_SUSPEND, &tp->flags); | 3591 | clear_bit(SELECTIVE_SUSPEND, &tp->flags); |
3561 | napi_disable(&tp->napi); | 3592 | napi_disable(&tp->napi); |
3562 | set_bit(WORK_ENABLE, &tp->flags); | 3593 | set_bit(WORK_ENABLE, &tp->flags); |
3563 | if (netif_carrier_ok(tp->netdev)) | 3594 | |
3564 | rtl_start_rx(tp); | 3595 | if (netif_carrier_ok(tp->netdev)) { |
3596 | if (rtl8152_get_speed(tp) & LINK_STATUS) { | ||
3597 | rtl_start_rx(tp); | ||
3598 | } else { | ||
3599 | netif_carrier_off(tp->netdev); | ||
3600 | tp->rtl_ops.disable(tp); | ||
3601 | netif_info(tp, link, tp->netdev, | ||
3602 | "linking down\n"); | ||
3603 | } | ||
3604 | } | ||
3605 | |||
3565 | napi_enable(&tp->napi); | 3606 | napi_enable(&tp->napi); |
3566 | } else { | 3607 | } else { |
3567 | tp->rtl_ops.up(tp); | 3608 | tp->rtl_ops.up(tp); |
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 30033dbe6662..c5f375befd2f 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c | |||
@@ -2193,13 +2193,9 @@ static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev, | |||
2193 | { | 2193 | { |
2194 | u32 tx_cmd_a, tx_cmd_b; | 2194 | u32 tx_cmd_a, tx_cmd_b; |
2195 | 2195 | ||
2196 | if (skb_headroom(skb) < SMSC75XX_TX_OVERHEAD) { | 2196 | if (skb_cow_head(skb, SMSC75XX_TX_OVERHEAD)) { |
2197 | struct sk_buff *skb2 = | ||
2198 | skb_copy_expand(skb, SMSC75XX_TX_OVERHEAD, 0, flags); | ||
2199 | dev_kfree_skb_any(skb); | 2197 | dev_kfree_skb_any(skb); |
2200 | skb = skb2; | 2198 | return NULL; |
2201 | if (!skb) | ||
2202 | return NULL; | ||
2203 | } | 2199 | } |
2204 | 2200 | ||
2205 | tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN) | TX_CMD_A_FCS; | 2201 | tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN) | TX_CMD_A_FCS; |
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c index 4a1e9c489f1f..aadfe1d1c37e 100644 --- a/drivers/net/usb/sr9700.c +++ b/drivers/net/usb/sr9700.c | |||
@@ -456,14 +456,9 @@ static struct sk_buff *sr9700_tx_fixup(struct usbnet *dev, struct sk_buff *skb, | |||
456 | 456 | ||
457 | len = skb->len; | 457 | len = skb->len; |
458 | 458 | ||
459 | if (skb_headroom(skb) < SR_TX_OVERHEAD) { | 459 | if (skb_cow_head(skb, SR_TX_OVERHEAD)) { |
460 | struct sk_buff *skb2; | ||
461 | |||
462 | skb2 = skb_copy_expand(skb, SR_TX_OVERHEAD, 0, flags); | ||
463 | dev_kfree_skb_any(skb); | 460 | dev_kfree_skb_any(skb); |
464 | skb = skb2; | 461 | return NULL; |
465 | if (!skb) | ||
466 | return NULL; | ||
467 | } | 462 | } |
468 | 463 | ||
469 | __skb_push(skb, SR_TX_OVERHEAD); | 464 | __skb_push(skb, SR_TX_OVERHEAD); |
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 0cbf520cea77..82bf85ae5d08 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c | |||
@@ -1563,7 +1563,6 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, | |||
1563 | rq->rx_ring[i].basePA); | 1563 | rq->rx_ring[i].basePA); |
1564 | rq->rx_ring[i].base = NULL; | 1564 | rq->rx_ring[i].base = NULL; |
1565 | } | 1565 | } |
1566 | rq->buf_info[i] = NULL; | ||
1567 | } | 1566 | } |
1568 | 1567 | ||
1569 | if (rq->comp_ring.base) { | 1568 | if (rq->comp_ring.base) { |
@@ -1578,6 +1577,7 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, | |||
1578 | (rq->rx_ring[0].size + rq->rx_ring[1].size); | 1577 | (rq->rx_ring[0].size + rq->rx_ring[1].size); |
1579 | dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0], | 1578 | dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0], |
1580 | rq->buf_info_pa); | 1579 | rq->buf_info_pa); |
1580 | rq->buf_info[0] = rq->buf_info[1] = NULL; | ||
1581 | } | 1581 | } |
1582 | } | 1582 | } |
1583 | 1583 | ||
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c index 738d541a2255..348ed1b0e58b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c | |||
@@ -1127,7 +1127,7 @@ static u8 _rtl8821ae_dbi_read(struct rtl_priv *rtlpriv, u16 addr) | |||
1127 | } | 1127 | } |
1128 | if (0 == tmp) { | 1128 | if (0 == tmp) { |
1129 | read_addr = REG_DBI_RDATA + addr % 4; | 1129 | read_addr = REG_DBI_RDATA + addr % 4; |
1130 | ret = rtl_read_word(rtlpriv, read_addr); | 1130 | ret = rtl_read_byte(rtlpriv, read_addr); |
1131 | } | 1131 | } |
1132 | return ret; | 1132 | return ret; |
1133 | } | 1133 | } |
@@ -1169,7 +1169,8 @@ static void _rtl8821ae_enable_aspm_back_door(struct ieee80211_hw *hw) | |||
1169 | } | 1169 | } |
1170 | 1170 | ||
1171 | tmp = _rtl8821ae_dbi_read(rtlpriv, 0x70f); | 1171 | tmp = _rtl8821ae_dbi_read(rtlpriv, 0x70f); |
1172 | _rtl8821ae_dbi_write(rtlpriv, 0x70f, tmp | BIT(7)); | 1172 | _rtl8821ae_dbi_write(rtlpriv, 0x70f, tmp | BIT(7) | |
1173 | ASPM_L1_LATENCY << 3); | ||
1173 | 1174 | ||
1174 | tmp = _rtl8821ae_dbi_read(rtlpriv, 0x719); | 1175 | tmp = _rtl8821ae_dbi_read(rtlpriv, 0x719); |
1175 | _rtl8821ae_dbi_write(rtlpriv, 0x719, tmp | BIT(3) | BIT(4)); | 1176 | _rtl8821ae_dbi_write(rtlpriv, 0x719, tmp | BIT(3) | BIT(4)); |
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index b6faf624480e..d676d055feda 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h | |||
@@ -99,6 +99,7 @@ | |||
99 | #define RTL_USB_MAX_RX_COUNT 100 | 99 | #define RTL_USB_MAX_RX_COUNT 100 |
100 | #define QBSS_LOAD_SIZE 5 | 100 | #define QBSS_LOAD_SIZE 5 |
101 | #define MAX_WMMELE_LENGTH 64 | 101 | #define MAX_WMMELE_LENGTH 64 |
102 | #define ASPM_L1_LATENCY 7 | ||
102 | 103 | ||
103 | #define TOTAL_CAM_ENTRY 32 | 104 | #define TOTAL_CAM_ENTRY 32 |
104 | 105 | ||
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index fd221cc4cb79..eb7a9e62371c 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c | |||
@@ -86,6 +86,8 @@ struct netfront_cb { | |||
86 | /* IRQ name is queue name with "-tx" or "-rx" appended */ | 86 | /* IRQ name is queue name with "-tx" or "-rx" appended */ |
87 | #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) | 87 | #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) |
88 | 88 | ||
89 | static DECLARE_WAIT_QUEUE_HEAD(module_unload_q); | ||
90 | |||
89 | struct netfront_stats { | 91 | struct netfront_stats { |
90 | u64 packets; | 92 | u64 packets; |
91 | u64 bytes; | 93 | u64 bytes; |
@@ -2037,10 +2039,12 @@ static void netback_changed(struct xenbus_device *dev, | |||
2037 | break; | 2039 | break; |
2038 | 2040 | ||
2039 | case XenbusStateClosed: | 2041 | case XenbusStateClosed: |
2042 | wake_up_all(&module_unload_q); | ||
2040 | if (dev->state == XenbusStateClosed) | 2043 | if (dev->state == XenbusStateClosed) |
2041 | break; | 2044 | break; |
2042 | /* Missed the backend's CLOSING state -- fallthrough */ | 2045 | /* Missed the backend's CLOSING state -- fallthrough */ |
2043 | case XenbusStateClosing: | 2046 | case XenbusStateClosing: |
2047 | wake_up_all(&module_unload_q); | ||
2044 | xenbus_frontend_closed(dev); | 2048 | xenbus_frontend_closed(dev); |
2045 | break; | 2049 | break; |
2046 | } | 2050 | } |
@@ -2146,6 +2150,20 @@ static int xennet_remove(struct xenbus_device *dev) | |||
2146 | 2150 | ||
2147 | dev_dbg(&dev->dev, "%s\n", dev->nodename); | 2151 | dev_dbg(&dev->dev, "%s\n", dev->nodename); |
2148 | 2152 | ||
2153 | if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) { | ||
2154 | xenbus_switch_state(dev, XenbusStateClosing); | ||
2155 | wait_event(module_unload_q, | ||
2156 | xenbus_read_driver_state(dev->otherend) == | ||
2157 | XenbusStateClosing); | ||
2158 | |||
2159 | xenbus_switch_state(dev, XenbusStateClosed); | ||
2160 | wait_event(module_unload_q, | ||
2161 | xenbus_read_driver_state(dev->otherend) == | ||
2162 | XenbusStateClosed || | ||
2163 | xenbus_read_driver_state(dev->otherend) == | ||
2164 | XenbusStateUnknown); | ||
2165 | } | ||
2166 | |||
2149 | xennet_disconnect_backend(info); | 2167 | xennet_disconnect_backend(info); |
2150 | 2168 | ||
2151 | unregister_netdev(info->netdev); | 2169 | unregister_netdev(info->netdev); |
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index d6ceb8b91cd6..1c8aedf21370 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
@@ -2976,10 +2976,16 @@ static void nvme_dev_shutdown(struct nvme_dev *dev) | |||
2976 | mutex_unlock(&dev->shutdown_lock); | 2976 | mutex_unlock(&dev->shutdown_lock); |
2977 | } | 2977 | } |
2978 | 2978 | ||
2979 | static void nvme_dev_remove(struct nvme_dev *dev) | 2979 | static void nvme_remove_namespaces(struct nvme_dev *dev) |
2980 | { | 2980 | { |
2981 | struct nvme_ns *ns, *next; | 2981 | struct nvme_ns *ns, *next; |
2982 | 2982 | ||
2983 | list_for_each_entry_safe(ns, next, &dev->namespaces, list) | ||
2984 | nvme_ns_remove(ns); | ||
2985 | } | ||
2986 | |||
2987 | static void nvme_dev_remove(struct nvme_dev *dev) | ||
2988 | { | ||
2983 | if (nvme_io_incapable(dev)) { | 2989 | if (nvme_io_incapable(dev)) { |
2984 | /* | 2990 | /* |
2985 | * If the device is not capable of IO (surprise hot-removal, | 2991 | * If the device is not capable of IO (surprise hot-removal, |
@@ -2989,8 +2995,7 @@ static void nvme_dev_remove(struct nvme_dev *dev) | |||
2989 | */ | 2995 | */ |
2990 | nvme_dev_shutdown(dev); | 2996 | nvme_dev_shutdown(dev); |
2991 | } | 2997 | } |
2992 | list_for_each_entry_safe(ns, next, &dev->namespaces, list) | 2998 | nvme_remove_namespaces(dev); |
2993 | nvme_ns_remove(ns); | ||
2994 | } | 2999 | } |
2995 | 3000 | ||
2996 | static int nvme_setup_prp_pools(struct nvme_dev *dev) | 3001 | static int nvme_setup_prp_pools(struct nvme_dev *dev) |
@@ -3174,7 +3179,7 @@ static void nvme_probe_work(struct work_struct *work) | |||
3174 | */ | 3179 | */ |
3175 | if (dev->online_queues < 2) { | 3180 | if (dev->online_queues < 2) { |
3176 | dev_warn(dev->dev, "IO queues not created\n"); | 3181 | dev_warn(dev->dev, "IO queues not created\n"); |
3177 | nvme_dev_remove(dev); | 3182 | nvme_remove_namespaces(dev); |
3178 | } else { | 3183 | } else { |
3179 | nvme_unfreeze_queues(dev); | 3184 | nvme_unfreeze_queues(dev); |
3180 | nvme_dev_add(dev); | 3185 | nvme_dev_add(dev); |
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c index d0c2759076a2..312cb5b74dec 100644 --- a/drivers/parisc/lba_pci.c +++ b/drivers/parisc/lba_pci.c | |||
@@ -1654,3 +1654,36 @@ void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask) | |||
1654 | iounmap(base_addr); | 1654 | iounmap(base_addr); |
1655 | } | 1655 | } |
1656 | 1656 | ||
1657 | |||
1658 | /* | ||
1659 | * The design of the Diva management card in rp34x0 machines (rp3410, rp3440) | ||
1660 | * seems rushed, so that many built-in components simply don't work. | ||
1661 | * The following quirks disable the serial AUX port and the built-in ATI RV100 | ||
1662 | * Radeon 7000 graphics card which both don't have any external connectors and | ||
1663 | * thus are useless, and even worse, e.g. the AUX port occupies ttyS0 and as | ||
1664 | * such makes those machines the only PARISC machines on which we can't use | ||
1665 | * ttyS0 as boot console. | ||
1666 | */ | ||
1667 | static void quirk_diva_ati_card(struct pci_dev *dev) | ||
1668 | { | ||
1669 | if (dev->subsystem_vendor != PCI_VENDOR_ID_HP || | ||
1670 | dev->subsystem_device != 0x1292) | ||
1671 | return; | ||
1672 | |||
1673 | dev_info(&dev->dev, "Hiding Diva built-in ATI card"); | ||
1674 | dev->device = 0; | ||
1675 | } | ||
1676 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QY, | ||
1677 | quirk_diva_ati_card); | ||
1678 | |||
1679 | static void quirk_diva_aux_disable(struct pci_dev *dev) | ||
1680 | { | ||
1681 | if (dev->subsystem_vendor != PCI_VENDOR_ID_HP || | ||
1682 | dev->subsystem_device != 0x1291) | ||
1683 | return; | ||
1684 | |||
1685 | dev_info(&dev->dev, "Hiding Diva built-in AUX serial device"); | ||
1686 | dev->device = 0; | ||
1687 | } | ||
1688 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA_AUX, | ||
1689 | quirk_diva_aux_disable); | ||
diff --git a/drivers/pci/controller/pci-layerscape.c b/drivers/pci/controller/pci-layerscape.c index 7f57e3f730ea..ba1f541e8704 100644 --- a/drivers/pci/controller/pci-layerscape.c +++ b/drivers/pci/controller/pci-layerscape.c | |||
@@ -78,6 +78,16 @@ static void ls_pcie_fix_class(struct ls_pcie *pcie) | |||
78 | iowrite16(PCI_CLASS_BRIDGE_PCI, pcie->dbi + PCI_CLASS_DEVICE); | 78 | iowrite16(PCI_CLASS_BRIDGE_PCI, pcie->dbi + PCI_CLASS_DEVICE); |
79 | } | 79 | } |
80 | 80 | ||
81 | /* Drop MSG TLP except for Vendor MSG */ | ||
82 | static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie) | ||
83 | { | ||
84 | u32 val; | ||
85 | |||
86 | val = ioread32(pcie->dbi + PCIE_STRFMR1); | ||
87 | val &= 0xDFFFFFFF; | ||
88 | iowrite32(val, pcie->dbi + PCIE_STRFMR1); | ||
89 | } | ||
90 | |||
81 | static int ls1021_pcie_link_up(struct dw_pcie *pci) | 91 | static int ls1021_pcie_link_up(struct dw_pcie *pci) |
82 | { | 92 | { |
83 | u32 state; | 93 | u32 state; |
@@ -99,7 +109,7 @@ static void ls1021_pcie_host_init(struct pcie_port *pp) | |||
99 | { | 109 | { |
100 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | 110 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); |
101 | struct ls_pcie *pcie = to_ls_pcie(pci); | 111 | struct ls_pcie *pcie = to_ls_pcie(pci); |
102 | u32 val, index[2]; | 112 | u32 index[2]; |
103 | 113 | ||
104 | pcie->scfg = syscon_regmap_lookup_by_phandle(pci->dev->of_node, | 114 | pcie->scfg = syscon_regmap_lookup_by_phandle(pci->dev->of_node, |
105 | "fsl,pcie-scfg"); | 115 | "fsl,pcie-scfg"); |
@@ -118,13 +128,7 @@ static void ls1021_pcie_host_init(struct pcie_port *pp) | |||
118 | 128 | ||
119 | dw_pcie_setup_rc(pp); | 129 | dw_pcie_setup_rc(pp); |
120 | 130 | ||
121 | /* | 131 | ls_pcie_drop_msg_tlp(pcie); |
122 | * LS1021A Workaround for internal TKT228622 | ||
123 | * to fix the INTx hang issue | ||
124 | */ | ||
125 | val = ioread32(pcie->dbi + PCIE_STRFMR1); | ||
126 | val &= 0xffff; | ||
127 | iowrite32(val, pcie->dbi + PCIE_STRFMR1); | ||
128 | } | 132 | } |
129 | 133 | ||
130 | static int ls_pcie_link_up(struct dw_pcie *pci) | 134 | static int ls_pcie_link_up(struct dw_pcie *pci) |
@@ -150,6 +154,7 @@ static void ls_pcie_host_init(struct pcie_port *pp) | |||
150 | iowrite32(1, pcie->dbi + PCIE_DBI_RO_WR_EN); | 154 | iowrite32(1, pcie->dbi + PCIE_DBI_RO_WR_EN); |
151 | ls_pcie_fix_class(pcie); | 155 | ls_pcie_fix_class(pcie); |
152 | ls_pcie_clear_multifunction(pcie); | 156 | ls_pcie_clear_multifunction(pcie); |
157 | ls_pcie_drop_msg_tlp(pcie); | ||
153 | iowrite32(0, pcie->dbi + PCIE_DBI_RO_WR_EN); | 158 | iowrite32(0, pcie->dbi + PCIE_DBI_RO_WR_EN); |
154 | } | 159 | } |
155 | 160 | ||
@@ -216,6 +221,7 @@ static const struct of_device_id ls_pcie_of_match[] = { | |||
216 | { .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata }, | 221 | { .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata }, |
217 | { .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata }, | 222 | { .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata }, |
218 | { .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata }, | 223 | { .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata }, |
224 | { .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata }, | ||
219 | { }, | 225 | { }, |
220 | }; | 226 | }; |
221 | MODULE_DEVICE_TABLE(of, ls_pcie_of_match); | 227 | MODULE_DEVICE_TABLE(of, ls_pcie_of_match); |
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index 357527712539..7680fc0349fc 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c | |||
@@ -161,7 +161,6 @@ static int virtfn_add(struct pci_dev *dev, int id, int reset) | |||
161 | pci_device_add(virtfn, virtfn->bus); | 161 | pci_device_add(virtfn, virtfn->bus); |
162 | mutex_unlock(&iov->dev->sriov->lock); | 162 | mutex_unlock(&iov->dev->sriov->lock); |
163 | 163 | ||
164 | pci_bus_add_device(virtfn); | ||
165 | sprintf(buf, "virtfn%u", id); | 164 | sprintf(buf, "virtfn%u", id); |
166 | rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf); | 165 | rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf); |
167 | if (rc) | 166 | if (rc) |
@@ -172,6 +171,8 @@ static int virtfn_add(struct pci_dev *dev, int id, int reset) | |||
172 | 171 | ||
173 | kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE); | 172 | kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE); |
174 | 173 | ||
174 | pci_bus_add_device(virtfn); | ||
175 | |||
175 | return 0; | 176 | return 0; |
176 | 177 | ||
177 | failed2: | 178 | failed2: |
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index fca925543fae..32bd8ab79d53 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c | |||
@@ -944,7 +944,12 @@ static int pci_pm_thaw_noirq(struct device *dev) | |||
944 | if (pci_has_legacy_pm_support(pci_dev)) | 944 | if (pci_has_legacy_pm_support(pci_dev)) |
945 | return pci_legacy_resume_early(dev); | 945 | return pci_legacy_resume_early(dev); |
946 | 946 | ||
947 | pci_update_current_state(pci_dev, PCI_D0); | 947 | /* |
948 | * pci_restore_state() requires the device to be in D0 (because of MSI | ||
949 | * restoration among other things), so force it into D0 in case the | ||
950 | * driver's "freeze" callbacks put it into a low-power state directly. | ||
951 | */ | ||
952 | pci_set_power_state(pci_dev, PCI_D0); | ||
948 | pci_restore_state(pci_dev); | 953 | pci_restore_state(pci_dev); |
949 | 954 | ||
950 | if (drv && drv->pm && drv->pm->thaw_noirq) | 955 | if (drv && drv->pm && drv->pm->thaw_noirq) |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 1a14ca8965e6..295bf1472d02 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -3850,6 +3850,10 @@ static bool pci_bus_resetable(struct pci_bus *bus) | |||
3850 | { | 3850 | { |
3851 | struct pci_dev *dev; | 3851 | struct pci_dev *dev; |
3852 | 3852 | ||
3853 | |||
3854 | if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)) | ||
3855 | return false; | ||
3856 | |||
3853 | list_for_each_entry(dev, &bus->devices, bus_list) { | 3857 | list_for_each_entry(dev, &bus->devices, bus_list) { |
3854 | if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET || | 3858 | if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET || |
3855 | (dev->subordinate && !pci_bus_resetable(dev->subordinate))) | 3859 | (dev->subordinate && !pci_bus_resetable(dev->subordinate))) |
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c index 4e14de0f0f98..ca5dbf03e388 100644 --- a/drivers/pci/pcie/aer/aerdrv_core.c +++ b/drivers/pci/pcie/aer/aerdrv_core.c | |||
@@ -388,7 +388,14 @@ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev, | |||
388 | * If the error is reported by an end point, we think this | 388 | * If the error is reported by an end point, we think this |
389 | * error is related to the upstream link of the end point. | 389 | * error is related to the upstream link of the end point. |
390 | */ | 390 | */ |
391 | pci_walk_bus(dev->bus, cb, &result_data); | 391 | if (state == pci_channel_io_normal) |
392 | /* | ||
393 | * the error is non fatal so the bus is ok, just invoke | ||
394 | * the callback for the function that logged the error. | ||
395 | */ | ||
396 | cb(dev, &result_data); | ||
397 | else | ||
398 | pci_walk_bus(dev->bus, cb, &result_data); | ||
392 | } | 399 | } |
393 | 400 | ||
394 | return result_data.result; | 401 | return result_data.result; |
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c index d65b2ce8efc6..4908f9a70835 100644 --- a/drivers/phy/phy-core.c +++ b/drivers/phy/phy-core.c | |||
@@ -397,6 +397,10 @@ static struct phy *_of_phy_get(struct device_node *np, int index) | |||
397 | if (ret) | 397 | if (ret) |
398 | return ERR_PTR(-ENODEV); | 398 | return ERR_PTR(-ENODEV); |
399 | 399 | ||
400 | /* This phy type handled by the usb-phy subsystem for now */ | ||
401 | if (of_device_is_compatible(args.np, "usb-nop-xceiv")) | ||
402 | return ERR_PTR(-ENODEV); | ||
403 | |||
400 | mutex_lock(&phy_provider_mutex); | 404 | mutex_lock(&phy_provider_mutex); |
401 | phy_provider = of_phy_provider_lookup(args.np); | 405 | phy_provider = of_phy_provider_lookup(args.np); |
402 | if (IS_ERR(phy_provider) || !try_module_get(phy_provider->owner)) { | 406 | if (IS_ERR(phy_provider) || !try_module_get(phy_provider->owner)) { |
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c index b58d3f29148a..6908b6ce2074 100644 --- a/drivers/pinctrl/pinctrl-st.c +++ b/drivers/pinctrl/pinctrl-st.c | |||
@@ -1338,6 +1338,22 @@ static void st_gpio_irq_unmask(struct irq_data *d) | |||
1338 | writel(BIT(d->hwirq), bank->base + REG_PIO_SET_PMASK); | 1338 | writel(BIT(d->hwirq), bank->base + REG_PIO_SET_PMASK); |
1339 | } | 1339 | } |
1340 | 1340 | ||
1341 | static int st_gpio_irq_request_resources(struct irq_data *d) | ||
1342 | { | ||
1343 | struct gpio_chip *gc = irq_data_get_irq_chip_data(d); | ||
1344 | |||
1345 | st_gpio_direction_input(gc, d->hwirq); | ||
1346 | |||
1347 | return gpiochip_lock_as_irq(gc, d->hwirq); | ||
1348 | } | ||
1349 | |||
1350 | static void st_gpio_irq_release_resources(struct irq_data *d) | ||
1351 | { | ||
1352 | struct gpio_chip *gc = irq_data_get_irq_chip_data(d); | ||
1353 | |||
1354 | gpiochip_unlock_as_irq(gc, d->hwirq); | ||
1355 | } | ||
1356 | |||
1341 | static int st_gpio_irq_set_type(struct irq_data *d, unsigned type) | 1357 | static int st_gpio_irq_set_type(struct irq_data *d, unsigned type) |
1342 | { | 1358 | { |
1343 | struct gpio_chip *gc = irq_data_get_irq_chip_data(d); | 1359 | struct gpio_chip *gc = irq_data_get_irq_chip_data(d); |
@@ -1493,12 +1509,14 @@ static struct gpio_chip st_gpio_template = { | |||
1493 | }; | 1509 | }; |
1494 | 1510 | ||
1495 | static struct irq_chip st_gpio_irqchip = { | 1511 | static struct irq_chip st_gpio_irqchip = { |
1496 | .name = "GPIO", | 1512 | .name = "GPIO", |
1497 | .irq_disable = st_gpio_irq_mask, | 1513 | .irq_request_resources = st_gpio_irq_request_resources, |
1498 | .irq_mask = st_gpio_irq_mask, | 1514 | .irq_release_resources = st_gpio_irq_release_resources, |
1499 | .irq_unmask = st_gpio_irq_unmask, | 1515 | .irq_disable = st_gpio_irq_mask, |
1500 | .irq_set_type = st_gpio_irq_set_type, | 1516 | .irq_mask = st_gpio_irq_mask, |
1501 | .flags = IRQCHIP_SKIP_SET_WAKE, | 1517 | .irq_unmask = st_gpio_irq_unmask, |
1518 | .irq_set_type = st_gpio_irq_set_type, | ||
1519 | .flags = IRQCHIP_SKIP_SET_WAKE, | ||
1502 | }; | 1520 | }; |
1503 | 1521 | ||
1504 | static int st_gpiolib_register_bank(struct st_pinctrl *info, | 1522 | static int st_gpiolib_register_bank(struct st_pinctrl *info, |
diff --git a/drivers/power/reset/zx-reboot.c b/drivers/power/reset/zx-reboot.c index a5b009673d0e..5eb719e73e9e 100644 --- a/drivers/power/reset/zx-reboot.c +++ b/drivers/power/reset/zx-reboot.c | |||
@@ -78,3 +78,7 @@ static struct platform_driver zx_reboot_driver = { | |||
78 | }, | 78 | }, |
79 | }; | 79 | }; |
80 | module_platform_driver(zx_reboot_driver); | 80 | module_platform_driver(zx_reboot_driver); |
81 | |||
82 | MODULE_DESCRIPTION("ZTE SoCs reset driver"); | ||
83 | MODULE_AUTHOR("Jun Nie <jun.nie@linaro.org>"); | ||
84 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index 8d85cf385e09..733d85686639 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c | |||
@@ -764,7 +764,7 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer) | |||
764 | } | 764 | } |
765 | 765 | ||
766 | timerqueue_add(&rtc->timerqueue, &timer->node); | 766 | timerqueue_add(&rtc->timerqueue, &timer->node); |
767 | if (!next) { | 767 | if (!next || ktime_before(timer->node.expires, next->expires)) { |
768 | struct rtc_wkalrm alarm; | 768 | struct rtc_wkalrm alarm; |
769 | int err; | 769 | int err; |
770 | alarm.time = rtc_ktime_to_tm(timer->node.expires); | 770 | alarm.time = rtc_ktime_to_tm(timer->node.expires); |
diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c index df39ce02a99d..229dd2fe8f45 100644 --- a/drivers/rtc/rtc-opal.c +++ b/drivers/rtc/rtc-opal.c | |||
@@ -58,6 +58,7 @@ static void tm_to_opal(struct rtc_time *tm, u32 *y_m_d, u64 *h_m_s_ms) | |||
58 | static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm) | 58 | static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm) |
59 | { | 59 | { |
60 | long rc = OPAL_BUSY; | 60 | long rc = OPAL_BUSY; |
61 | int retries = 10; | ||
61 | u32 y_m_d; | 62 | u32 y_m_d; |
62 | u64 h_m_s_ms; | 63 | u64 h_m_s_ms; |
63 | __be32 __y_m_d; | 64 | __be32 __y_m_d; |
@@ -67,8 +68,11 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm) | |||
67 | rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms); | 68 | rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms); |
68 | if (rc == OPAL_BUSY_EVENT) | 69 | if (rc == OPAL_BUSY_EVENT) |
69 | opal_poll_events(NULL); | 70 | opal_poll_events(NULL); |
70 | else | 71 | else if (retries-- && (rc == OPAL_HARDWARE |
72 | || rc == OPAL_INTERNAL_ERROR)) | ||
71 | msleep(10); | 73 | msleep(10); |
74 | else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT) | ||
75 | break; | ||
72 | } | 76 | } |
73 | 77 | ||
74 | if (rc != OPAL_SUCCESS) | 78 | if (rc != OPAL_SUCCESS) |
@@ -84,6 +88,7 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm) | |||
84 | static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm) | 88 | static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm) |
85 | { | 89 | { |
86 | long rc = OPAL_BUSY; | 90 | long rc = OPAL_BUSY; |
91 | int retries = 10; | ||
87 | u32 y_m_d = 0; | 92 | u32 y_m_d = 0; |
88 | u64 h_m_s_ms = 0; | 93 | u64 h_m_s_ms = 0; |
89 | 94 | ||
@@ -92,8 +97,11 @@ static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm) | |||
92 | rc = opal_rtc_write(y_m_d, h_m_s_ms); | 97 | rc = opal_rtc_write(y_m_d, h_m_s_ms); |
93 | if (rc == OPAL_BUSY_EVENT) | 98 | if (rc == OPAL_BUSY_EVENT) |
94 | opal_poll_events(NULL); | 99 | opal_poll_events(NULL); |
95 | else | 100 | else if (retries-- && (rc == OPAL_HARDWARE |
101 | || rc == OPAL_INTERNAL_ERROR)) | ||
96 | msleep(10); | 102 | msleep(10); |
103 | else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT) | ||
104 | break; | ||
97 | } | 105 | } |
98 | 106 | ||
99 | return rc == OPAL_SUCCESS ? 0 : -EIO; | 107 | return rc == OPAL_SUCCESS ? 0 : -EIO; |
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c index e1687e19c59f..a30f24cb6c83 100644 --- a/drivers/rtc/rtc-pl031.c +++ b/drivers/rtc/rtc-pl031.c | |||
@@ -308,7 +308,8 @@ static int pl031_remove(struct amba_device *adev) | |||
308 | 308 | ||
309 | dev_pm_clear_wake_irq(&adev->dev); | 309 | dev_pm_clear_wake_irq(&adev->dev); |
310 | device_init_wakeup(&adev->dev, false); | 310 | device_init_wakeup(&adev->dev, false); |
311 | free_irq(adev->irq[0], ldata); | 311 | if (adev->irq[0]) |
312 | free_irq(adev->irq[0], ldata); | ||
312 | rtc_device_unregister(ldata->rtc); | 313 | rtc_device_unregister(ldata->rtc); |
313 | iounmap(ldata->base); | 314 | iounmap(ldata->base); |
314 | kfree(ldata); | 315 | kfree(ldata); |
@@ -381,12 +382,13 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id) | |||
381 | goto out_no_rtc; | 382 | goto out_no_rtc; |
382 | } | 383 | } |
383 | 384 | ||
384 | if (request_irq(adev->irq[0], pl031_interrupt, | 385 | if (adev->irq[0]) { |
385 | vendor->irqflags, "rtc-pl031", ldata)) { | 386 | ret = request_irq(adev->irq[0], pl031_interrupt, |
386 | ret = -EIO; | 387 | vendor->irqflags, "rtc-pl031", ldata); |
387 | goto out_no_irq; | 388 | if (ret) |
389 | goto out_no_irq; | ||
390 | dev_pm_set_wake_irq(&adev->dev, adev->irq[0]); | ||
388 | } | 391 | } |
389 | dev_pm_set_wake_irq(&adev->dev, adev->irq[0]); | ||
390 | return 0; | 392 | return 0; |
391 | 393 | ||
392 | out_no_irq: | 394 | out_no_irq: |
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index bf3c1b2301db..0d6888cbd96e 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c | |||
@@ -2680,17 +2680,13 @@ static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card, | |||
2680 | char daddr[16]; | 2680 | char daddr[16]; |
2681 | struct af_iucv_trans_hdr *iucv_hdr; | 2681 | struct af_iucv_trans_hdr *iucv_hdr; |
2682 | 2682 | ||
2683 | skb_pull(skb, 14); | ||
2684 | card->dev->header_ops->create(skb, card->dev, 0, | ||
2685 | card->dev->dev_addr, card->dev->dev_addr, | ||
2686 | card->dev->addr_len); | ||
2687 | skb_pull(skb, 14); | ||
2688 | iucv_hdr = (struct af_iucv_trans_hdr *)skb->data; | ||
2689 | memset(hdr, 0, sizeof(struct qeth_hdr)); | 2683 | memset(hdr, 0, sizeof(struct qeth_hdr)); |
2690 | hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3; | 2684 | hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3; |
2691 | hdr->hdr.l3.ext_flags = 0; | 2685 | hdr->hdr.l3.ext_flags = 0; |
2692 | hdr->hdr.l3.length = skb->len; | 2686 | hdr->hdr.l3.length = skb->len - ETH_HLEN; |
2693 | hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST; | 2687 | hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST; |
2688 | |||
2689 | iucv_hdr = (struct af_iucv_trans_hdr *) (skb->data + ETH_HLEN); | ||
2694 | memset(daddr, 0, sizeof(daddr)); | 2690 | memset(daddr, 0, sizeof(daddr)); |
2695 | daddr[0] = 0xfe; | 2691 | daddr[0] = 0xfe; |
2696 | daddr[1] = 0x80; | 2692 | daddr[1] = 0x80; |
@@ -2873,10 +2869,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2873 | if ((card->info.type == QETH_CARD_TYPE_IQD) && (!large_send) && | 2869 | if ((card->info.type == QETH_CARD_TYPE_IQD) && (!large_send) && |
2874 | (skb_shinfo(skb)->nr_frags == 0)) { | 2870 | (skb_shinfo(skb)->nr_frags == 0)) { |
2875 | new_skb = skb; | 2871 | new_skb = skb; |
2876 | if (new_skb->protocol == ETH_P_AF_IUCV) | 2872 | data_offset = ETH_HLEN; |
2877 | data_offset = 0; | ||
2878 | else | ||
2879 | data_offset = ETH_HLEN; | ||
2880 | hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC); | 2873 | hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC); |
2881 | if (!hdr) | 2874 | if (!hdr) |
2882 | goto tx_drop; | 2875 | goto tx_drop; |
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 8c758c36fc70..766a9176b4ad 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c | |||
@@ -1363,13 +1363,13 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced) | |||
1363 | * will ensure that i/o is queisced and the card is flushed in that | 1363 | * will ensure that i/o is queisced and the card is flushed in that |
1364 | * case. | 1364 | * case. |
1365 | */ | 1365 | */ |
1366 | aac_free_irq(aac); | ||
1366 | aac_fib_map_free(aac); | 1367 | aac_fib_map_free(aac); |
1367 | pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys); | 1368 | pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys); |
1368 | aac->comm_addr = NULL; | 1369 | aac->comm_addr = NULL; |
1369 | aac->comm_phys = 0; | 1370 | aac->comm_phys = 0; |
1370 | kfree(aac->queues); | 1371 | kfree(aac->queues); |
1371 | aac->queues = NULL; | 1372 | aac->queues = NULL; |
1372 | aac_free_irq(aac); | ||
1373 | kfree(aac->fsa_dev); | 1373 | kfree(aac->fsa_dev); |
1374 | aac->fsa_dev = NULL; | 1374 | aac->fsa_dev = NULL; |
1375 | quirks = aac_get_driver_ident(index)->quirks; | 1375 | quirks = aac_get_driver_ident(index)->quirks; |
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 804806e1cbb4..7a48905b8195 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | |||
@@ -1339,6 +1339,7 @@ static void release_offload_resources(struct cxgbi_sock *csk) | |||
1339 | csk, csk->state, csk->flags, csk->tid); | 1339 | csk, csk->state, csk->flags, csk->tid); |
1340 | 1340 | ||
1341 | cxgbi_sock_free_cpl_skbs(csk); | 1341 | cxgbi_sock_free_cpl_skbs(csk); |
1342 | cxgbi_sock_purge_write_queue(csk); | ||
1342 | if (csk->wr_cred != csk->wr_max_cred) { | 1343 | if (csk->wr_cred != csk->wr_max_cred) { |
1343 | cxgbi_sock_purge_wr_queue(csk); | 1344 | cxgbi_sock_purge_wr_queue(csk); |
1344 | cxgbi_sock_reset_wr_list(csk); | 1345 | cxgbi_sock_reset_wr_list(csk); |
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 0c87f341fed4..910b795fc5eb 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c | |||
@@ -3638,6 +3638,7 @@ static int hpsa_update_device_info(struct ctlr_info *h, | |||
3638 | if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC) | 3638 | if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC) |
3639 | hpsa_get_ioaccel_status(h, scsi3addr, this_device); | 3639 | hpsa_get_ioaccel_status(h, scsi3addr, this_device); |
3640 | volume_offline = hpsa_volume_offline(h, scsi3addr); | 3640 | volume_offline = hpsa_volume_offline(h, scsi3addr); |
3641 | this_device->volume_offline = volume_offline; | ||
3641 | if (volume_offline == HPSA_LV_FAILED) { | 3642 | if (volume_offline == HPSA_LV_FAILED) { |
3642 | rc = HPSA_LV_FAILED; | 3643 | rc = HPSA_LV_FAILED; |
3643 | dev_err(&h->pdev->dev, | 3644 | dev_err(&h->pdev->dev, |
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index c1ccf1ee99ea..efce04df2109 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c | |||
@@ -1727,7 +1727,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc) | |||
1727 | 1727 | ||
1728 | if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) { | 1728 | if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) { |
1729 | reason = FAILURE_SESSION_IN_RECOVERY; | 1729 | reason = FAILURE_SESSION_IN_RECOVERY; |
1730 | sc->result = DID_REQUEUE; | 1730 | sc->result = DID_REQUEUE << 16; |
1731 | goto fault; | 1731 | goto fault; |
1732 | } | 1732 | } |
1733 | 1733 | ||
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index fc8f9b446556..fd8fe1202dbe 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c | |||
@@ -7491,7 +7491,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
7491 | did, vport->port_state, ndlp->nlp_flag); | 7491 | did, vport->port_state, ndlp->nlp_flag); |
7492 | 7492 | ||
7493 | phba->fc_stat.elsRcvPRLI++; | 7493 | phba->fc_stat.elsRcvPRLI++; |
7494 | if (vport->port_state < LPFC_DISC_AUTH) { | 7494 | if ((vport->port_state < LPFC_DISC_AUTH) && |
7495 | (vport->fc_flag & FC_FABRIC)) { | ||
7495 | rjt_err = LSRJT_UNABLE_TPC; | 7496 | rjt_err = LSRJT_UNABLE_TPC; |
7496 | rjt_exp = LSEXP_NOTHING_MORE; | 7497 | rjt_exp = LSEXP_NOTHING_MORE; |
7497 | break; | 7498 | break; |
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index d3668aa555d5..be901f6db6d3 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c | |||
@@ -4777,7 +4777,8 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |||
4777 | lpfc_cancel_retry_delay_tmo(vport, ndlp); | 4777 | lpfc_cancel_retry_delay_tmo(vport, ndlp); |
4778 | if ((ndlp->nlp_flag & NLP_DEFER_RM) && | 4778 | if ((ndlp->nlp_flag & NLP_DEFER_RM) && |
4779 | !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) && | 4779 | !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) && |
4780 | !(ndlp->nlp_flag & NLP_RPI_REGISTERED)) { | 4780 | !(ndlp->nlp_flag & NLP_RPI_REGISTERED) && |
4781 | phba->sli_rev != LPFC_SLI_REV4) { | ||
4781 | /* For this case we need to cleanup the default rpi | 4782 | /* For this case we need to cleanup the default rpi |
4782 | * allocated by the firmware. | 4783 | * allocated by the firmware. |
4783 | */ | 4784 | */ |
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index f224cdb2fce4..507869bc0673 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h | |||
@@ -3180,7 +3180,7 @@ struct lpfc_mbx_get_port_name { | |||
3180 | #define MB_CEQ_STATUS_QUEUE_FLUSHING 0x4 | 3180 | #define MB_CEQ_STATUS_QUEUE_FLUSHING 0x4 |
3181 | #define MB_CQE_STATUS_DMA_FAILED 0x5 | 3181 | #define MB_CQE_STATUS_DMA_FAILED 0x5 |
3182 | 3182 | ||
3183 | #define LPFC_MBX_WR_CONFIG_MAX_BDE 8 | 3183 | #define LPFC_MBX_WR_CONFIG_MAX_BDE 1 |
3184 | struct lpfc_mbx_wr_object { | 3184 | struct lpfc_mbx_wr_object { |
3185 | struct mbox_header header; | 3185 | struct mbox_header header; |
3186 | union { | 3186 | union { |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index e333029e4b6c..e111c3d8c5d6 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c | |||
@@ -4588,6 +4588,11 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) | |||
4588 | } else if (log_info == VIRTUAL_IO_FAILED_RETRY) { | 4588 | } else if (log_info == VIRTUAL_IO_FAILED_RETRY) { |
4589 | scmd->result = DID_RESET << 16; | 4589 | scmd->result = DID_RESET << 16; |
4590 | break; | 4590 | break; |
4591 | } else if ((scmd->device->channel == RAID_CHANNEL) && | ||
4592 | (scsi_state == (MPI2_SCSI_STATE_TERMINATED | | ||
4593 | MPI2_SCSI_STATE_NO_SCSI_STATUS))) { | ||
4594 | scmd->result = DID_RESET << 16; | ||
4595 | break; | ||
4591 | } | 4596 | } |
4592 | scmd->result = DID_SOFT_ERROR << 16; | 4597 | scmd->result = DID_SOFT_ERROR << 16; |
4593 | break; | 4598 | break; |
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 38f77e127349..0f0ff75755e0 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c | |||
@@ -160,7 +160,6 @@ typedef struct sg_fd { /* holds the state of a file descriptor */ | |||
160 | struct list_head rq_list; /* head of request list */ | 160 | struct list_head rq_list; /* head of request list */ |
161 | struct fasync_struct *async_qp; /* used by asynchronous notification */ | 161 | struct fasync_struct *async_qp; /* used by asynchronous notification */ |
162 | Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */ | 162 | Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */ |
163 | char low_dma; /* as in parent but possibly overridden to 1 */ | ||
164 | char force_packid; /* 1 -> pack_id input to read(), 0 -> ignored */ | 163 | char force_packid; /* 1 -> pack_id input to read(), 0 -> ignored */ |
165 | char cmd_q; /* 1 -> allow command queuing, 0 -> don't */ | 164 | char cmd_q; /* 1 -> allow command queuing, 0 -> don't */ |
166 | unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */ | 165 | unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */ |
@@ -932,24 +931,14 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) | |||
932 | /* strange ..., for backward compatibility */ | 931 | /* strange ..., for backward compatibility */ |
933 | return sfp->timeout_user; | 932 | return sfp->timeout_user; |
934 | case SG_SET_FORCE_LOW_DMA: | 933 | case SG_SET_FORCE_LOW_DMA: |
935 | result = get_user(val, ip); | 934 | /* |
936 | if (result) | 935 | * N.B. This ioctl never worked properly, but failed to |
937 | return result; | 936 | * return an error value. So returning '0' to keep compability |
938 | if (val) { | 937 | * with legacy applications. |
939 | sfp->low_dma = 1; | 938 | */ |
940 | if ((0 == sfp->low_dma) && !sfp->res_in_use) { | ||
941 | val = (int) sfp->reserve.bufflen; | ||
942 | sg_remove_scat(sfp, &sfp->reserve); | ||
943 | sg_build_reserve(sfp, val); | ||
944 | } | ||
945 | } else { | ||
946 | if (atomic_read(&sdp->detaching)) | ||
947 | return -ENODEV; | ||
948 | sfp->low_dma = sdp->device->host->unchecked_isa_dma; | ||
949 | } | ||
950 | return 0; | 939 | return 0; |
951 | case SG_GET_LOW_DMA: | 940 | case SG_GET_LOW_DMA: |
952 | return put_user((int) sfp->low_dma, ip); | 941 | return put_user((int) sdp->device->host->unchecked_isa_dma, ip); |
953 | case SG_GET_SCSI_ID: | 942 | case SG_GET_SCSI_ID: |
954 | if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t))) | 943 | if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t))) |
955 | return -EFAULT; | 944 | return -EFAULT; |
@@ -1870,6 +1859,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size) | |||
1870 | int sg_tablesize = sfp->parentdp->sg_tablesize; | 1859 | int sg_tablesize = sfp->parentdp->sg_tablesize; |
1871 | int blk_size = buff_size, order; | 1860 | int blk_size = buff_size, order; |
1872 | gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN; | 1861 | gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN; |
1862 | struct sg_device *sdp = sfp->parentdp; | ||
1873 | 1863 | ||
1874 | if (blk_size < 0) | 1864 | if (blk_size < 0) |
1875 | return -EFAULT; | 1865 | return -EFAULT; |
@@ -1895,7 +1885,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size) | |||
1895 | scatter_elem_sz_prev = num; | 1885 | scatter_elem_sz_prev = num; |
1896 | } | 1886 | } |
1897 | 1887 | ||
1898 | if (sfp->low_dma) | 1888 | if (sdp->device->host->unchecked_isa_dma) |
1899 | gfp_mask |= GFP_DMA; | 1889 | gfp_mask |= GFP_DMA; |
1900 | 1890 | ||
1901 | if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) | 1891 | if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) |
@@ -2158,8 +2148,6 @@ sg_add_sfp(Sg_device * sdp) | |||
2158 | sfp->timeout = SG_DEFAULT_TIMEOUT; | 2148 | sfp->timeout = SG_DEFAULT_TIMEOUT; |
2159 | sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER; | 2149 | sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER; |
2160 | sfp->force_packid = SG_DEF_FORCE_PACK_ID; | 2150 | sfp->force_packid = SG_DEF_FORCE_PACK_ID; |
2161 | sfp->low_dma = (SG_DEF_FORCE_LOW_DMA == 0) ? | ||
2162 | sdp->device->host->unchecked_isa_dma : 1; | ||
2163 | sfp->cmd_q = SG_DEF_COMMAND_Q; | 2151 | sfp->cmd_q = SG_DEF_COMMAND_Q; |
2164 | sfp->keep_orphan = SG_DEF_KEEP_ORPHAN; | 2152 | sfp->keep_orphan = SG_DEF_KEEP_ORPHAN; |
2165 | sfp->parentdp = sdp; | 2153 | sfp->parentdp = sdp; |
@@ -2618,7 +2606,7 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp) | |||
2618 | jiffies_to_msecs(fp->timeout), | 2606 | jiffies_to_msecs(fp->timeout), |
2619 | fp->reserve.bufflen, | 2607 | fp->reserve.bufflen, |
2620 | (int) fp->reserve.k_use_sg, | 2608 | (int) fp->reserve.k_use_sg, |
2621 | (int) fp->low_dma); | 2609 | (int) sdp->device->host->unchecked_isa_dma); |
2622 | seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=0\n", | 2610 | seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=0\n", |
2623 | (int) fp->cmd_q, (int) fp->force_packid, | 2611 | (int) fp->cmd_q, (int) fp->force_packid, |
2624 | (int) fp->keep_orphan); | 2612 | (int) fp->keep_orphan); |
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 1b9008cab6eb..e244608c8d5c 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c | |||
@@ -4421,12 +4421,15 @@ static int ufshcd_config_vreg(struct device *dev, | |||
4421 | struct ufs_vreg *vreg, bool on) | 4421 | struct ufs_vreg *vreg, bool on) |
4422 | { | 4422 | { |
4423 | int ret = 0; | 4423 | int ret = 0; |
4424 | struct regulator *reg = vreg->reg; | 4424 | struct regulator *reg; |
4425 | const char *name = vreg->name; | 4425 | const char *name; |
4426 | int min_uV, uA_load; | 4426 | int min_uV, uA_load; |
4427 | 4427 | ||
4428 | BUG_ON(!vreg); | 4428 | BUG_ON(!vreg); |
4429 | 4429 | ||
4430 | reg = vreg->reg; | ||
4431 | name = vreg->name; | ||
4432 | |||
4430 | if (regulator_count_voltages(reg) > 0) { | 4433 | if (regulator_count_voltages(reg) > 0) { |
4431 | min_uV = on ? vreg->min_uV : 0; | 4434 | min_uV = on ? vreg->min_uV : 0; |
4432 | ret = regulator_set_voltage(reg, min_uV, vreg->max_uV); | 4435 | ret = regulator_set_voltage(reg, min_uV, vreg->max_uV); |
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index 0e5723ab47f0..d17ec6775718 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c | |||
@@ -1228,12 +1228,23 @@ static int spi_imx_remove(struct platform_device *pdev) | |||
1228 | { | 1228 | { |
1229 | struct spi_master *master = platform_get_drvdata(pdev); | 1229 | struct spi_master *master = platform_get_drvdata(pdev); |
1230 | struct spi_imx_data *spi_imx = spi_master_get_devdata(master); | 1230 | struct spi_imx_data *spi_imx = spi_master_get_devdata(master); |
1231 | int ret; | ||
1231 | 1232 | ||
1232 | spi_bitbang_stop(&spi_imx->bitbang); | 1233 | spi_bitbang_stop(&spi_imx->bitbang); |
1233 | 1234 | ||
1235 | ret = clk_enable(spi_imx->clk_per); | ||
1236 | if (ret) | ||
1237 | return ret; | ||
1238 | |||
1239 | ret = clk_enable(spi_imx->clk_ipg); | ||
1240 | if (ret) { | ||
1241 | clk_disable(spi_imx->clk_per); | ||
1242 | return ret; | ||
1243 | } | ||
1244 | |||
1234 | writel(0, spi_imx->base + MXC_CSPICTRL); | 1245 | writel(0, spi_imx->base + MXC_CSPICTRL); |
1235 | clk_unprepare(spi_imx->clk_ipg); | 1246 | clk_disable_unprepare(spi_imx->clk_ipg); |
1236 | clk_unprepare(spi_imx->clk_per); | 1247 | clk_disable_unprepare(spi_imx->clk_per); |
1237 | spi_imx_sdma_exit(spi_imx); | 1248 | spi_imx_sdma_exit(spi_imx); |
1238 | spi_master_put(master); | 1249 | spi_master_put(master); |
1239 | 1250 | ||
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c index 3009121173cd..3c6ea5c3ddd2 100644 --- a/drivers/spi/spi-xilinx.c +++ b/drivers/spi/spi-xilinx.c | |||
@@ -271,6 +271,7 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) | |||
271 | while (remaining_words) { | 271 | while (remaining_words) { |
272 | int n_words, tx_words, rx_words; | 272 | int n_words, tx_words, rx_words; |
273 | u32 sr; | 273 | u32 sr; |
274 | int stalled; | ||
274 | 275 | ||
275 | n_words = min(remaining_words, xspi->buffer_size); | 276 | n_words = min(remaining_words, xspi->buffer_size); |
276 | 277 | ||
@@ -299,7 +300,17 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) | |||
299 | 300 | ||
300 | /* Read out all the data from the Rx FIFO */ | 301 | /* Read out all the data from the Rx FIFO */ |
301 | rx_words = n_words; | 302 | rx_words = n_words; |
303 | stalled = 10; | ||
302 | while (rx_words) { | 304 | while (rx_words) { |
305 | if (rx_words == n_words && !(stalled--) && | ||
306 | !(sr & XSPI_SR_TX_EMPTY_MASK) && | ||
307 | (sr & XSPI_SR_RX_EMPTY_MASK)) { | ||
308 | dev_err(&spi->dev, | ||
309 | "Detected stall. Check C_SPI_MODE and C_SPI_MEMORY\n"); | ||
310 | xspi_init_hw(xspi); | ||
311 | return -EIO; | ||
312 | } | ||
313 | |||
303 | if ((sr & XSPI_SR_TX_EMPTY_MASK) && (rx_words > 1)) { | 314 | if ((sr & XSPI_SR_TX_EMPTY_MASK) && (rx_words > 1)) { |
304 | xilinx_spi_rx(xspi); | 315 | xilinx_spi_rx(xspi); |
305 | rx_words--; | 316 | rx_words--; |
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c index e4530ac6d5d4..28c9afe538ca 100644 --- a/drivers/staging/android/ashmem.c +++ b/drivers/staging/android/ashmem.c | |||
@@ -753,10 +753,12 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
753 | break; | 753 | break; |
754 | case ASHMEM_SET_SIZE: | 754 | case ASHMEM_SET_SIZE: |
755 | ret = -EINVAL; | 755 | ret = -EINVAL; |
756 | mutex_lock(&ashmem_mutex); | ||
756 | if (!asma->file) { | 757 | if (!asma->file) { |
757 | ret = 0; | 758 | ret = 0; |
758 | asma->size = (size_t)arg; | 759 | asma->size = (size_t)arg; |
759 | } | 760 | } |
761 | mutex_unlock(&ashmem_mutex); | ||
760 | break; | 762 | break; |
761 | case ASHMEM_GET_SIZE: | 763 | case ASHMEM_GET_SIZE: |
762 | ret = asma->size; | 764 | ret = asma->size; |
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c index a076ede50b22..ec90f2781085 100644 --- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c +++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c | |||
@@ -1399,19 +1399,13 @@ static int rtw_wx_get_essid(struct net_device *dev, | |||
1399 | if ((check_fwstate(pmlmepriv, _FW_LINKED)) || | 1399 | if ((check_fwstate(pmlmepriv, _FW_LINKED)) || |
1400 | (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE))) { | 1400 | (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE))) { |
1401 | len = pcur_bss->Ssid.SsidLength; | 1401 | len = pcur_bss->Ssid.SsidLength; |
1402 | |||
1403 | wrqu->essid.length = len; | ||
1404 | |||
1405 | memcpy(extra, pcur_bss->Ssid.Ssid, len); | 1402 | memcpy(extra, pcur_bss->Ssid.Ssid, len); |
1406 | |||
1407 | wrqu->essid.flags = 1; | ||
1408 | } else { | 1403 | } else { |
1409 | ret = -1; | 1404 | len = 0; |
1410 | goto exit; | 1405 | *extra = 0; |
1411 | } | 1406 | } |
1412 | 1407 | wrqu->essid.length = len; | |
1413 | exit: | 1408 | wrqu->essid.flags = 1; |
1414 | |||
1415 | 1409 | ||
1416 | return ret; | 1410 | return ret; |
1417 | } | 1411 | } |
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 8a4092cd97ee..58fe27705b96 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c | |||
@@ -1759,7 +1759,6 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
1759 | struct iscsi_tmr_req *tmr_req; | 1759 | struct iscsi_tmr_req *tmr_req; |
1760 | struct iscsi_tm *hdr; | 1760 | struct iscsi_tm *hdr; |
1761 | int out_of_order_cmdsn = 0, ret; | 1761 | int out_of_order_cmdsn = 0, ret; |
1762 | bool sess_ref = false; | ||
1763 | u8 function, tcm_function = TMR_UNKNOWN; | 1762 | u8 function, tcm_function = TMR_UNKNOWN; |
1764 | 1763 | ||
1765 | hdr = (struct iscsi_tm *) buf; | 1764 | hdr = (struct iscsi_tm *) buf; |
@@ -1801,18 +1800,17 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
1801 | buf); | 1800 | buf); |
1802 | } | 1801 | } |
1803 | 1802 | ||
1803 | transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops, | ||
1804 | conn->sess->se_sess, 0, DMA_NONE, | ||
1805 | TCM_SIMPLE_TAG, cmd->sense_buffer + 2); | ||
1806 | |||
1807 | target_get_sess_cmd(&cmd->se_cmd, true); | ||
1808 | |||
1804 | /* | 1809 | /* |
1805 | * TASK_REASSIGN for ERL=2 / connection stays inside of | 1810 | * TASK_REASSIGN for ERL=2 / connection stays inside of |
1806 | * LIO-Target $FABRIC_MOD | 1811 | * LIO-Target $FABRIC_MOD |
1807 | */ | 1812 | */ |
1808 | if (function != ISCSI_TM_FUNC_TASK_REASSIGN) { | 1813 | if (function != ISCSI_TM_FUNC_TASK_REASSIGN) { |
1809 | transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops, | ||
1810 | conn->sess->se_sess, 0, DMA_NONE, | ||
1811 | TCM_SIMPLE_TAG, cmd->sense_buffer + 2); | ||
1812 | |||
1813 | target_get_sess_cmd(&cmd->se_cmd, true); | ||
1814 | sess_ref = true; | ||
1815 | |||
1816 | switch (function) { | 1814 | switch (function) { |
1817 | case ISCSI_TM_FUNC_ABORT_TASK: | 1815 | case ISCSI_TM_FUNC_ABORT_TASK: |
1818 | tcm_function = TMR_ABORT_TASK; | 1816 | tcm_function = TMR_ABORT_TASK; |
@@ -1951,12 +1949,8 @@ attach: | |||
1951 | * For connection recovery, this is also the default action for | 1949 | * For connection recovery, this is also the default action for |
1952 | * TMR TASK_REASSIGN. | 1950 | * TMR TASK_REASSIGN. |
1953 | */ | 1951 | */ |
1954 | if (sess_ref) { | ||
1955 | pr_debug("Handle TMR, using sess_ref=true check\n"); | ||
1956 | target_put_sess_cmd(&cmd->se_cmd); | ||
1957 | } | ||
1958 | |||
1959 | iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); | 1952 | iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); |
1953 | target_put_sess_cmd(&cmd->se_cmd); | ||
1960 | return 0; | 1954 | return 0; |
1961 | } | 1955 | } |
1962 | EXPORT_SYMBOL(iscsit_handle_task_mgt_cmd); | 1956 | EXPORT_SYMBOL(iscsit_handle_task_mgt_cmd); |
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index c9be953496ec..e926dd52b6b5 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c | |||
@@ -133,6 +133,15 @@ static bool __target_check_io_state(struct se_cmd *se_cmd, | |||
133 | spin_unlock(&se_cmd->t_state_lock); | 133 | spin_unlock(&se_cmd->t_state_lock); |
134 | return false; | 134 | return false; |
135 | } | 135 | } |
136 | if (se_cmd->transport_state & CMD_T_PRE_EXECUTE) { | ||
137 | if (se_cmd->scsi_status) { | ||
138 | pr_debug("Attempted to abort io tag: %llu early failure" | ||
139 | " status: 0x%02x\n", se_cmd->tag, | ||
140 | se_cmd->scsi_status); | ||
141 | spin_unlock(&se_cmd->t_state_lock); | ||
142 | return false; | ||
143 | } | ||
144 | } | ||
136 | if (sess->sess_tearing_down || se_cmd->cmd_wait_set) { | 145 | if (sess->sess_tearing_down || se_cmd->cmd_wait_set) { |
137 | pr_debug("Attempted to abort io tag: %llu already shutdown," | 146 | pr_debug("Attempted to abort io tag: %llu already shutdown," |
138 | " skipping\n", se_cmd->tag); | 147 | " skipping\n", se_cmd->tag); |
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 37abf881ca75..21f888ac550e 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
@@ -1933,6 +1933,7 @@ void target_execute_cmd(struct se_cmd *cmd) | |||
1933 | } | 1933 | } |
1934 | 1934 | ||
1935 | cmd->t_state = TRANSPORT_PROCESSING; | 1935 | cmd->t_state = TRANSPORT_PROCESSING; |
1936 | cmd->transport_state &= ~CMD_T_PRE_EXECUTE; | ||
1936 | cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT; | 1937 | cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT; |
1937 | spin_unlock_irq(&cmd->t_state_lock); | 1938 | spin_unlock_irq(&cmd->t_state_lock); |
1938 | 1939 | ||
@@ -2572,6 +2573,7 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref) | |||
2572 | ret = -ESHUTDOWN; | 2573 | ret = -ESHUTDOWN; |
2573 | goto out; | 2574 | goto out; |
2574 | } | 2575 | } |
2576 | se_cmd->transport_state |= CMD_T_PRE_EXECUTE; | ||
2575 | list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); | 2577 | list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); |
2576 | out: | 2578 | out: |
2577 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | 2579 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); |
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c index 36d07295f8e3..a56f6cac6fc5 100644 --- a/drivers/thermal/hisi_thermal.c +++ b/drivers/thermal/hisi_thermal.c | |||
@@ -389,8 +389,11 @@ static int hisi_thermal_suspend(struct device *dev) | |||
389 | static int hisi_thermal_resume(struct device *dev) | 389 | static int hisi_thermal_resume(struct device *dev) |
390 | { | 390 | { |
391 | struct hisi_thermal_data *data = dev_get_drvdata(dev); | 391 | struct hisi_thermal_data *data = dev_get_drvdata(dev); |
392 | int ret; | ||
392 | 393 | ||
393 | clk_prepare_enable(data->clk); | 394 | ret = clk_prepare_enable(data->clk); |
395 | if (ret) | ||
396 | return ret; | ||
394 | 397 | ||
395 | data->irq_enabled = true; | 398 | data->irq_enabled = true; |
396 | hisi_thermal_enable_bind_irq_sensor(data); | 399 | hisi_thermal_enable_bind_irq_sensor(data); |
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index 84e71bd19082..41dda25da049 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c | |||
@@ -1801,7 +1801,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old) | |||
1801 | { | 1801 | { |
1802 | struct n_tty_data *ldata = tty->disc_data; | 1802 | struct n_tty_data *ldata = tty->disc_data; |
1803 | 1803 | ||
1804 | if (!old || (old->c_lflag ^ tty->termios.c_lflag) & ICANON) { | 1804 | if (!old || (old->c_lflag ^ tty->termios.c_lflag) & (ICANON | EXTPROC)) { |
1805 | bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE); | 1805 | bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE); |
1806 | ldata->line_start = ldata->read_tail; | 1806 | ldata->line_start = ldata->read_tail; |
1807 | if (!L_ICANON(tty) || !read_cnt(ldata)) { | 1807 | if (!L_ICANON(tty) || !read_cnt(ldata)) { |
@@ -2493,7 +2493,7 @@ static int n_tty_ioctl(struct tty_struct *tty, struct file *file, | |||
2493 | return put_user(tty_chars_in_buffer(tty), (int __user *) arg); | 2493 | return put_user(tty_chars_in_buffer(tty), (int __user *) arg); |
2494 | case TIOCINQ: | 2494 | case TIOCINQ: |
2495 | down_write(&tty->termios_rwsem); | 2495 | down_write(&tty->termios_rwsem); |
2496 | if (L_ICANON(tty)) | 2496 | if (L_ICANON(tty) && !L_EXTPROC(tty)) |
2497 | retval = inq_canon(ldata); | 2497 | retval = inq_canon(ldata); |
2498 | else | 2498 | else |
2499 | retval = read_cnt(ldata); | 2499 | retval = read_cnt(ldata); |
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c index 5b2d92ce7c55..ae01748027e8 100644 --- a/drivers/tty/serial/8250/8250_omap.c +++ b/drivers/tty/serial/8250/8250_omap.c | |||
@@ -609,6 +609,10 @@ static int omap_8250_startup(struct uart_port *port) | |||
609 | up->lsr_saved_flags = 0; | 609 | up->lsr_saved_flags = 0; |
610 | up->msr_saved_flags = 0; | 610 | up->msr_saved_flags = 0; |
611 | 611 | ||
612 | /* Disable DMA for console UART */ | ||
613 | if (uart_console(port)) | ||
614 | up->dma = NULL; | ||
615 | |||
612 | if (up->dma) { | 616 | if (up->dma) { |
613 | ret = serial8250_request_dma(up); | 617 | ret = serial8250_request_dma(up); |
614 | if (ret) { | 618 | if (ret) { |
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 016e4be05cec..98176d12b3e1 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c | |||
@@ -2057,12 +2057,14 @@ static void serial_imx_enable_wakeup(struct imx_port *sport, bool on) | |||
2057 | val &= ~UCR3_AWAKEN; | 2057 | val &= ~UCR3_AWAKEN; |
2058 | writel(val, sport->port.membase + UCR3); | 2058 | writel(val, sport->port.membase + UCR3); |
2059 | 2059 | ||
2060 | val = readl(sport->port.membase + UCR1); | 2060 | if (sport->have_rtscts) { |
2061 | if (on) | 2061 | val = readl(sport->port.membase + UCR1); |
2062 | val |= UCR1_RTSDEN; | 2062 | if (on) |
2063 | else | 2063 | val |= UCR1_RTSDEN; |
2064 | val &= ~UCR1_RTSDEN; | 2064 | else |
2065 | writel(val, sport->port.membase + UCR1); | 2065 | val &= ~UCR1_RTSDEN; |
2066 | writel(val, sport->port.membase + UCR1); | ||
2067 | } | ||
2066 | } | 2068 | } |
2067 | 2069 | ||
2068 | static int imx_serial_port_suspend_noirq(struct device *dev) | 2070 | static int imx_serial_port_suspend_noirq(struct device *dev) |
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index b07f864f68e8..ed27fda13387 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c | |||
@@ -133,6 +133,12 @@ static void sysrq_handle_crash(int key) | |||
133 | { | 133 | { |
134 | char *killer = NULL; | 134 | char *killer = NULL; |
135 | 135 | ||
136 | /* we need to release the RCU read lock here, | ||
137 | * otherwise we get an annoying | ||
138 | * 'BUG: sleeping function called from invalid context' | ||
139 | * complaint from the kernel before the panic. | ||
140 | */ | ||
141 | rcu_read_unlock(); | ||
136 | panic_on_oops = 1; /* force panic */ | 142 | panic_on_oops = 1; /* force panic */ |
137 | wmb(); | 143 | wmb(); |
138 | *killer = 1; | 144 | *killer = 1; |
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 3f6bb3fff890..edd8ef4ee502 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c | |||
@@ -377,7 +377,7 @@ static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags) | |||
377 | 377 | ||
378 | res = usb_submit_urb(acm->read_urbs[index], mem_flags); | 378 | res = usb_submit_urb(acm->read_urbs[index], mem_flags); |
379 | if (res) { | 379 | if (res) { |
380 | if (res != -EPERM) { | 380 | if (res != -EPERM && res != -ENODEV) { |
381 | dev_err(&acm->data->dev, | 381 | dev_err(&acm->data->dev, |
382 | "%s - usb_submit_urb failed: %d\n", | 382 | "%s - usb_submit_urb failed: %d\n", |
383 | __func__, res); | 383 | __func__, res); |
@@ -1695,6 +1695,9 @@ static const struct usb_device_id acm_ids[] = { | |||
1695 | { USB_DEVICE(0x0ace, 0x1611), /* ZyDAS 56K USB MODEM - new version */ | 1695 | { USB_DEVICE(0x0ace, 0x1611), /* ZyDAS 56K USB MODEM - new version */ |
1696 | .driver_info = SINGLE_RX_URB, /* firmware bug */ | 1696 | .driver_info = SINGLE_RX_URB, /* firmware bug */ |
1697 | }, | 1697 | }, |
1698 | { USB_DEVICE(0x11ca, 0x0201), /* VeriFone Mx870 Gadget Serial */ | ||
1699 | .driver_info = SINGLE_RX_URB, | ||
1700 | }, | ||
1698 | { USB_DEVICE(0x22b8, 0x7000), /* Motorola Q Phone */ | 1701 | { USB_DEVICE(0x22b8, 0x7000), /* Motorola Q Phone */ |
1699 | .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ | 1702 | .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ |
1700 | }, | 1703 | }, |
diff --git a/drivers/usb/common/usb-otg.c b/drivers/usb/common/usb-otg.c index 8d2b97b9d58c..c0f06ddacb39 100644 --- a/drivers/usb/common/usb-otg.c +++ b/drivers/usb/common/usb-otg.c | |||
@@ -1114,6 +1114,63 @@ int usb_otg_unregister_hcd(struct usb_hcd *hcd) | |||
1114 | EXPORT_SYMBOL_GPL(usb_otg_unregister_hcd); | 1114 | EXPORT_SYMBOL_GPL(usb_otg_unregister_hcd); |
1115 | 1115 | ||
1116 | /** | 1116 | /** |
1117 | * usb_otg_shutdown_hcd - Shutdown Host controller from OTG core | ||
1118 | * @hcd: Host controller device | ||
1119 | * | ||
1120 | * This is used by the USB Host stack to shutdown the Host controller. | ||
1121 | * This functon will call hcd->driver->shutdown() only if the | ||
1122 | * Host controller was running. It will also stop the OTG FSM to prevent | ||
1123 | * further OTG state changes. | ||
1124 | * | ||
1125 | * Returns: 0 on success, error value otherwise. | ||
1126 | */ | ||
1127 | int usb_otg_shutdown_hcd(struct usb_hcd *hcd) | ||
1128 | { | ||
1129 | struct usb_otg *otgd; | ||
1130 | struct device *hcd_dev = hcd_to_bus(hcd)->controller; | ||
1131 | struct device *otg_dev = usb_otg_get_device(hcd_dev); | ||
1132 | |||
1133 | if (!otg_dev) | ||
1134 | return -EINVAL; /* we're definitely not OTG */ | ||
1135 | |||
1136 | mutex_lock(&otg_list_mutex); | ||
1137 | otgd = usb_otg_get_data(otg_dev); | ||
1138 | mutex_unlock(&otg_list_mutex); | ||
1139 | if (!otgd) { | ||
1140 | dev_dbg(hcd_dev, "otg: host wasn't registered with otg\n"); | ||
1141 | return -EINVAL; | ||
1142 | } | ||
1143 | |||
1144 | mutex_lock(&otgd->fsm.lock); | ||
1145 | |||
1146 | if (otgd->fsm_running) { | ||
1147 | int i; | ||
1148 | |||
1149 | /* no more new events queued */ | ||
1150 | otgd->fsm_running = false; | ||
1151 | |||
1152 | /* Stop state machine / timers */ | ||
1153 | if (!otgd->drd_only) { | ||
1154 | for (i = 0; i < ARRAY_SIZE(otgd->timers); i++) | ||
1155 | hrtimer_cancel(&otgd->timers[i].timer); | ||
1156 | } | ||
1157 | |||
1158 | flush_workqueue(otgd->wq); | ||
1159 | } | ||
1160 | |||
1161 | /* shutdown host controller if it was running */ | ||
1162 | if (otgd->flags & OTG_FLAG_HOST_RUNNING) { | ||
1163 | if (hcd->driver->shutdown) | ||
1164 | hcd->driver->shutdown(hcd); | ||
1165 | } | ||
1166 | |||
1167 | mutex_unlock(&otgd->fsm.lock); | ||
1168 | |||
1169 | return 0; | ||
1170 | } | ||
1171 | EXPORT_SYMBOL_GPL(usb_otg_shutdown_hcd); | ||
1172 | |||
1173 | /** | ||
1117 | * usb_otg_register_gadget - Register Gadget controller to OTG core | 1174 | * usb_otg_register_gadget - Register Gadget controller to OTG core |
1118 | * @gadget: Gadget controller | 1175 | * @gadget: Gadget controller |
1119 | * | 1176 | * |
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index f6fde903fcad..22dcccf2d286 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c | |||
@@ -973,7 +973,7 @@ int usb_get_bos_descriptor(struct usb_device *dev) | |||
973 | case USB_SSP_CAP_TYPE: | 973 | case USB_SSP_CAP_TYPE: |
974 | ssp_cap = (struct usb_ssp_cap_descriptor *)buffer; | 974 | ssp_cap = (struct usb_ssp_cap_descriptor *)buffer; |
975 | ssac = (le32_to_cpu(ssp_cap->bmAttributes) & | 975 | ssac = (le32_to_cpu(ssp_cap->bmAttributes) & |
976 | USB_SSP_SUBLINK_SPEED_ATTRIBS) + 1; | 976 | USB_SSP_SUBLINK_SPEED_ATTRIBS); |
977 | if (length >= USB_DT_USB_SSP_CAP_SIZE(ssac)) | 977 | if (length >= USB_DT_USB_SSP_CAP_SIZE(ssac)) |
978 | dev->bos->ssp_cap = ssp_cap; | 978 | dev->bos->ssp_cap = ssp_cap; |
979 | break; | 979 | break; |
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 3856ba4518da..24b084748b63 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c | |||
@@ -3052,8 +3052,11 @@ usb_hcd_platform_shutdown(struct platform_device *dev) | |||
3052 | { | 3052 | { |
3053 | struct usb_hcd *hcd = platform_get_drvdata(dev); | 3053 | struct usb_hcd *hcd = platform_get_drvdata(dev); |
3054 | 3054 | ||
3055 | if (hcd->driver->shutdown) | 3055 | /* If OTG device, OTG core takes care of shutting down HCD */ |
3056 | hcd->driver->shutdown(hcd); | 3056 | if (usb_otg_shutdown_hcd(hcd)) { |
3057 | if (hcd->driver->shutdown) | ||
3058 | hcd->driver->shutdown(hcd); | ||
3059 | } | ||
3057 | } | 3060 | } |
3058 | EXPORT_SYMBOL_GPL(usb_hcd_platform_shutdown); | 3061 | EXPORT_SYMBOL_GPL(usb_hcd_platform_shutdown); |
3059 | 3062 | ||
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 50010282c010..c05c4f877750 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c | |||
@@ -57,10 +57,11 @@ static const struct usb_device_id usb_quirk_list[] = { | |||
57 | /* Microsoft LifeCam-VX700 v2.0 */ | 57 | /* Microsoft LifeCam-VX700 v2.0 */ |
58 | { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME }, | 58 | { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME }, |
59 | 59 | ||
60 | /* Logitech HD Pro Webcams C920, C920-C and C930e */ | 60 | /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */ |
61 | { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT }, | 61 | { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT }, |
62 | { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT }, | 62 | { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT }, |
63 | { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT }, | 63 | { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT }, |
64 | { USB_DEVICE(0x046d, 0x085b), .driver_info = USB_QUIRK_DELAY_INIT }, | ||
64 | 65 | ||
65 | /* Logitech ConferenceCam CC3000e */ | 66 | /* Logitech ConferenceCam CC3000e */ |
66 | { USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT }, | 67 | { USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT }, |
@@ -154,6 +155,9 @@ static const struct usb_device_id usb_quirk_list[] = { | |||
154 | /* Genesys Logic hub, internally used by KY-688 USB 3.1 Type-C Hub */ | 155 | /* Genesys Logic hub, internally used by KY-688 USB 3.1 Type-C Hub */ |
155 | { USB_DEVICE(0x05e3, 0x0612), .driver_info = USB_QUIRK_NO_LPM }, | 156 | { USB_DEVICE(0x05e3, 0x0612), .driver_info = USB_QUIRK_NO_LPM }, |
156 | 157 | ||
158 | /* ELSA MicroLink 56K */ | ||
159 | { USB_DEVICE(0x05cc, 0x2267), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
160 | |||
157 | /* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */ | 161 | /* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */ |
158 | { USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM }, | 162 | { USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM }, |
159 | 163 | ||
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index a1fccfeb75d3..114131bbba73 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c | |||
@@ -104,7 +104,6 @@ int config_ep_by_speed(struct usb_gadget *g, | |||
104 | struct usb_function *f, | 104 | struct usb_function *f, |
105 | struct usb_ep *_ep) | 105 | struct usb_ep *_ep) |
106 | { | 106 | { |
107 | struct usb_composite_dev *cdev = get_gadget_data(g); | ||
108 | struct usb_endpoint_descriptor *chosen_desc = NULL; | 107 | struct usb_endpoint_descriptor *chosen_desc = NULL; |
109 | struct usb_descriptor_header **speed_desc = NULL; | 108 | struct usb_descriptor_header **speed_desc = NULL; |
110 | 109 | ||
@@ -176,8 +175,12 @@ ep_found: | |||
176 | _ep->maxburst = comp_desc->bMaxBurst + 1; | 175 | _ep->maxburst = comp_desc->bMaxBurst + 1; |
177 | break; | 176 | break; |
178 | default: | 177 | default: |
179 | if (comp_desc->bMaxBurst != 0) | 178 | if (comp_desc->bMaxBurst != 0) { |
179 | struct usb_composite_dev *cdev; | ||
180 | |||
181 | cdev = get_gadget_data(g); | ||
180 | ERROR(cdev, "ep0 bMaxBurst must be 0\n"); | 182 | ERROR(cdev, "ep0 bMaxBurst must be 0\n"); |
183 | } | ||
181 | _ep->maxburst = 1; | 184 | _ep->maxburst = 1; |
182 | break; | 185 | break; |
183 | } | 186 | } |
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 39bb65265bff..eb298daf49c7 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c | |||
@@ -3490,7 +3490,8 @@ static void ffs_closed(struct ffs_data *ffs) | |||
3490 | ci = opts->func_inst.group.cg_item.ci_parent->ci_parent; | 3490 | ci = opts->func_inst.group.cg_item.ci_parent->ci_parent; |
3491 | ffs_dev_unlock(); | 3491 | ffs_dev_unlock(); |
3492 | 3492 | ||
3493 | unregister_gadget_item(ci); | 3493 | if (test_bit(FFS_FL_BOUND, &ffs->flags)) |
3494 | unregister_gadget_item(ci); | ||
3494 | return; | 3495 | return; |
3495 | done: | 3496 | done: |
3496 | ffs_dev_unlock(); | 3497 | ffs_dev_unlock(); |
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c index c7689d05356c..f8a1881609a2 100644 --- a/drivers/usb/gadget/function/f_uvc.c +++ b/drivers/usb/gadget/function/f_uvc.c | |||
@@ -594,6 +594,14 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f) | |||
594 | opts->streaming_maxpacket = clamp(opts->streaming_maxpacket, 1U, 3072U); | 594 | opts->streaming_maxpacket = clamp(opts->streaming_maxpacket, 1U, 3072U); |
595 | opts->streaming_maxburst = min(opts->streaming_maxburst, 15U); | 595 | opts->streaming_maxburst = min(opts->streaming_maxburst, 15U); |
596 | 596 | ||
597 | /* For SS, wMaxPacketSize has to be 1024 if bMaxBurst is not 0 */ | ||
598 | if (opts->streaming_maxburst && | ||
599 | (opts->streaming_maxpacket % 1024) != 0) { | ||
600 | opts->streaming_maxpacket = roundup(opts->streaming_maxpacket, 1024); | ||
601 | INFO(cdev, "overriding streaming_maxpacket to %d\n", | ||
602 | opts->streaming_maxpacket); | ||
603 | } | ||
604 | |||
597 | /* Fill in the FS/HS/SS Video Streaming specific descriptors from the | 605 | /* Fill in the FS/HS/SS Video Streaming specific descriptors from the |
598 | * module parameters. | 606 | * module parameters. |
599 | * | 607 | * |
diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c index ad8c9b05572d..01656f1c6d65 100644 --- a/drivers/usb/gadget/function/uvc_configfs.c +++ b/drivers/usb/gadget/function/uvc_configfs.c | |||
@@ -2202,7 +2202,7 @@ static struct configfs_item_operations uvc_item_ops = { | |||
2202 | .release = uvc_attr_release, | 2202 | .release = uvc_attr_release, |
2203 | }; | 2203 | }; |
2204 | 2204 | ||
2205 | #define UVCG_OPTS_ATTR(cname, conv, str2u, uxx, vnoc, limit) \ | 2205 | #define UVCG_OPTS_ATTR(cname, aname, conv, str2u, uxx, vnoc, limit) \ |
2206 | static ssize_t f_uvc_opts_##cname##_show( \ | 2206 | static ssize_t f_uvc_opts_##cname##_show( \ |
2207 | struct config_item *item, char *page) \ | 2207 | struct config_item *item, char *page) \ |
2208 | { \ | 2208 | { \ |
@@ -2245,16 +2245,16 @@ end: \ | |||
2245 | return ret; \ | 2245 | return ret; \ |
2246 | } \ | 2246 | } \ |
2247 | \ | 2247 | \ |
2248 | UVC_ATTR(f_uvc_opts_, cname, aname) | 2248 | UVC_ATTR(f_uvc_opts_, cname, cname) |
2249 | 2249 | ||
2250 | #define identity_conv(x) (x) | 2250 | #define identity_conv(x) (x) |
2251 | 2251 | ||
2252 | UVCG_OPTS_ATTR(streaming_interval, identity_conv, kstrtou8, u8, identity_conv, | 2252 | UVCG_OPTS_ATTR(streaming_interval, streaming_interval, identity_conv, |
2253 | 16); | 2253 | kstrtou8, u8, identity_conv, 16); |
2254 | UVCG_OPTS_ATTR(streaming_maxpacket, le16_to_cpu, kstrtou16, u16, le16_to_cpu, | 2254 | UVCG_OPTS_ATTR(streaming_maxpacket, streaming_maxpacket, le16_to_cpu, |
2255 | 3072); | 2255 | kstrtou16, u16, le16_to_cpu, 3072); |
2256 | UVCG_OPTS_ATTR(streaming_maxburst, identity_conv, kstrtou8, u8, identity_conv, | 2256 | UVCG_OPTS_ATTR(streaming_maxburst, streaming_maxburst, identity_conv, |
2257 | 15); | 2257 | kstrtou8, u8, identity_conv, 15); |
2258 | 2258 | ||
2259 | #undef identity_conv | 2259 | #undef identity_conv |
2260 | 2260 | ||
diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c index 7a04157ff579..2806457b4748 100644 --- a/drivers/usb/gadget/udc/pch_udc.c +++ b/drivers/usb/gadget/udc/pch_udc.c | |||
@@ -1534,7 +1534,6 @@ static void pch_udc_free_dma_chain(struct pch_udc_dev *dev, | |||
1534 | td = phys_to_virt(addr); | 1534 | td = phys_to_virt(addr); |
1535 | addr2 = (dma_addr_t)td->next; | 1535 | addr2 = (dma_addr_t)td->next; |
1536 | pci_pool_free(dev->data_requests, td, addr); | 1536 | pci_pool_free(dev->data_requests, td, addr); |
1537 | td->next = 0x00; | ||
1538 | addr = addr2; | 1537 | addr = addr2; |
1539 | } | 1538 | } |
1540 | req->chain_len = 1; | 1539 | req->chain_len = 1; |
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index f7481c4e2bc9..d9363713b7f1 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c | |||
@@ -1071,7 +1071,8 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, | |||
1071 | 1071 | ||
1072 | return 1; | 1072 | return 1; |
1073 | fail: | 1073 | fail: |
1074 | 1074 | if (dev->eps[0].ring) | |
1075 | xhci_ring_free(xhci, dev->eps[0].ring); | ||
1075 | if (dev->in_ctx) | 1076 | if (dev->in_ctx) |
1076 | xhci_free_container_ctx(xhci, dev->in_ctx); | 1077 | xhci_free_container_ctx(xhci, dev->in_ctx); |
1077 | if (dev->out_ctx) | 1078 | if (dev->out_ctx) |
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index e8f990642281..cbf3be66f89c 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c | |||
@@ -185,6 +185,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) | |||
185 | xhci->quirks |= XHCI_BROKEN_STREAMS; | 185 | xhci->quirks |= XHCI_BROKEN_STREAMS; |
186 | } | 186 | } |
187 | if (pdev->vendor == PCI_VENDOR_ID_RENESAS && | 187 | if (pdev->vendor == PCI_VENDOR_ID_RENESAS && |
188 | pdev->device == 0x0014) | ||
189 | xhci->quirks |= XHCI_TRUST_TX_LENGTH; | ||
190 | if (pdev->vendor == PCI_VENDOR_ID_RENESAS && | ||
188 | pdev->device == 0x0015) | 191 | pdev->device == 0x0015) |
189 | xhci->quirks |= XHCI_RESET_ON_RESUME; | 192 | xhci->quirks |= XHCI_RESET_ON_RESUME; |
190 | if (pdev->vendor == PCI_VENDOR_ID_VIA) | 193 | if (pdev->vendor == PCI_VENDOR_ID_VIA) |
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index e01d353a5978..64e722c59a18 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c | |||
@@ -290,6 +290,7 @@ MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match); | |||
290 | static struct platform_driver usb_xhci_driver = { | 290 | static struct platform_driver usb_xhci_driver = { |
291 | .probe = xhci_plat_probe, | 291 | .probe = xhci_plat_probe, |
292 | .remove = xhci_plat_remove, | 292 | .remove = xhci_plat_remove, |
293 | .shutdown = usb_hcd_platform_shutdown, | ||
293 | .driver = { | 294 | .driver = { |
294 | .name = "xhci-hcd", | 295 | .name = "xhci-hcd", |
295 | .pm = DEV_PM_OPS, | 296 | .pm = DEV_PM_OPS, |
diff --git a/drivers/usb/misc/usb3503.c b/drivers/usb/misc/usb3503.c index b45cb77c0744..9e8789877763 100644 --- a/drivers/usb/misc/usb3503.c +++ b/drivers/usb/misc/usb3503.c | |||
@@ -292,6 +292,8 @@ static int usb3503_probe(struct usb3503 *hub) | |||
292 | if (gpio_is_valid(hub->gpio_reset)) { | 292 | if (gpio_is_valid(hub->gpio_reset)) { |
293 | err = devm_gpio_request_one(dev, hub->gpio_reset, | 293 | err = devm_gpio_request_one(dev, hub->gpio_reset, |
294 | GPIOF_OUT_INIT_LOW, "usb3503 reset"); | 294 | GPIOF_OUT_INIT_LOW, "usb3503 reset"); |
295 | /* Datasheet defines a hardware reset to be at least 100us */ | ||
296 | usleep_range(100, 10000); | ||
295 | if (err) { | 297 | if (err) { |
296 | dev_err(dev, | 298 | dev_err(dev, |
297 | "unable to request GPIO %d as reset pin (%d)\n", | 299 | "unable to request GPIO %d as reset pin (%d)\n", |
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c index 3598f1a62673..251d123d9046 100644 --- a/drivers/usb/mon/mon_bin.c +++ b/drivers/usb/mon/mon_bin.c | |||
@@ -1001,7 +1001,9 @@ static long mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg | |||
1001 | break; | 1001 | break; |
1002 | 1002 | ||
1003 | case MON_IOCQ_RING_SIZE: | 1003 | case MON_IOCQ_RING_SIZE: |
1004 | mutex_lock(&rp->fetch_lock); | ||
1004 | ret = rp->b_size; | 1005 | ret = rp->b_size; |
1006 | mutex_unlock(&rp->fetch_lock); | ||
1005 | break; | 1007 | break; |
1006 | 1008 | ||
1007 | case MON_IOCT_RING_SIZE: | 1009 | case MON_IOCT_RING_SIZE: |
@@ -1228,12 +1230,16 @@ static int mon_bin_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
1228 | unsigned long offset, chunk_idx; | 1230 | unsigned long offset, chunk_idx; |
1229 | struct page *pageptr; | 1231 | struct page *pageptr; |
1230 | 1232 | ||
1233 | mutex_lock(&rp->fetch_lock); | ||
1231 | offset = vmf->pgoff << PAGE_SHIFT; | 1234 | offset = vmf->pgoff << PAGE_SHIFT; |
1232 | if (offset >= rp->b_size) | 1235 | if (offset >= rp->b_size) { |
1236 | mutex_unlock(&rp->fetch_lock); | ||
1233 | return VM_FAULT_SIGBUS; | 1237 | return VM_FAULT_SIGBUS; |
1238 | } | ||
1234 | chunk_idx = offset / CHUNK_SIZE; | 1239 | chunk_idx = offset / CHUNK_SIZE; |
1235 | pageptr = rp->b_vec[chunk_idx].pg; | 1240 | pageptr = rp->b_vec[chunk_idx].pg; |
1236 | get_page(pageptr); | 1241 | get_page(pageptr); |
1242 | mutex_unlock(&rp->fetch_lock); | ||
1237 | vmf->page = pageptr; | 1243 | vmf->page = pageptr; |
1238 | return 0; | 1244 | return 0; |
1239 | } | 1245 | } |
diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c index b2685e75a683..3eaa4ba6867d 100644 --- a/drivers/usb/musb/ux500.c +++ b/drivers/usb/musb/ux500.c | |||
@@ -348,7 +348,9 @@ static int ux500_suspend(struct device *dev) | |||
348 | struct ux500_glue *glue = dev_get_drvdata(dev); | 348 | struct ux500_glue *glue = dev_get_drvdata(dev); |
349 | struct musb *musb = glue_to_musb(glue); | 349 | struct musb *musb = glue_to_musb(glue); |
350 | 350 | ||
351 | usb_phy_set_suspend(musb->xceiv, 1); | 351 | if (musb) |
352 | usb_phy_set_suspend(musb->xceiv, 1); | ||
353 | |||
352 | clk_disable_unprepare(glue->clk); | 354 | clk_disable_unprepare(glue->clk); |
353 | 355 | ||
354 | return 0; | 356 | return 0; |
@@ -366,7 +368,8 @@ static int ux500_resume(struct device *dev) | |||
366 | return ret; | 368 | return ret; |
367 | } | 369 | } |
368 | 370 | ||
369 | usb_phy_set_suspend(musb->xceiv, 0); | 371 | if (musb) |
372 | usb_phy_set_suspend(musb->xceiv, 0); | ||
370 | 373 | ||
371 | return 0; | 374 | return 0; |
372 | } | 375 | } |
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig index 56ecb8b5115d..584ae8cbaf1c 100644 --- a/drivers/usb/serial/Kconfig +++ b/drivers/usb/serial/Kconfig | |||
@@ -63,6 +63,7 @@ config USB_SERIAL_SIMPLE | |||
63 | - Google USB serial devices | 63 | - Google USB serial devices |
64 | - HP4x calculators | 64 | - HP4x calculators |
65 | - a number of Motorola phones | 65 | - a number of Motorola phones |
66 | - Motorola Tetra devices | ||
66 | - Novatel Wireless GPS receivers | 67 | - Novatel Wireless GPS receivers |
67 | - Siemens USB/MPI adapter. | 68 | - Siemens USB/MPI adapter. |
68 | - ViVOtech ViVOpay USB device. | 69 | - ViVOtech ViVOpay USB device. |
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 1f5ecf905b7d..a4ab4fdf5ba3 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c | |||
@@ -120,6 +120,7 @@ static const struct usb_device_id id_table[] = { | |||
120 | { USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */ | 120 | { USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */ |
121 | { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */ | 121 | { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */ |
122 | { USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */ | 122 | { USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */ |
123 | { USB_DEVICE(0x10C4, 0x85A7) }, /* LifeScan OneTouch Verio IQ */ | ||
123 | { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */ | 124 | { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */ |
124 | { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */ | 125 | { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */ |
125 | { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ | 126 | { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ |
@@ -170,6 +171,7 @@ static const struct usb_device_id id_table[] = { | |||
170 | { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ | 171 | { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ |
171 | { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ | 172 | { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ |
172 | { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */ | 173 | { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */ |
174 | { USB_DEVICE(0x18EF, 0xE030) }, /* ELV ALC 8xxx Battery Charger */ | ||
173 | { USB_DEVICE(0x18EF, 0xE032) }, /* ELV TFD500 Data Logger */ | 175 | { USB_DEVICE(0x18EF, 0xE032) }, /* ELV TFD500 Data Logger */ |
174 | { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */ | 176 | { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */ |
175 | { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */ | 177 | { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */ |
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 30344efc123f..64fe9dc25ed4 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -1017,6 +1017,7 @@ static const struct usb_device_id id_table_combined[] = { | |||
1017 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | 1017 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, |
1018 | { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) }, | 1018 | { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) }, |
1019 | { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) }, | 1019 | { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) }, |
1020 | { USB_DEVICE(AIRBUS_DS_VID, AIRBUS_DS_P8GR) }, | ||
1020 | { } /* Terminating entry */ | 1021 | { } /* Terminating entry */ |
1021 | }; | 1022 | }; |
1022 | 1023 | ||
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index f9d15bd62785..543d2801632b 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h | |||
@@ -914,6 +914,12 @@ | |||
914 | #define ICPDAS_I7563U_PID 0x0105 | 914 | #define ICPDAS_I7563U_PID 0x0105 |
915 | 915 | ||
916 | /* | 916 | /* |
917 | * Airbus Defence and Space | ||
918 | */ | ||
919 | #define AIRBUS_DS_VID 0x1e8e /* Vendor ID */ | ||
920 | #define AIRBUS_DS_P8GR 0x6001 /* Tetra P8GR */ | ||
921 | |||
922 | /* | ||
917 | * RT Systems programming cables for various ham radios | 923 | * RT Systems programming cables for various ham radios |
918 | */ | 924 | */ |
919 | #define RTSYSTEMS_VID 0x2100 /* Vendor ID */ | 925 | #define RTSYSTEMS_VID 0x2100 /* Vendor ID */ |
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c index 749e1b674145..6947985ccfb0 100644 --- a/drivers/usb/serial/io_edgeport.c +++ b/drivers/usb/serial/io_edgeport.c | |||
@@ -2219,7 +2219,6 @@ static int write_cmd_usb(struct edgeport_port *edge_port, | |||
2219 | /* something went wrong */ | 2219 | /* something went wrong */ |
2220 | dev_err(dev, "%s - usb_submit_urb(write command) failed, status = %d\n", | 2220 | dev_err(dev, "%s - usb_submit_urb(write command) failed, status = %d\n", |
2221 | __func__, status); | 2221 | __func__, status); |
2222 | usb_kill_urb(urb); | ||
2223 | usb_free_urb(urb); | 2222 | usb_free_urb(urb); |
2224 | atomic_dec(&CmdUrbs); | 2223 | atomic_dec(&CmdUrbs); |
2225 | return status; | 2224 | return status; |
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index ffa8ec917ff5..1799aa058a5b 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
@@ -236,6 +236,8 @@ static void option_instat_callback(struct urb *urb); | |||
236 | /* These Quectel products use Qualcomm's vendor ID */ | 236 | /* These Quectel products use Qualcomm's vendor ID */ |
237 | #define QUECTEL_PRODUCT_UC20 0x9003 | 237 | #define QUECTEL_PRODUCT_UC20 0x9003 |
238 | #define QUECTEL_PRODUCT_UC15 0x9090 | 238 | #define QUECTEL_PRODUCT_UC15 0x9090 |
239 | /* These Yuga products use Qualcomm's vendor ID */ | ||
240 | #define YUGA_PRODUCT_CLM920_NC5 0x9625 | ||
239 | 241 | ||
240 | #define QUECTEL_VENDOR_ID 0x2c7c | 242 | #define QUECTEL_VENDOR_ID 0x2c7c |
241 | /* These Quectel products use Quectel's vendor ID */ | 243 | /* These Quectel products use Quectel's vendor ID */ |
@@ -283,6 +285,7 @@ static void option_instat_callback(struct urb *urb); | |||
283 | #define TELIT_PRODUCT_LE922_USBCFG3 0x1043 | 285 | #define TELIT_PRODUCT_LE922_USBCFG3 0x1043 |
284 | #define TELIT_PRODUCT_LE922_USBCFG5 0x1045 | 286 | #define TELIT_PRODUCT_LE922_USBCFG5 0x1045 |
285 | #define TELIT_PRODUCT_ME910 0x1100 | 287 | #define TELIT_PRODUCT_ME910 0x1100 |
288 | #define TELIT_PRODUCT_ME910_DUAL_MODEM 0x1101 | ||
286 | #define TELIT_PRODUCT_LE920 0x1200 | 289 | #define TELIT_PRODUCT_LE920 0x1200 |
287 | #define TELIT_PRODUCT_LE910 0x1201 | 290 | #define TELIT_PRODUCT_LE910 0x1201 |
288 | #define TELIT_PRODUCT_LE910_USBCFG4 0x1206 | 291 | #define TELIT_PRODUCT_LE910_USBCFG4 0x1206 |
@@ -380,6 +383,9 @@ static void option_instat_callback(struct urb *urb); | |||
380 | #define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603 | 383 | #define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603 |
381 | #define FOUR_G_SYSTEMS_PRODUCT_W100 0x9b01 | 384 | #define FOUR_G_SYSTEMS_PRODUCT_W100 0x9b01 |
382 | 385 | ||
386 | /* Fujisoft products */ | ||
387 | #define FUJISOFT_PRODUCT_FS040U 0x9b02 | ||
388 | |||
383 | /* iBall 3.5G connect wireless modem */ | 389 | /* iBall 3.5G connect wireless modem */ |
384 | #define IBALL_3_5G_CONNECT 0x9605 | 390 | #define IBALL_3_5G_CONNECT 0x9605 |
385 | 391 | ||
@@ -648,6 +654,11 @@ static const struct option_blacklist_info telit_me910_blacklist = { | |||
648 | .reserved = BIT(1) | BIT(3), | 654 | .reserved = BIT(1) | BIT(3), |
649 | }; | 655 | }; |
650 | 656 | ||
657 | static const struct option_blacklist_info telit_me910_dual_modem_blacklist = { | ||
658 | .sendsetup = BIT(0), | ||
659 | .reserved = BIT(3), | ||
660 | }; | ||
661 | |||
651 | static const struct option_blacklist_info telit_le910_blacklist = { | 662 | static const struct option_blacklist_info telit_le910_blacklist = { |
652 | .sendsetup = BIT(0), | 663 | .sendsetup = BIT(0), |
653 | .reserved = BIT(1) | BIT(2), | 664 | .reserved = BIT(1) | BIT(2), |
@@ -677,6 +688,10 @@ static const struct option_blacklist_info cinterion_rmnet2_blacklist = { | |||
677 | .reserved = BIT(4) | BIT(5), | 688 | .reserved = BIT(4) | BIT(5), |
678 | }; | 689 | }; |
679 | 690 | ||
691 | static const struct option_blacklist_info yuga_clm920_nc5_blacklist = { | ||
692 | .reserved = BIT(1) | BIT(4), | ||
693 | }; | ||
694 | |||
680 | static const struct usb_device_id option_ids[] = { | 695 | static const struct usb_device_id option_ids[] = { |
681 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, | 696 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, |
682 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, | 697 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, |
@@ -1181,6 +1196,9 @@ static const struct usb_device_id option_ids[] = { | |||
1181 | { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)}, | 1196 | { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)}, |
1182 | { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20), | 1197 | { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20), |
1183 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, | 1198 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, |
1199 | /* Yuga products use Qualcomm vendor ID */ | ||
1200 | { USB_DEVICE(QUALCOMM_VENDOR_ID, YUGA_PRODUCT_CLM920_NC5), | ||
1201 | .driver_info = (kernel_ulong_t)&yuga_clm920_nc5_blacklist }, | ||
1184 | /* Quectel products using Quectel vendor ID */ | 1202 | /* Quectel products using Quectel vendor ID */ |
1185 | { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21), | 1203 | { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21), |
1186 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, | 1204 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, |
@@ -1247,6 +1265,8 @@ static const struct usb_device_id option_ids[] = { | |||
1247 | .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, | 1265 | .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, |
1248 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910), | 1266 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910), |
1249 | .driver_info = (kernel_ulong_t)&telit_me910_blacklist }, | 1267 | .driver_info = (kernel_ulong_t)&telit_me910_blacklist }, |
1268 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM), | ||
1269 | .driver_info = (kernel_ulong_t)&telit_me910_dual_modem_blacklist }, | ||
1250 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), | 1270 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), |
1251 | .driver_info = (kernel_ulong_t)&telit_le910_blacklist }, | 1271 | .driver_info = (kernel_ulong_t)&telit_le910_blacklist }, |
1252 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4), | 1272 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4), |
@@ -1880,6 +1900,8 @@ static const struct usb_device_id option_ids[] = { | |||
1880 | { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100), | 1900 | { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100), |
1881 | .driver_info = (kernel_ulong_t)&four_g_w100_blacklist | 1901 | .driver_info = (kernel_ulong_t)&four_g_w100_blacklist |
1882 | }, | 1902 | }, |
1903 | {USB_DEVICE(LONGCHEER_VENDOR_ID, FUJISOFT_PRODUCT_FS040U), | ||
1904 | .driver_info = (kernel_ulong_t)&net_intf3_blacklist}, | ||
1883 | { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) }, | 1905 | { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) }, |
1884 | { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9801, 0xff), | 1906 | { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9801, 0xff), |
1885 | .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, | 1907 | .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, |
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index a51b28379850..3da25ad267a2 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c | |||
@@ -39,6 +39,7 @@ static const struct usb_device_id id_table[] = { | |||
39 | { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ2) }, | 39 | { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ2) }, |
40 | { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_DCU11) }, | 40 | { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_DCU11) }, |
41 | { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ3) }, | 41 | { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ3) }, |
42 | { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_CHILITAG) }, | ||
42 | { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_PHAROS) }, | 43 | { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_PHAROS) }, |
43 | { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ALDIGA) }, | 44 | { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ALDIGA) }, |
44 | { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MMX) }, | 45 | { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MMX) }, |
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index 3b5a15d1dc0d..123289085ee2 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h | |||
@@ -17,6 +17,7 @@ | |||
17 | #define PL2303_PRODUCT_ID_DCU11 0x1234 | 17 | #define PL2303_PRODUCT_ID_DCU11 0x1234 |
18 | #define PL2303_PRODUCT_ID_PHAROS 0xaaa0 | 18 | #define PL2303_PRODUCT_ID_PHAROS 0xaaa0 |
19 | #define PL2303_PRODUCT_ID_RSAQ3 0xaaa2 | 19 | #define PL2303_PRODUCT_ID_RSAQ3 0xaaa2 |
20 | #define PL2303_PRODUCT_ID_CHILITAG 0xaaa8 | ||
20 | #define PL2303_PRODUCT_ID_ALDIGA 0x0611 | 21 | #define PL2303_PRODUCT_ID_ALDIGA 0x0611 |
21 | #define PL2303_PRODUCT_ID_MMX 0x0612 | 22 | #define PL2303_PRODUCT_ID_MMX 0x0612 |
22 | #define PL2303_PRODUCT_ID_GPRS 0x0609 | 23 | #define PL2303_PRODUCT_ID_GPRS 0x0609 |
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index 4516291df1b8..fb6dc16c754a 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c | |||
@@ -166,6 +166,8 @@ static const struct usb_device_id id_table[] = { | |||
166 | {DEVICE_SWI(0x1199, 0x9079)}, /* Sierra Wireless EM74xx */ | 166 | {DEVICE_SWI(0x1199, 0x9079)}, /* Sierra Wireless EM74xx */ |
167 | {DEVICE_SWI(0x1199, 0x907a)}, /* Sierra Wireless EM74xx QDL */ | 167 | {DEVICE_SWI(0x1199, 0x907a)}, /* Sierra Wireless EM74xx QDL */ |
168 | {DEVICE_SWI(0x1199, 0x907b)}, /* Sierra Wireless EM74xx */ | 168 | {DEVICE_SWI(0x1199, 0x907b)}, /* Sierra Wireless EM74xx */ |
169 | {DEVICE_SWI(0x1199, 0x9090)}, /* Sierra Wireless EM7565 QDL */ | ||
170 | {DEVICE_SWI(0x1199, 0x9091)}, /* Sierra Wireless EM7565 */ | ||
169 | {DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */ | 171 | {DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */ |
170 | {DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */ | 172 | {DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */ |
171 | {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ | 173 | {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ |
@@ -346,6 +348,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) | |||
346 | break; | 348 | break; |
347 | case 2: | 349 | case 2: |
348 | dev_dbg(dev, "NMEA GPS interface found\n"); | 350 | dev_dbg(dev, "NMEA GPS interface found\n"); |
351 | sendsetup = true; | ||
349 | break; | 352 | break; |
350 | case 3: | 353 | case 3: |
351 | dev_dbg(dev, "Modem port found\n"); | 354 | dev_dbg(dev, "Modem port found\n"); |
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c index e98b6e57b703..6aa7ff2c1cf7 100644 --- a/drivers/usb/serial/usb-serial-simple.c +++ b/drivers/usb/serial/usb-serial-simple.c | |||
@@ -80,6 +80,11 @@ DEVICE(vivopay, VIVOPAY_IDS); | |||
80 | { USB_DEVICE(0x22b8, 0x2c64) } /* Motorola V950 phone */ | 80 | { USB_DEVICE(0x22b8, 0x2c64) } /* Motorola V950 phone */ |
81 | DEVICE(moto_modem, MOTO_IDS); | 81 | DEVICE(moto_modem, MOTO_IDS); |
82 | 82 | ||
83 | /* Motorola Tetra driver */ | ||
84 | #define MOTOROLA_TETRA_IDS() \ | ||
85 | { USB_DEVICE(0x0cad, 0x9011) } /* Motorola Solutions TETRA PEI */ | ||
86 | DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS); | ||
87 | |||
83 | /* Novatel Wireless GPS driver */ | 88 | /* Novatel Wireless GPS driver */ |
84 | #define NOVATEL_IDS() \ | 89 | #define NOVATEL_IDS() \ |
85 | { USB_DEVICE(0x09d7, 0x0100) } /* NovAtel FlexPack GPS */ | 90 | { USB_DEVICE(0x09d7, 0x0100) } /* NovAtel FlexPack GPS */ |
@@ -110,6 +115,7 @@ static struct usb_serial_driver * const serial_drivers[] = { | |||
110 | &google_device, | 115 | &google_device, |
111 | &vivopay_device, | 116 | &vivopay_device, |
112 | &moto_modem_device, | 117 | &moto_modem_device, |
118 | &motorola_tetra_device, | ||
113 | &novatel_gps_device, | 119 | &novatel_gps_device, |
114 | &hp4x_device, | 120 | &hp4x_device, |
115 | &suunto_device, | 121 | &suunto_device, |
@@ -125,6 +131,7 @@ static const struct usb_device_id id_table[] = { | |||
125 | GOOGLE_IDS(), | 131 | GOOGLE_IDS(), |
126 | VIVOPAY_IDS(), | 132 | VIVOPAY_IDS(), |
127 | MOTO_IDS(), | 133 | MOTO_IDS(), |
134 | MOTOROLA_TETRA_IDS(), | ||
128 | NOVATEL_IDS(), | 135 | NOVATEL_IDS(), |
129 | HP4X_IDS(), | 136 | HP4X_IDS(), |
130 | SUUNTO_IDS(), | 137 | SUUNTO_IDS(), |
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index f952635ebe5f..de7214ae4fed 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c | |||
@@ -1052,20 +1052,19 @@ static int uas_post_reset(struct usb_interface *intf) | |||
1052 | return 0; | 1052 | return 0; |
1053 | 1053 | ||
1054 | err = uas_configure_endpoints(devinfo); | 1054 | err = uas_configure_endpoints(devinfo); |
1055 | if (err) { | 1055 | if (err && err != ENODEV) |
1056 | shost_printk(KERN_ERR, shost, | 1056 | shost_printk(KERN_ERR, shost, |
1057 | "%s: alloc streams error %d after reset", | 1057 | "%s: alloc streams error %d after reset", |
1058 | __func__, err); | 1058 | __func__, err); |
1059 | return 1; | ||
1060 | } | ||
1061 | 1059 | ||
1060 | /* we must unblock the host in every case lest we deadlock */ | ||
1062 | spin_lock_irqsave(shost->host_lock, flags); | 1061 | spin_lock_irqsave(shost->host_lock, flags); |
1063 | scsi_report_bus_reset(shost, 0); | 1062 | scsi_report_bus_reset(shost, 0); |
1064 | spin_unlock_irqrestore(shost->host_lock, flags); | 1063 | spin_unlock_irqrestore(shost->host_lock, flags); |
1065 | 1064 | ||
1066 | scsi_unblock_requests(shost); | 1065 | scsi_unblock_requests(shost); |
1067 | 1066 | ||
1068 | return 0; | 1067 | return err ? 1 : 0; |
1069 | } | 1068 | } |
1070 | 1069 | ||
1071 | static int uas_suspend(struct usb_interface *intf, pm_message_t message) | 1070 | static int uas_suspend(struct usb_interface *intf, pm_message_t message) |
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index 2f80163ffb94..8ed80f28416f 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h | |||
@@ -155,6 +155,13 @@ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999, | |||
155 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | 155 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
156 | US_FL_NO_ATA_1X), | 156 | US_FL_NO_ATA_1X), |
157 | 157 | ||
158 | /* Reported-by: Icenowy Zheng <icenowy@aosc.io> */ | ||
159 | UNUSUAL_DEV(0x2537, 0x1068, 0x0000, 0x9999, | ||
160 | "Norelsys", | ||
161 | "NS1068X", | ||
162 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | ||
163 | US_FL_IGNORE_UAS), | ||
164 | |||
158 | /* Reported-by: Takeo Nakayama <javhera@gmx.com> */ | 165 | /* Reported-by: Takeo Nakayama <javhera@gmx.com> */ |
159 | UNUSUAL_DEV(0x357d, 0x7788, 0x0000, 0x9999, | 166 | UNUSUAL_DEV(0x357d, 0x7788, 0x0000, 0x9999, |
160 | "JMicron", | 167 | "JMicron", |
diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c index a3ec49bdc1e6..ec38370ffcab 100644 --- a/drivers/usb/usbip/stub_dev.c +++ b/drivers/usb/usbip/stub_dev.c | |||
@@ -163,8 +163,7 @@ static void stub_shutdown_connection(struct usbip_device *ud) | |||
163 | * step 1? | 163 | * step 1? |
164 | */ | 164 | */ |
165 | if (ud->tcp_socket) { | 165 | if (ud->tcp_socket) { |
166 | dev_dbg(&sdev->udev->dev, "shutdown tcp_socket %p\n", | 166 | dev_dbg(&sdev->udev->dev, "shutdown sockfd %d\n", ud->sockfd); |
167 | ud->tcp_socket); | ||
168 | kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR); | 167 | kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR); |
169 | } | 168 | } |
170 | 169 | ||
diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c index af10f7b131a4..325b4c05acdd 100644 --- a/drivers/usb/usbip/stub_main.c +++ b/drivers/usb/usbip/stub_main.c | |||
@@ -252,11 +252,12 @@ void stub_device_cleanup_urbs(struct stub_device *sdev) | |||
252 | struct stub_priv *priv; | 252 | struct stub_priv *priv; |
253 | struct urb *urb; | 253 | struct urb *urb; |
254 | 254 | ||
255 | dev_dbg(&sdev->udev->dev, "free sdev %p\n", sdev); | 255 | dev_dbg(&sdev->udev->dev, "Stub device cleaning up urbs\n"); |
256 | 256 | ||
257 | while ((priv = stub_priv_pop(sdev))) { | 257 | while ((priv = stub_priv_pop(sdev))) { |
258 | urb = priv->urb; | 258 | urb = priv->urb; |
259 | dev_dbg(&sdev->udev->dev, "free urb %p\n", urb); | 259 | dev_dbg(&sdev->udev->dev, "free urb seqnum %lu\n", |
260 | priv->seqnum); | ||
260 | usb_kill_urb(urb); | 261 | usb_kill_urb(urb); |
261 | 262 | ||
262 | kmem_cache_free(stub_priv_cache, priv); | 263 | kmem_cache_free(stub_priv_cache, priv); |
diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c index 00e475c51a12..56cacb68040c 100644 --- a/drivers/usb/usbip/stub_rx.c +++ b/drivers/usb/usbip/stub_rx.c | |||
@@ -230,9 +230,6 @@ static int stub_recv_cmd_unlink(struct stub_device *sdev, | |||
230 | if (priv->seqnum != pdu->u.cmd_unlink.seqnum) | 230 | if (priv->seqnum != pdu->u.cmd_unlink.seqnum) |
231 | continue; | 231 | continue; |
232 | 232 | ||
233 | dev_info(&priv->urb->dev->dev, "unlink urb %p\n", | ||
234 | priv->urb); | ||
235 | |||
236 | /* | 233 | /* |
237 | * This matched urb is not completed yet (i.e., be in | 234 | * This matched urb is not completed yet (i.e., be in |
238 | * flight in usb hcd hardware/driver). Now we are | 235 | * flight in usb hcd hardware/driver). Now we are |
@@ -271,8 +268,8 @@ static int stub_recv_cmd_unlink(struct stub_device *sdev, | |||
271 | ret = usb_unlink_urb(priv->urb); | 268 | ret = usb_unlink_urb(priv->urb); |
272 | if (ret != -EINPROGRESS) | 269 | if (ret != -EINPROGRESS) |
273 | dev_err(&priv->urb->dev->dev, | 270 | dev_err(&priv->urb->dev->dev, |
274 | "failed to unlink a urb %p, ret %d\n", | 271 | "failed to unlink a urb # %lu, ret %d\n", |
275 | priv->urb, ret); | 272 | priv->seqnum, ret); |
276 | 273 | ||
277 | return 0; | 274 | return 0; |
278 | } | 275 | } |
@@ -341,23 +338,26 @@ static struct stub_priv *stub_priv_alloc(struct stub_device *sdev, | |||
341 | return priv; | 338 | return priv; |
342 | } | 339 | } |
343 | 340 | ||
344 | static int get_pipe(struct stub_device *sdev, int epnum, int dir) | 341 | static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu) |
345 | { | 342 | { |
346 | struct usb_device *udev = sdev->udev; | 343 | struct usb_device *udev = sdev->udev; |
347 | struct usb_host_endpoint *ep; | 344 | struct usb_host_endpoint *ep; |
348 | struct usb_endpoint_descriptor *epd = NULL; | 345 | struct usb_endpoint_descriptor *epd = NULL; |
346 | int epnum = pdu->base.ep; | ||
347 | int dir = pdu->base.direction; | ||
348 | |||
349 | if (epnum < 0 || epnum > 15) | ||
350 | goto err_ret; | ||
349 | 351 | ||
350 | if (dir == USBIP_DIR_IN) | 352 | if (dir == USBIP_DIR_IN) |
351 | ep = udev->ep_in[epnum & 0x7f]; | 353 | ep = udev->ep_in[epnum & 0x7f]; |
352 | else | 354 | else |
353 | ep = udev->ep_out[epnum & 0x7f]; | 355 | ep = udev->ep_out[epnum & 0x7f]; |
354 | if (!ep) { | 356 | if (!ep) |
355 | dev_err(&sdev->interface->dev, "no such endpoint?, %d\n", | 357 | goto err_ret; |
356 | epnum); | ||
357 | BUG(); | ||
358 | } | ||
359 | 358 | ||
360 | epd = &ep->desc; | 359 | epd = &ep->desc; |
360 | |||
361 | if (usb_endpoint_xfer_control(epd)) { | 361 | if (usb_endpoint_xfer_control(epd)) { |
362 | if (dir == USBIP_DIR_OUT) | 362 | if (dir == USBIP_DIR_OUT) |
363 | return usb_sndctrlpipe(udev, epnum); | 363 | return usb_sndctrlpipe(udev, epnum); |
@@ -380,15 +380,37 @@ static int get_pipe(struct stub_device *sdev, int epnum, int dir) | |||
380 | } | 380 | } |
381 | 381 | ||
382 | if (usb_endpoint_xfer_isoc(epd)) { | 382 | if (usb_endpoint_xfer_isoc(epd)) { |
383 | /* validate packet size and number of packets */ | ||
384 | unsigned int maxp, packets, bytes; | ||
385 | |||
386 | #define USB_EP_MAXP_MULT_SHIFT 11 | ||
387 | #define USB_EP_MAXP_MULT_MASK (3 << USB_EP_MAXP_MULT_SHIFT) | ||
388 | #define USB_EP_MAXP_MULT(m) \ | ||
389 | (((m) & USB_EP_MAXP_MULT_MASK) >> USB_EP_MAXP_MULT_SHIFT) | ||
390 | |||
391 | maxp = usb_endpoint_maxp(epd); | ||
392 | maxp *= (USB_EP_MAXP_MULT( | ||
393 | __le16_to_cpu(epd->wMaxPacketSize)) + 1); | ||
394 | bytes = pdu->u.cmd_submit.transfer_buffer_length; | ||
395 | packets = DIV_ROUND_UP(bytes, maxp); | ||
396 | |||
397 | if (pdu->u.cmd_submit.number_of_packets < 0 || | ||
398 | pdu->u.cmd_submit.number_of_packets > packets) { | ||
399 | dev_err(&sdev->udev->dev, | ||
400 | "CMD_SUBMIT: isoc invalid num packets %d\n", | ||
401 | pdu->u.cmd_submit.number_of_packets); | ||
402 | return -1; | ||
403 | } | ||
383 | if (dir == USBIP_DIR_OUT) | 404 | if (dir == USBIP_DIR_OUT) |
384 | return usb_sndisocpipe(udev, epnum); | 405 | return usb_sndisocpipe(udev, epnum); |
385 | else | 406 | else |
386 | return usb_rcvisocpipe(udev, epnum); | 407 | return usb_rcvisocpipe(udev, epnum); |
387 | } | 408 | } |
388 | 409 | ||
410 | err_ret: | ||
389 | /* NOT REACHED */ | 411 | /* NOT REACHED */ |
390 | dev_err(&sdev->interface->dev, "get pipe, epnum %d\n", epnum); | 412 | dev_err(&sdev->udev->dev, "CMD_SUBMIT: invalid epnum %d\n", epnum); |
391 | return 0; | 413 | return -1; |
392 | } | 414 | } |
393 | 415 | ||
394 | static void masking_bogus_flags(struct urb *urb) | 416 | static void masking_bogus_flags(struct urb *urb) |
@@ -452,7 +474,10 @@ static void stub_recv_cmd_submit(struct stub_device *sdev, | |||
452 | struct stub_priv *priv; | 474 | struct stub_priv *priv; |
453 | struct usbip_device *ud = &sdev->ud; | 475 | struct usbip_device *ud = &sdev->ud; |
454 | struct usb_device *udev = sdev->udev; | 476 | struct usb_device *udev = sdev->udev; |
455 | int pipe = get_pipe(sdev, pdu->base.ep, pdu->base.direction); | 477 | int pipe = get_pipe(sdev, pdu); |
478 | |||
479 | if (pipe == -1) | ||
480 | return; | ||
456 | 481 | ||
457 | priv = stub_priv_alloc(sdev, pdu); | 482 | priv = stub_priv_alloc(sdev, pdu); |
458 | if (!priv) | 483 | if (!priv) |
diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c index af858d52608a..f4dd30c56f36 100644 --- a/drivers/usb/usbip/stub_tx.c +++ b/drivers/usb/usbip/stub_tx.c | |||
@@ -201,8 +201,8 @@ static int stub_send_ret_submit(struct stub_device *sdev) | |||
201 | 201 | ||
202 | /* 1. setup usbip_header */ | 202 | /* 1. setup usbip_header */ |
203 | setup_ret_submit_pdu(&pdu_header, urb); | 203 | setup_ret_submit_pdu(&pdu_header, urb); |
204 | usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n", | 204 | usbip_dbg_stub_tx("setup txdata seqnum: %d\n", |
205 | pdu_header.base.seqnum, urb); | 205 | pdu_header.base.seqnum); |
206 | usbip_header_correct_endian(&pdu_header, 1); | 206 | usbip_header_correct_endian(&pdu_header, 1); |
207 | 207 | ||
208 | iov[iovnum].iov_base = &pdu_header; | 208 | iov[iovnum].iov_base = &pdu_header; |
diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c index e40da7759a0e..1838f1b2c2fa 100644 --- a/drivers/usb/usbip/usbip_common.c +++ b/drivers/usb/usbip/usbip_common.c | |||
@@ -103,7 +103,7 @@ static void usbip_dump_usb_device(struct usb_device *udev) | |||
103 | dev_dbg(dev, " devnum(%d) devpath(%s) usb speed(%s)", | 103 | dev_dbg(dev, " devnum(%d) devpath(%s) usb speed(%s)", |
104 | udev->devnum, udev->devpath, usb_speed_string(udev->speed)); | 104 | udev->devnum, udev->devpath, usb_speed_string(udev->speed)); |
105 | 105 | ||
106 | pr_debug("tt %p, ttport %d\n", udev->tt, udev->ttport); | 106 | pr_debug("tt hub ttport %d\n", udev->ttport); |
107 | 107 | ||
108 | dev_dbg(dev, " "); | 108 | dev_dbg(dev, " "); |
109 | for (i = 0; i < 16; i++) | 109 | for (i = 0; i < 16; i++) |
@@ -136,12 +136,8 @@ static void usbip_dump_usb_device(struct usb_device *udev) | |||
136 | } | 136 | } |
137 | pr_debug("\n"); | 137 | pr_debug("\n"); |
138 | 138 | ||
139 | dev_dbg(dev, "parent %p, bus %p\n", udev->parent, udev->bus); | 139 | dev_dbg(dev, "parent %s, bus %s\n", dev_name(&udev->parent->dev), |
140 | 140 | udev->bus->bus_name); | |
141 | dev_dbg(dev, | ||
142 | "descriptor %p, config %p, actconfig %p, rawdescriptors %p\n", | ||
143 | &udev->descriptor, udev->config, | ||
144 | udev->actconfig, udev->rawdescriptors); | ||
145 | 141 | ||
146 | dev_dbg(dev, "have_langid %d, string_langid %d\n", | 142 | dev_dbg(dev, "have_langid %d, string_langid %d\n", |
147 | udev->have_langid, udev->string_langid); | 143 | udev->have_langid, udev->string_langid); |
@@ -249,9 +245,6 @@ void usbip_dump_urb(struct urb *urb) | |||
249 | 245 | ||
250 | dev = &urb->dev->dev; | 246 | dev = &urb->dev->dev; |
251 | 247 | ||
252 | dev_dbg(dev, " urb :%p\n", urb); | ||
253 | dev_dbg(dev, " dev :%p\n", urb->dev); | ||
254 | |||
255 | usbip_dump_usb_device(urb->dev); | 248 | usbip_dump_usb_device(urb->dev); |
256 | 249 | ||
257 | dev_dbg(dev, " pipe :%08x ", urb->pipe); | 250 | dev_dbg(dev, " pipe :%08x ", urb->pipe); |
@@ -260,11 +253,9 @@ void usbip_dump_urb(struct urb *urb) | |||
260 | 253 | ||
261 | dev_dbg(dev, " status :%d\n", urb->status); | 254 | dev_dbg(dev, " status :%d\n", urb->status); |
262 | dev_dbg(dev, " transfer_flags :%08X\n", urb->transfer_flags); | 255 | dev_dbg(dev, " transfer_flags :%08X\n", urb->transfer_flags); |
263 | dev_dbg(dev, " transfer_buffer :%p\n", urb->transfer_buffer); | ||
264 | dev_dbg(dev, " transfer_buffer_length:%d\n", | 256 | dev_dbg(dev, " transfer_buffer_length:%d\n", |
265 | urb->transfer_buffer_length); | 257 | urb->transfer_buffer_length); |
266 | dev_dbg(dev, " actual_length :%d\n", urb->actual_length); | 258 | dev_dbg(dev, " actual_length :%d\n", urb->actual_length); |
267 | dev_dbg(dev, " setup_packet :%p\n", urb->setup_packet); | ||
268 | 259 | ||
269 | if (urb->setup_packet && usb_pipetype(urb->pipe) == PIPE_CONTROL) | 260 | if (urb->setup_packet && usb_pipetype(urb->pipe) == PIPE_CONTROL) |
270 | usbip_dump_usb_ctrlrequest( | 261 | usbip_dump_usb_ctrlrequest( |
@@ -274,8 +265,6 @@ void usbip_dump_urb(struct urb *urb) | |||
274 | dev_dbg(dev, " number_of_packets :%d\n", urb->number_of_packets); | 265 | dev_dbg(dev, " number_of_packets :%d\n", urb->number_of_packets); |
275 | dev_dbg(dev, " interval :%d\n", urb->interval); | 266 | dev_dbg(dev, " interval :%d\n", urb->interval); |
276 | dev_dbg(dev, " error_count :%d\n", urb->error_count); | 267 | dev_dbg(dev, " error_count :%d\n", urb->error_count); |
277 | dev_dbg(dev, " context :%p\n", urb->context); | ||
278 | dev_dbg(dev, " complete :%p\n", urb->complete); | ||
279 | } | 268 | } |
280 | EXPORT_SYMBOL_GPL(usbip_dump_urb); | 269 | EXPORT_SYMBOL_GPL(usbip_dump_urb); |
281 | 270 | ||
@@ -328,18 +317,14 @@ int usbip_recv(struct socket *sock, void *buf, int size) | |||
328 | struct msghdr msg; | 317 | struct msghdr msg; |
329 | struct kvec iov; | 318 | struct kvec iov; |
330 | int total = 0; | 319 | int total = 0; |
331 | |||
332 | /* for blocks of if (usbip_dbg_flag_xmit) */ | 320 | /* for blocks of if (usbip_dbg_flag_xmit) */ |
333 | char *bp = buf; | 321 | char *bp = buf; |
334 | int osize = size; | 322 | int osize = size; |
335 | 323 | ||
336 | usbip_dbg_xmit("enter\n"); | 324 | if (!sock || !buf || !size) |
337 | |||
338 | if (!sock || !buf || !size) { | ||
339 | pr_err("invalid arg, sock %p buff %p size %d\n", sock, buf, | ||
340 | size); | ||
341 | return -EINVAL; | 325 | return -EINVAL; |
342 | } | 326 | |
327 | usbip_dbg_xmit("enter\n"); | ||
343 | 328 | ||
344 | do { | 329 | do { |
345 | sock->sk->sk_allocation = GFP_NOIO; | 330 | sock->sk->sk_allocation = GFP_NOIO; |
@@ -352,11 +337,8 @@ int usbip_recv(struct socket *sock, void *buf, int size) | |||
352 | msg.msg_flags = MSG_NOSIGNAL; | 337 | msg.msg_flags = MSG_NOSIGNAL; |
353 | 338 | ||
354 | result = kernel_recvmsg(sock, &msg, &iov, 1, size, MSG_WAITALL); | 339 | result = kernel_recvmsg(sock, &msg, &iov, 1, size, MSG_WAITALL); |
355 | if (result <= 0) { | 340 | if (result <= 0) |
356 | pr_debug("receive sock %p buf %p size %u ret %d total %d\n", | ||
357 | sock, buf, size, result, total); | ||
358 | goto err; | 341 | goto err; |
359 | } | ||
360 | 342 | ||
361 | size -= result; | 343 | size -= result; |
362 | buf += result; | 344 | buf += result; |
diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h index 86b08475c254..f875ccaa55f9 100644 --- a/drivers/usb/usbip/usbip_common.h +++ b/drivers/usb/usbip/usbip_common.h | |||
@@ -261,6 +261,7 @@ struct usbip_device { | |||
261 | /* lock for status */ | 261 | /* lock for status */ |
262 | spinlock_t lock; | 262 | spinlock_t lock; |
263 | 263 | ||
264 | int sockfd; | ||
264 | struct socket *tcp_socket; | 265 | struct socket *tcp_socket; |
265 | 266 | ||
266 | struct task_struct *tcp_rx; | 267 | struct task_struct *tcp_rx; |
diff --git a/drivers/usb/usbip/usbip_event.c b/drivers/usb/usbip/usbip_event.c index 64933b993d7a..2580a32bcdff 100644 --- a/drivers/usb/usbip/usbip_event.c +++ b/drivers/usb/usbip/usbip_event.c | |||
@@ -117,11 +117,12 @@ EXPORT_SYMBOL_GPL(usbip_event_add); | |||
117 | int usbip_event_happened(struct usbip_device *ud) | 117 | int usbip_event_happened(struct usbip_device *ud) |
118 | { | 118 | { |
119 | int happened = 0; | 119 | int happened = 0; |
120 | unsigned long flags; | ||
120 | 121 | ||
121 | spin_lock(&ud->lock); | 122 | spin_lock_irqsave(&ud->lock, flags); |
122 | if (ud->event != 0) | 123 | if (ud->event != 0) |
123 | happened = 1; | 124 | happened = 1; |
124 | spin_unlock(&ud->lock); | 125 | spin_unlock_irqrestore(&ud->lock, flags); |
125 | 126 | ||
126 | return happened; | 127 | return happened; |
127 | } | 128 | } |
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c index 81b2b9f808b5..2d96bfd34138 100644 --- a/drivers/usb/usbip/vhci_hcd.c +++ b/drivers/usb/usbip/vhci_hcd.c | |||
@@ -121,9 +121,11 @@ static void dump_port_status_diff(u32 prev_status, u32 new_status) | |||
121 | 121 | ||
122 | void rh_port_connect(int rhport, enum usb_device_speed speed) | 122 | void rh_port_connect(int rhport, enum usb_device_speed speed) |
123 | { | 123 | { |
124 | unsigned long flags; | ||
125 | |||
124 | usbip_dbg_vhci_rh("rh_port_connect %d\n", rhport); | 126 | usbip_dbg_vhci_rh("rh_port_connect %d\n", rhport); |
125 | 127 | ||
126 | spin_lock(&the_controller->lock); | 128 | spin_lock_irqsave(&the_controller->lock, flags); |
127 | 129 | ||
128 | the_controller->port_status[rhport] |= USB_PORT_STAT_CONNECTION | 130 | the_controller->port_status[rhport] |= USB_PORT_STAT_CONNECTION |
129 | | (1 << USB_PORT_FEAT_C_CONNECTION); | 131 | | (1 << USB_PORT_FEAT_C_CONNECTION); |
@@ -139,22 +141,24 @@ void rh_port_connect(int rhport, enum usb_device_speed speed) | |||
139 | break; | 141 | break; |
140 | } | 142 | } |
141 | 143 | ||
142 | spin_unlock(&the_controller->lock); | 144 | spin_unlock_irqrestore(&the_controller->lock, flags); |
143 | 145 | ||
144 | usb_hcd_poll_rh_status(vhci_to_hcd(the_controller)); | 146 | usb_hcd_poll_rh_status(vhci_to_hcd(the_controller)); |
145 | } | 147 | } |
146 | 148 | ||
147 | static void rh_port_disconnect(int rhport) | 149 | static void rh_port_disconnect(int rhport) |
148 | { | 150 | { |
151 | unsigned long flags; | ||
152 | |||
149 | usbip_dbg_vhci_rh("rh_port_disconnect %d\n", rhport); | 153 | usbip_dbg_vhci_rh("rh_port_disconnect %d\n", rhport); |
150 | 154 | ||
151 | spin_lock(&the_controller->lock); | 155 | spin_lock_irqsave(&the_controller->lock, flags); |
152 | 156 | ||
153 | the_controller->port_status[rhport] &= ~USB_PORT_STAT_CONNECTION; | 157 | the_controller->port_status[rhport] &= ~USB_PORT_STAT_CONNECTION; |
154 | the_controller->port_status[rhport] |= | 158 | the_controller->port_status[rhport] |= |
155 | (1 << USB_PORT_FEAT_C_CONNECTION); | 159 | (1 << USB_PORT_FEAT_C_CONNECTION); |
156 | 160 | ||
157 | spin_unlock(&the_controller->lock); | 161 | spin_unlock_irqrestore(&the_controller->lock, flags); |
158 | usb_hcd_poll_rh_status(vhci_to_hcd(the_controller)); | 162 | usb_hcd_poll_rh_status(vhci_to_hcd(the_controller)); |
159 | } | 163 | } |
160 | 164 | ||
@@ -182,13 +186,14 @@ static int vhci_hub_status(struct usb_hcd *hcd, char *buf) | |||
182 | int retval; | 186 | int retval; |
183 | int rhport; | 187 | int rhport; |
184 | int changed = 0; | 188 | int changed = 0; |
189 | unsigned long flags; | ||
185 | 190 | ||
186 | retval = DIV_ROUND_UP(VHCI_NPORTS + 1, 8); | 191 | retval = DIV_ROUND_UP(VHCI_NPORTS + 1, 8); |
187 | memset(buf, 0, retval); | 192 | memset(buf, 0, retval); |
188 | 193 | ||
189 | vhci = hcd_to_vhci(hcd); | 194 | vhci = hcd_to_vhci(hcd); |
190 | 195 | ||
191 | spin_lock(&vhci->lock); | 196 | spin_lock_irqsave(&vhci->lock, flags); |
192 | if (!HCD_HW_ACCESSIBLE(hcd)) { | 197 | if (!HCD_HW_ACCESSIBLE(hcd)) { |
193 | usbip_dbg_vhci_rh("hw accessible flag not on?\n"); | 198 | usbip_dbg_vhci_rh("hw accessible flag not on?\n"); |
194 | goto done; | 199 | goto done; |
@@ -209,7 +214,7 @@ static int vhci_hub_status(struct usb_hcd *hcd, char *buf) | |||
209 | usb_hcd_resume_root_hub(hcd); | 214 | usb_hcd_resume_root_hub(hcd); |
210 | 215 | ||
211 | done: | 216 | done: |
212 | spin_unlock(&vhci->lock); | 217 | spin_unlock_irqrestore(&vhci->lock, flags); |
213 | return changed ? retval : 0; | 218 | return changed ? retval : 0; |
214 | } | 219 | } |
215 | 220 | ||
@@ -236,6 +241,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, | |||
236 | struct vhci_hcd *dum; | 241 | struct vhci_hcd *dum; |
237 | int retval = 0; | 242 | int retval = 0; |
238 | int rhport; | 243 | int rhport; |
244 | unsigned long flags; | ||
239 | 245 | ||
240 | u32 prev_port_status[VHCI_NPORTS]; | 246 | u32 prev_port_status[VHCI_NPORTS]; |
241 | 247 | ||
@@ -254,7 +260,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, | |||
254 | 260 | ||
255 | dum = hcd_to_vhci(hcd); | 261 | dum = hcd_to_vhci(hcd); |
256 | 262 | ||
257 | spin_lock(&dum->lock); | 263 | spin_lock_irqsave(&dum->lock, flags); |
258 | 264 | ||
259 | /* store old status and compare now and old later */ | 265 | /* store old status and compare now and old later */ |
260 | if (usbip_dbg_flag_vhci_rh) { | 266 | if (usbip_dbg_flag_vhci_rh) { |
@@ -279,7 +285,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, | |||
279 | case USB_PORT_FEAT_POWER: | 285 | case USB_PORT_FEAT_POWER: |
280 | usbip_dbg_vhci_rh( | 286 | usbip_dbg_vhci_rh( |
281 | " ClearPortFeature: USB_PORT_FEAT_POWER\n"); | 287 | " ClearPortFeature: USB_PORT_FEAT_POWER\n"); |
282 | dum->port_status[rhport] = 0; | 288 | dum->port_status[rhport] &= ~USB_PORT_STAT_POWER; |
283 | dum->resuming = 0; | 289 | dum->resuming = 0; |
284 | break; | 290 | break; |
285 | case USB_PORT_FEAT_C_RESET: | 291 | case USB_PORT_FEAT_C_RESET: |
@@ -408,7 +414,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, | |||
408 | } | 414 | } |
409 | usbip_dbg_vhci_rh(" bye\n"); | 415 | usbip_dbg_vhci_rh(" bye\n"); |
410 | 416 | ||
411 | spin_unlock(&dum->lock); | 417 | spin_unlock_irqrestore(&dum->lock, flags); |
412 | 418 | ||
413 | return retval; | 419 | return retval; |
414 | } | 420 | } |
@@ -431,6 +437,7 @@ static void vhci_tx_urb(struct urb *urb) | |||
431 | { | 437 | { |
432 | struct vhci_device *vdev = get_vdev(urb->dev); | 438 | struct vhci_device *vdev = get_vdev(urb->dev); |
433 | struct vhci_priv *priv; | 439 | struct vhci_priv *priv; |
440 | unsigned long flags; | ||
434 | 441 | ||
435 | if (!vdev) { | 442 | if (!vdev) { |
436 | pr_err("could not get virtual device"); | 443 | pr_err("could not get virtual device"); |
@@ -443,7 +450,7 @@ static void vhci_tx_urb(struct urb *urb) | |||
443 | return; | 450 | return; |
444 | } | 451 | } |
445 | 452 | ||
446 | spin_lock(&vdev->priv_lock); | 453 | spin_lock_irqsave(&vdev->priv_lock, flags); |
447 | 454 | ||
448 | priv->seqnum = atomic_inc_return(&the_controller->seqnum); | 455 | priv->seqnum = atomic_inc_return(&the_controller->seqnum); |
449 | if (priv->seqnum == 0xffff) | 456 | if (priv->seqnum == 0xffff) |
@@ -457,7 +464,7 @@ static void vhci_tx_urb(struct urb *urb) | |||
457 | list_add_tail(&priv->list, &vdev->priv_tx); | 464 | list_add_tail(&priv->list, &vdev->priv_tx); |
458 | 465 | ||
459 | wake_up(&vdev->waitq_tx); | 466 | wake_up(&vdev->waitq_tx); |
460 | spin_unlock(&vdev->priv_lock); | 467 | spin_unlock_irqrestore(&vdev->priv_lock, flags); |
461 | } | 468 | } |
462 | 469 | ||
463 | static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, | 470 | static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, |
@@ -466,18 +473,16 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, | |||
466 | struct device *dev = &urb->dev->dev; | 473 | struct device *dev = &urb->dev->dev; |
467 | int ret = 0; | 474 | int ret = 0; |
468 | struct vhci_device *vdev; | 475 | struct vhci_device *vdev; |
469 | 476 | unsigned long flags; | |
470 | usbip_dbg_vhci_hc("enter, usb_hcd %p urb %p mem_flags %d\n", | ||
471 | hcd, urb, mem_flags); | ||
472 | 477 | ||
473 | /* patch to usb_sg_init() is in 2.5.60 */ | 478 | /* patch to usb_sg_init() is in 2.5.60 */ |
474 | BUG_ON(!urb->transfer_buffer && urb->transfer_buffer_length); | 479 | BUG_ON(!urb->transfer_buffer && urb->transfer_buffer_length); |
475 | 480 | ||
476 | spin_lock(&the_controller->lock); | 481 | spin_lock_irqsave(&the_controller->lock, flags); |
477 | 482 | ||
478 | if (urb->status != -EINPROGRESS) { | 483 | if (urb->status != -EINPROGRESS) { |
479 | dev_err(dev, "URB already unlinked!, status %d\n", urb->status); | 484 | dev_err(dev, "URB already unlinked!, status %d\n", urb->status); |
480 | spin_unlock(&the_controller->lock); | 485 | spin_unlock_irqrestore(&the_controller->lock, flags); |
481 | return urb->status; | 486 | return urb->status; |
482 | } | 487 | } |
483 | 488 | ||
@@ -489,7 +494,7 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, | |||
489 | vdev->ud.status == VDEV_ST_ERROR) { | 494 | vdev->ud.status == VDEV_ST_ERROR) { |
490 | dev_err(dev, "enqueue for inactive port %d\n", vdev->rhport); | 495 | dev_err(dev, "enqueue for inactive port %d\n", vdev->rhport); |
491 | spin_unlock(&vdev->ud.lock); | 496 | spin_unlock(&vdev->ud.lock); |
492 | spin_unlock(&the_controller->lock); | 497 | spin_unlock_irqrestore(&the_controller->lock, flags); |
493 | return -ENODEV; | 498 | return -ENODEV; |
494 | } | 499 | } |
495 | spin_unlock(&vdev->ud.lock); | 500 | spin_unlock(&vdev->ud.lock); |
@@ -562,14 +567,14 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, | |||
562 | 567 | ||
563 | out: | 568 | out: |
564 | vhci_tx_urb(urb); | 569 | vhci_tx_urb(urb); |
565 | spin_unlock(&the_controller->lock); | 570 | spin_unlock_irqrestore(&the_controller->lock, flags); |
566 | 571 | ||
567 | return 0; | 572 | return 0; |
568 | 573 | ||
569 | no_need_xmit: | 574 | no_need_xmit: |
570 | usb_hcd_unlink_urb_from_ep(hcd, urb); | 575 | usb_hcd_unlink_urb_from_ep(hcd, urb); |
571 | no_need_unlink: | 576 | no_need_unlink: |
572 | spin_unlock(&the_controller->lock); | 577 | spin_unlock_irqrestore(&the_controller->lock, flags); |
573 | if (!ret) | 578 | if (!ret) |
574 | usb_hcd_giveback_urb(vhci_to_hcd(the_controller), | 579 | usb_hcd_giveback_urb(vhci_to_hcd(the_controller), |
575 | urb, urb->status); | 580 | urb, urb->status); |
@@ -626,16 +631,15 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) | |||
626 | { | 631 | { |
627 | struct vhci_priv *priv; | 632 | struct vhci_priv *priv; |
628 | struct vhci_device *vdev; | 633 | struct vhci_device *vdev; |
634 | unsigned long flags; | ||
629 | 635 | ||
630 | pr_info("dequeue a urb %p\n", urb); | 636 | spin_lock_irqsave(&the_controller->lock, flags); |
631 | |||
632 | spin_lock(&the_controller->lock); | ||
633 | 637 | ||
634 | priv = urb->hcpriv; | 638 | priv = urb->hcpriv; |
635 | if (!priv) { | 639 | if (!priv) { |
636 | /* URB was never linked! or will be soon given back by | 640 | /* URB was never linked! or will be soon given back by |
637 | * vhci_rx. */ | 641 | * vhci_rx. */ |
638 | spin_unlock(&the_controller->lock); | 642 | spin_unlock_irqrestore(&the_controller->lock, flags); |
639 | return -EIDRM; | 643 | return -EIDRM; |
640 | } | 644 | } |
641 | 645 | ||
@@ -644,7 +648,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) | |||
644 | 648 | ||
645 | ret = usb_hcd_check_unlink_urb(hcd, urb, status); | 649 | ret = usb_hcd_check_unlink_urb(hcd, urb, status); |
646 | if (ret) { | 650 | if (ret) { |
647 | spin_unlock(&the_controller->lock); | 651 | spin_unlock_irqrestore(&the_controller->lock, flags); |
648 | return ret; | 652 | return ret; |
649 | } | 653 | } |
650 | } | 654 | } |
@@ -656,7 +660,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) | |||
656 | /* tcp connection is closed */ | 660 | /* tcp connection is closed */ |
657 | spin_lock(&vdev->priv_lock); | 661 | spin_lock(&vdev->priv_lock); |
658 | 662 | ||
659 | pr_info("device %p seems to be disconnected\n", vdev); | ||
660 | list_del(&priv->list); | 663 | list_del(&priv->list); |
661 | kfree(priv); | 664 | kfree(priv); |
662 | urb->hcpriv = NULL; | 665 | urb->hcpriv = NULL; |
@@ -668,14 +671,12 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) | |||
668 | * vhci_rx will receive RET_UNLINK and give back the URB. | 671 | * vhci_rx will receive RET_UNLINK and give back the URB. |
669 | * Otherwise, we give back it here. | 672 | * Otherwise, we give back it here. |
670 | */ | 673 | */ |
671 | pr_info("gives back urb %p\n", urb); | ||
672 | |||
673 | usb_hcd_unlink_urb_from_ep(hcd, urb); | 674 | usb_hcd_unlink_urb_from_ep(hcd, urb); |
674 | 675 | ||
675 | spin_unlock(&the_controller->lock); | 676 | spin_unlock_irqrestore(&the_controller->lock, flags); |
676 | usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, | 677 | usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, |
677 | urb->status); | 678 | urb->status); |
678 | spin_lock(&the_controller->lock); | 679 | spin_lock_irqsave(&the_controller->lock, flags); |
679 | 680 | ||
680 | } else { | 681 | } else { |
681 | /* tcp connection is alive */ | 682 | /* tcp connection is alive */ |
@@ -687,7 +688,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) | |||
687 | unlink = kzalloc(sizeof(struct vhci_unlink), GFP_ATOMIC); | 688 | unlink = kzalloc(sizeof(struct vhci_unlink), GFP_ATOMIC); |
688 | if (!unlink) { | 689 | if (!unlink) { |
689 | spin_unlock(&vdev->priv_lock); | 690 | spin_unlock(&vdev->priv_lock); |
690 | spin_unlock(&the_controller->lock); | 691 | spin_unlock_irqrestore(&the_controller->lock, flags); |
691 | usbip_event_add(&vdev->ud, VDEV_EVENT_ERROR_MALLOC); | 692 | usbip_event_add(&vdev->ud, VDEV_EVENT_ERROR_MALLOC); |
692 | return -ENOMEM; | 693 | return -ENOMEM; |
693 | } | 694 | } |
@@ -698,8 +699,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) | |||
698 | 699 | ||
699 | unlink->unlink_seqnum = priv->seqnum; | 700 | unlink->unlink_seqnum = priv->seqnum; |
700 | 701 | ||
701 | pr_info("device %p seems to be still connected\n", vdev); | ||
702 | |||
703 | /* send cmd_unlink and try to cancel the pending URB in the | 702 | /* send cmd_unlink and try to cancel the pending URB in the |
704 | * peer */ | 703 | * peer */ |
705 | list_add_tail(&unlink->list, &vdev->unlink_tx); | 704 | list_add_tail(&unlink->list, &vdev->unlink_tx); |
@@ -708,7 +707,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) | |||
708 | spin_unlock(&vdev->priv_lock); | 707 | spin_unlock(&vdev->priv_lock); |
709 | } | 708 | } |
710 | 709 | ||
711 | spin_unlock(&the_controller->lock); | 710 | spin_unlock_irqrestore(&the_controller->lock, flags); |
712 | 711 | ||
713 | usbip_dbg_vhci_hc("leave\n"); | 712 | usbip_dbg_vhci_hc("leave\n"); |
714 | return 0; | 713 | return 0; |
@@ -717,8 +716,9 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) | |||
717 | static void vhci_device_unlink_cleanup(struct vhci_device *vdev) | 716 | static void vhci_device_unlink_cleanup(struct vhci_device *vdev) |
718 | { | 717 | { |
719 | struct vhci_unlink *unlink, *tmp; | 718 | struct vhci_unlink *unlink, *tmp; |
719 | unsigned long flags; | ||
720 | 720 | ||
721 | spin_lock(&the_controller->lock); | 721 | spin_lock_irqsave(&the_controller->lock, flags); |
722 | spin_lock(&vdev->priv_lock); | 722 | spin_lock(&vdev->priv_lock); |
723 | 723 | ||
724 | list_for_each_entry_safe(unlink, tmp, &vdev->unlink_tx, list) { | 724 | list_for_each_entry_safe(unlink, tmp, &vdev->unlink_tx, list) { |
@@ -752,19 +752,19 @@ static void vhci_device_unlink_cleanup(struct vhci_device *vdev) | |||
752 | list_del(&unlink->list); | 752 | list_del(&unlink->list); |
753 | 753 | ||
754 | spin_unlock(&vdev->priv_lock); | 754 | spin_unlock(&vdev->priv_lock); |
755 | spin_unlock(&the_controller->lock); | 755 | spin_unlock_irqrestore(&the_controller->lock, flags); |
756 | 756 | ||
757 | usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, | 757 | usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, |
758 | urb->status); | 758 | urb->status); |
759 | 759 | ||
760 | spin_lock(&the_controller->lock); | 760 | spin_lock_irqsave(&the_controller->lock, flags); |
761 | spin_lock(&vdev->priv_lock); | 761 | spin_lock(&vdev->priv_lock); |
762 | 762 | ||
763 | kfree(unlink); | 763 | kfree(unlink); |
764 | } | 764 | } |
765 | 765 | ||
766 | spin_unlock(&vdev->priv_lock); | 766 | spin_unlock(&vdev->priv_lock); |
767 | spin_unlock(&the_controller->lock); | 767 | spin_unlock_irqrestore(&the_controller->lock, flags); |
768 | } | 768 | } |
769 | 769 | ||
770 | /* | 770 | /* |
@@ -778,7 +778,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud) | |||
778 | 778 | ||
779 | /* need this? see stub_dev.c */ | 779 | /* need this? see stub_dev.c */ |
780 | if (ud->tcp_socket) { | 780 | if (ud->tcp_socket) { |
781 | pr_debug("shutdown tcp_socket %p\n", ud->tcp_socket); | 781 | pr_debug("shutdown sockfd %d\n", ud->sockfd); |
782 | kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR); | 782 | kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR); |
783 | } | 783 | } |
784 | 784 | ||
@@ -831,8 +831,9 @@ static void vhci_shutdown_connection(struct usbip_device *ud) | |||
831 | static void vhci_device_reset(struct usbip_device *ud) | 831 | static void vhci_device_reset(struct usbip_device *ud) |
832 | { | 832 | { |
833 | struct vhci_device *vdev = container_of(ud, struct vhci_device, ud); | 833 | struct vhci_device *vdev = container_of(ud, struct vhci_device, ud); |
834 | unsigned long flags; | ||
834 | 835 | ||
835 | spin_lock(&ud->lock); | 836 | spin_lock_irqsave(&ud->lock, flags); |
836 | 837 | ||
837 | vdev->speed = 0; | 838 | vdev->speed = 0; |
838 | vdev->devid = 0; | 839 | vdev->devid = 0; |
@@ -846,14 +847,16 @@ static void vhci_device_reset(struct usbip_device *ud) | |||
846 | } | 847 | } |
847 | ud->status = VDEV_ST_NULL; | 848 | ud->status = VDEV_ST_NULL; |
848 | 849 | ||
849 | spin_unlock(&ud->lock); | 850 | spin_unlock_irqrestore(&ud->lock, flags); |
850 | } | 851 | } |
851 | 852 | ||
852 | static void vhci_device_unusable(struct usbip_device *ud) | 853 | static void vhci_device_unusable(struct usbip_device *ud) |
853 | { | 854 | { |
854 | spin_lock(&ud->lock); | 855 | unsigned long flags; |
856 | |||
857 | spin_lock_irqsave(&ud->lock, flags); | ||
855 | ud->status = VDEV_ST_ERROR; | 858 | ud->status = VDEV_ST_ERROR; |
856 | spin_unlock(&ud->lock); | 859 | spin_unlock_irqrestore(&ud->lock, flags); |
857 | } | 860 | } |
858 | 861 | ||
859 | static void vhci_device_init(struct vhci_device *vdev) | 862 | static void vhci_device_init(struct vhci_device *vdev) |
@@ -943,12 +946,13 @@ static int vhci_get_frame_number(struct usb_hcd *hcd) | |||
943 | static int vhci_bus_suspend(struct usb_hcd *hcd) | 946 | static int vhci_bus_suspend(struct usb_hcd *hcd) |
944 | { | 947 | { |
945 | struct vhci_hcd *vhci = hcd_to_vhci(hcd); | 948 | struct vhci_hcd *vhci = hcd_to_vhci(hcd); |
949 | unsigned long flags; | ||
946 | 950 | ||
947 | dev_dbg(&hcd->self.root_hub->dev, "%s\n", __func__); | 951 | dev_dbg(&hcd->self.root_hub->dev, "%s\n", __func__); |
948 | 952 | ||
949 | spin_lock(&vhci->lock); | 953 | spin_lock_irqsave(&vhci->lock, flags); |
950 | hcd->state = HC_STATE_SUSPENDED; | 954 | hcd->state = HC_STATE_SUSPENDED; |
951 | spin_unlock(&vhci->lock); | 955 | spin_unlock_irqrestore(&vhci->lock, flags); |
952 | 956 | ||
953 | return 0; | 957 | return 0; |
954 | } | 958 | } |
@@ -957,15 +961,16 @@ static int vhci_bus_resume(struct usb_hcd *hcd) | |||
957 | { | 961 | { |
958 | struct vhci_hcd *vhci = hcd_to_vhci(hcd); | 962 | struct vhci_hcd *vhci = hcd_to_vhci(hcd); |
959 | int rc = 0; | 963 | int rc = 0; |
964 | unsigned long flags; | ||
960 | 965 | ||
961 | dev_dbg(&hcd->self.root_hub->dev, "%s\n", __func__); | 966 | dev_dbg(&hcd->self.root_hub->dev, "%s\n", __func__); |
962 | 967 | ||
963 | spin_lock(&vhci->lock); | 968 | spin_lock_irqsave(&vhci->lock, flags); |
964 | if (!HCD_HW_ACCESSIBLE(hcd)) | 969 | if (!HCD_HW_ACCESSIBLE(hcd)) |
965 | rc = -ESHUTDOWN; | 970 | rc = -ESHUTDOWN; |
966 | else | 971 | else |
967 | hcd->state = HC_STATE_RUNNING; | 972 | hcd->state = HC_STATE_RUNNING; |
968 | spin_unlock(&vhci->lock); | 973 | spin_unlock_irqrestore(&vhci->lock, flags); |
969 | 974 | ||
970 | return rc; | 975 | return rc; |
971 | } | 976 | } |
@@ -1063,17 +1068,18 @@ static int vhci_hcd_suspend(struct platform_device *pdev, pm_message_t state) | |||
1063 | int rhport = 0; | 1068 | int rhport = 0; |
1064 | int connected = 0; | 1069 | int connected = 0; |
1065 | int ret = 0; | 1070 | int ret = 0; |
1071 | unsigned long flags; | ||
1066 | 1072 | ||
1067 | hcd = platform_get_drvdata(pdev); | 1073 | hcd = platform_get_drvdata(pdev); |
1068 | 1074 | ||
1069 | spin_lock(&the_controller->lock); | 1075 | spin_lock_irqsave(&the_controller->lock, flags); |
1070 | 1076 | ||
1071 | for (rhport = 0; rhport < VHCI_NPORTS; rhport++) | 1077 | for (rhport = 0; rhport < VHCI_NPORTS; rhport++) |
1072 | if (the_controller->port_status[rhport] & | 1078 | if (the_controller->port_status[rhport] & |
1073 | USB_PORT_STAT_CONNECTION) | 1079 | USB_PORT_STAT_CONNECTION) |
1074 | connected += 1; | 1080 | connected += 1; |
1075 | 1081 | ||
1076 | spin_unlock(&the_controller->lock); | 1082 | spin_unlock_irqrestore(&the_controller->lock, flags); |
1077 | 1083 | ||
1078 | if (connected > 0) { | 1084 | if (connected > 0) { |
1079 | dev_info(&pdev->dev, | 1085 | dev_info(&pdev->dev, |
diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c index 00e4a54308e4..323aa7789989 100644 --- a/drivers/usb/usbip/vhci_rx.c +++ b/drivers/usb/usbip/vhci_rx.c | |||
@@ -37,24 +37,23 @@ struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev, __u32 seqnum) | |||
37 | urb = priv->urb; | 37 | urb = priv->urb; |
38 | status = urb->status; | 38 | status = urb->status; |
39 | 39 | ||
40 | usbip_dbg_vhci_rx("find urb %p vurb %p seqnum %u\n", | 40 | usbip_dbg_vhci_rx("find urb seqnum %u\n", seqnum); |
41 | urb, priv, seqnum); | ||
42 | 41 | ||
43 | switch (status) { | 42 | switch (status) { |
44 | case -ENOENT: | 43 | case -ENOENT: |
45 | /* fall through */ | 44 | /* fall through */ |
46 | case -ECONNRESET: | 45 | case -ECONNRESET: |
47 | dev_info(&urb->dev->dev, | 46 | dev_dbg(&urb->dev->dev, |
48 | "urb %p was unlinked %ssynchronuously.\n", urb, | 47 | "urb seq# %u was unlinked %ssynchronuously\n", |
49 | status == -ENOENT ? "" : "a"); | 48 | seqnum, status == -ENOENT ? "" : "a"); |
50 | break; | 49 | break; |
51 | case -EINPROGRESS: | 50 | case -EINPROGRESS: |
52 | /* no info output */ | 51 | /* no info output */ |
53 | break; | 52 | break; |
54 | default: | 53 | default: |
55 | dev_info(&urb->dev->dev, | 54 | dev_dbg(&urb->dev->dev, |
56 | "urb %p may be in a error, status %d\n", urb, | 55 | "urb seq# %u may be in a error, status %d\n", |
57 | status); | 56 | seqnum, status); |
58 | } | 57 | } |
59 | 58 | ||
60 | list_del(&priv->list); | 59 | list_del(&priv->list); |
@@ -72,14 +71,15 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev, | |||
72 | { | 71 | { |
73 | struct usbip_device *ud = &vdev->ud; | 72 | struct usbip_device *ud = &vdev->ud; |
74 | struct urb *urb; | 73 | struct urb *urb; |
74 | unsigned long flags; | ||
75 | 75 | ||
76 | spin_lock(&vdev->priv_lock); | 76 | spin_lock_irqsave(&vdev->priv_lock, flags); |
77 | urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum); | 77 | urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum); |
78 | spin_unlock(&vdev->priv_lock); | 78 | spin_unlock_irqrestore(&vdev->priv_lock, flags); |
79 | 79 | ||
80 | if (!urb) { | 80 | if (!urb) { |
81 | pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum); | 81 | pr_err("cannot find a urb of seqnum %u max seqnum %d\n", |
82 | pr_info("max seqnum %d\n", | 82 | pdu->base.seqnum, |
83 | atomic_read(&the_controller->seqnum)); | 83 | atomic_read(&the_controller->seqnum)); |
84 | usbip_event_add(ud, VDEV_EVENT_ERROR_TCP); | 84 | usbip_event_add(ud, VDEV_EVENT_ERROR_TCP); |
85 | return; | 85 | return; |
@@ -102,11 +102,11 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev, | |||
102 | if (usbip_dbg_flag_vhci_rx) | 102 | if (usbip_dbg_flag_vhci_rx) |
103 | usbip_dump_urb(urb); | 103 | usbip_dump_urb(urb); |
104 | 104 | ||
105 | usbip_dbg_vhci_rx("now giveback urb %p\n", urb); | 105 | usbip_dbg_vhci_rx("now giveback urb %u\n", pdu->base.seqnum); |
106 | 106 | ||
107 | spin_lock(&the_controller->lock); | 107 | spin_lock_irqsave(&the_controller->lock, flags); |
108 | usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb); | 108 | usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb); |
109 | spin_unlock(&the_controller->lock); | 109 | spin_unlock_irqrestore(&the_controller->lock, flags); |
110 | 110 | ||
111 | usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status); | 111 | usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status); |
112 | 112 | ||
@@ -117,8 +117,9 @@ static struct vhci_unlink *dequeue_pending_unlink(struct vhci_device *vdev, | |||
117 | struct usbip_header *pdu) | 117 | struct usbip_header *pdu) |
118 | { | 118 | { |
119 | struct vhci_unlink *unlink, *tmp; | 119 | struct vhci_unlink *unlink, *tmp; |
120 | unsigned long flags; | ||
120 | 121 | ||
121 | spin_lock(&vdev->priv_lock); | 122 | spin_lock_irqsave(&vdev->priv_lock, flags); |
122 | 123 | ||
123 | list_for_each_entry_safe(unlink, tmp, &vdev->unlink_rx, list) { | 124 | list_for_each_entry_safe(unlink, tmp, &vdev->unlink_rx, list) { |
124 | pr_info("unlink->seqnum %lu\n", unlink->seqnum); | 125 | pr_info("unlink->seqnum %lu\n", unlink->seqnum); |
@@ -127,12 +128,12 @@ static struct vhci_unlink *dequeue_pending_unlink(struct vhci_device *vdev, | |||
127 | unlink->seqnum); | 128 | unlink->seqnum); |
128 | list_del(&unlink->list); | 129 | list_del(&unlink->list); |
129 | 130 | ||
130 | spin_unlock(&vdev->priv_lock); | 131 | spin_unlock_irqrestore(&vdev->priv_lock, flags); |
131 | return unlink; | 132 | return unlink; |
132 | } | 133 | } |
133 | } | 134 | } |
134 | 135 | ||
135 | spin_unlock(&vdev->priv_lock); | 136 | spin_unlock_irqrestore(&vdev->priv_lock, flags); |
136 | 137 | ||
137 | return NULL; | 138 | return NULL; |
138 | } | 139 | } |
@@ -142,6 +143,7 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev, | |||
142 | { | 143 | { |
143 | struct vhci_unlink *unlink; | 144 | struct vhci_unlink *unlink; |
144 | struct urb *urb; | 145 | struct urb *urb; |
146 | unsigned long flags; | ||
145 | 147 | ||
146 | usbip_dump_header(pdu); | 148 | usbip_dump_header(pdu); |
147 | 149 | ||
@@ -152,9 +154,9 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev, | |||
152 | return; | 154 | return; |
153 | } | 155 | } |
154 | 156 | ||
155 | spin_lock(&vdev->priv_lock); | 157 | spin_lock_irqsave(&vdev->priv_lock, flags); |
156 | urb = pickup_urb_and_free_priv(vdev, unlink->unlink_seqnum); | 158 | urb = pickup_urb_and_free_priv(vdev, unlink->unlink_seqnum); |
157 | spin_unlock(&vdev->priv_lock); | 159 | spin_unlock_irqrestore(&vdev->priv_lock, flags); |
158 | 160 | ||
159 | if (!urb) { | 161 | if (!urb) { |
160 | /* | 162 | /* |
@@ -165,15 +167,15 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev, | |||
165 | pr_info("the urb (seqnum %d) was already given back\n", | 167 | pr_info("the urb (seqnum %d) was already given back\n", |
166 | pdu->base.seqnum); | 168 | pdu->base.seqnum); |
167 | } else { | 169 | } else { |
168 | usbip_dbg_vhci_rx("now giveback urb %p\n", urb); | 170 | usbip_dbg_vhci_rx("now giveback urb %d\n", pdu->base.seqnum); |
169 | 171 | ||
170 | /* If unlink is successful, status is -ECONNRESET */ | 172 | /* If unlink is successful, status is -ECONNRESET */ |
171 | urb->status = pdu->u.ret_unlink.status; | 173 | urb->status = pdu->u.ret_unlink.status; |
172 | pr_info("urb->status %d\n", urb->status); | 174 | pr_info("urb->status %d\n", urb->status); |
173 | 175 | ||
174 | spin_lock(&the_controller->lock); | 176 | spin_lock_irqsave(&the_controller->lock, flags); |
175 | usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb); | 177 | usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb); |
176 | spin_unlock(&the_controller->lock); | 178 | spin_unlock_irqrestore(&the_controller->lock, flags); |
177 | 179 | ||
178 | usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, | 180 | usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, |
179 | urb->status); | 181 | urb->status); |
@@ -185,10 +187,11 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev, | |||
185 | static int vhci_priv_tx_empty(struct vhci_device *vdev) | 187 | static int vhci_priv_tx_empty(struct vhci_device *vdev) |
186 | { | 188 | { |
187 | int empty = 0; | 189 | int empty = 0; |
190 | unsigned long flags; | ||
188 | 191 | ||
189 | spin_lock(&vdev->priv_lock); | 192 | spin_lock_irqsave(&vdev->priv_lock, flags); |
190 | empty = list_empty(&vdev->priv_rx); | 193 | empty = list_empty(&vdev->priv_rx); |
191 | spin_unlock(&vdev->priv_lock); | 194 | spin_unlock_irqrestore(&vdev->priv_lock, flags); |
192 | 195 | ||
193 | return empty; | 196 | return empty; |
194 | } | 197 | } |
diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c index 211f43f67ea2..b9432fdec775 100644 --- a/drivers/usb/usbip/vhci_sysfs.c +++ b/drivers/usb/usbip/vhci_sysfs.c | |||
@@ -32,23 +32,28 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr, | |||
32 | { | 32 | { |
33 | char *s = out; | 33 | char *s = out; |
34 | int i = 0; | 34 | int i = 0; |
35 | unsigned long flags; | ||
35 | 36 | ||
36 | BUG_ON(!the_controller || !out); | 37 | BUG_ON(!the_controller || !out); |
37 | 38 | ||
38 | spin_lock(&the_controller->lock); | 39 | spin_lock_irqsave(&the_controller->lock, flags); |
39 | 40 | ||
40 | /* | 41 | /* |
41 | * output example: | 42 | * output example: |
42 | * prt sta spd dev socket local_busid | 43 | * port sta spd dev sockfd local_busid |
43 | * 000 004 000 000 c5a7bb80 1-2.3 | 44 | * 0000 004 000 00000000 000003 1-2.3 |
44 | * 001 004 000 000 d8cee980 2-3.4 | 45 | * 0001 004 000 00000000 000004 2-3.4 |
45 | * | 46 | * |
46 | * IP address can be retrieved from a socket pointer address by looking | 47 | * Output includes socket fd instead of socket pointer address to |
47 | * up /proc/net/{tcp,tcp6}. Also, a userland program may remember a | 48 | * avoid leaking kernel memory address in: |
48 | * port number and its peer IP address. | 49 | * /sys/devices/platform/vhci_hcd.0/status and in debug output. |
50 | * The socket pointer address is not used at the moment and it was | ||
51 | * made visible as a convenient way to find IP address from socket | ||
52 | * pointer address by looking up /proc/net/{tcp,tcp6}. As this opens | ||
53 | * a security hole, the change is made to use sockfd instead. | ||
49 | */ | 54 | */ |
50 | out += sprintf(out, | 55 | out += sprintf(out, |
51 | "prt sta spd bus dev socket local_busid\n"); | 56 | "prt sta spd dev sockfd local_busid\n"); |
52 | 57 | ||
53 | for (i = 0; i < VHCI_NPORTS; i++) { | 58 | for (i = 0; i < VHCI_NPORTS; i++) { |
54 | struct vhci_device *vdev = port_to_vdev(i); | 59 | struct vhci_device *vdev = port_to_vdev(i); |
@@ -59,18 +64,17 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr, | |||
59 | if (vdev->ud.status == VDEV_ST_USED) { | 64 | if (vdev->ud.status == VDEV_ST_USED) { |
60 | out += sprintf(out, "%03u %08x ", | 65 | out += sprintf(out, "%03u %08x ", |
61 | vdev->speed, vdev->devid); | 66 | vdev->speed, vdev->devid); |
62 | out += sprintf(out, "%16p ", vdev->ud.tcp_socket); | 67 | out += sprintf(out, "%06u ", vdev->ud.sockfd); |
63 | out += sprintf(out, "%s", dev_name(&vdev->udev->dev)); | 68 | out += sprintf(out, "%s", dev_name(&vdev->udev->dev)); |
64 | 69 | ||
65 | } else { | 70 | } else |
66 | out += sprintf(out, "000 000 000 0000000000000000 0-0"); | 71 | out += sprintf(out, "000 00000000 000000 0-0"); |
67 | } | ||
68 | 72 | ||
69 | out += sprintf(out, "\n"); | 73 | out += sprintf(out, "\n"); |
70 | spin_unlock(&vdev->ud.lock); | 74 | spin_unlock(&vdev->ud.lock); |
71 | } | 75 | } |
72 | 76 | ||
73 | spin_unlock(&the_controller->lock); | 77 | spin_unlock_irqrestore(&the_controller->lock, flags); |
74 | 78 | ||
75 | return out - s; | 79 | return out - s; |
76 | } | 80 | } |
@@ -80,11 +84,12 @@ static DEVICE_ATTR_RO(status); | |||
80 | static int vhci_port_disconnect(__u32 rhport) | 84 | static int vhci_port_disconnect(__u32 rhport) |
81 | { | 85 | { |
82 | struct vhci_device *vdev; | 86 | struct vhci_device *vdev; |
87 | unsigned long flags; | ||
83 | 88 | ||
84 | usbip_dbg_vhci_sysfs("enter\n"); | 89 | usbip_dbg_vhci_sysfs("enter\n"); |
85 | 90 | ||
86 | /* lock */ | 91 | /* lock */ |
87 | spin_lock(&the_controller->lock); | 92 | spin_lock_irqsave(&the_controller->lock, flags); |
88 | 93 | ||
89 | vdev = port_to_vdev(rhport); | 94 | vdev = port_to_vdev(rhport); |
90 | 95 | ||
@@ -94,14 +99,14 @@ static int vhci_port_disconnect(__u32 rhport) | |||
94 | 99 | ||
95 | /* unlock */ | 100 | /* unlock */ |
96 | spin_unlock(&vdev->ud.lock); | 101 | spin_unlock(&vdev->ud.lock); |
97 | spin_unlock(&the_controller->lock); | 102 | spin_unlock_irqrestore(&the_controller->lock, flags); |
98 | 103 | ||
99 | return -EINVAL; | 104 | return -EINVAL; |
100 | } | 105 | } |
101 | 106 | ||
102 | /* unlock */ | 107 | /* unlock */ |
103 | spin_unlock(&vdev->ud.lock); | 108 | spin_unlock(&vdev->ud.lock); |
104 | spin_unlock(&the_controller->lock); | 109 | spin_unlock_irqrestore(&the_controller->lock, flags); |
105 | 110 | ||
106 | usbip_event_add(&vdev->ud, VDEV_EVENT_DOWN); | 111 | usbip_event_add(&vdev->ud, VDEV_EVENT_DOWN); |
107 | 112 | ||
@@ -177,6 +182,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr, | |||
177 | int sockfd = 0; | 182 | int sockfd = 0; |
178 | __u32 rhport = 0, devid = 0, speed = 0; | 183 | __u32 rhport = 0, devid = 0, speed = 0; |
179 | int err; | 184 | int err; |
185 | unsigned long flags; | ||
180 | 186 | ||
181 | /* | 187 | /* |
182 | * @rhport: port number of vhci_hcd | 188 | * @rhport: port number of vhci_hcd |
@@ -202,14 +208,14 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr, | |||
202 | /* now need lock until setting vdev status as used */ | 208 | /* now need lock until setting vdev status as used */ |
203 | 209 | ||
204 | /* begin a lock */ | 210 | /* begin a lock */ |
205 | spin_lock(&the_controller->lock); | 211 | spin_lock_irqsave(&the_controller->lock, flags); |
206 | vdev = port_to_vdev(rhport); | 212 | vdev = port_to_vdev(rhport); |
207 | spin_lock(&vdev->ud.lock); | 213 | spin_lock(&vdev->ud.lock); |
208 | 214 | ||
209 | if (vdev->ud.status != VDEV_ST_NULL) { | 215 | if (vdev->ud.status != VDEV_ST_NULL) { |
210 | /* end of the lock */ | 216 | /* end of the lock */ |
211 | spin_unlock(&vdev->ud.lock); | 217 | spin_unlock(&vdev->ud.lock); |
212 | spin_unlock(&the_controller->lock); | 218 | spin_unlock_irqrestore(&the_controller->lock, flags); |
213 | 219 | ||
214 | sockfd_put(socket); | 220 | sockfd_put(socket); |
215 | 221 | ||
@@ -223,11 +229,12 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr, | |||
223 | 229 | ||
224 | vdev->devid = devid; | 230 | vdev->devid = devid; |
225 | vdev->speed = speed; | 231 | vdev->speed = speed; |
232 | vdev->ud.sockfd = sockfd; | ||
226 | vdev->ud.tcp_socket = socket; | 233 | vdev->ud.tcp_socket = socket; |
227 | vdev->ud.status = VDEV_ST_NOTASSIGNED; | 234 | vdev->ud.status = VDEV_ST_NOTASSIGNED; |
228 | 235 | ||
229 | spin_unlock(&vdev->ud.lock); | 236 | spin_unlock(&vdev->ud.lock); |
230 | spin_unlock(&the_controller->lock); | 237 | spin_unlock_irqrestore(&the_controller->lock, flags); |
231 | /* end the lock */ | 238 | /* end the lock */ |
232 | 239 | ||
233 | vdev->ud.tcp_rx = kthread_get_run(vhci_rx_loop, &vdev->ud, "vhci_rx"); | 240 | vdev->ud.tcp_rx = kthread_get_run(vhci_rx_loop, &vdev->ud, "vhci_rx"); |
diff --git a/drivers/usb/usbip/vhci_tx.c b/drivers/usb/usbip/vhci_tx.c index 409fd99f3257..a9a663a578b6 100644 --- a/drivers/usb/usbip/vhci_tx.c +++ b/drivers/usb/usbip/vhci_tx.c | |||
@@ -47,16 +47,17 @@ static void setup_cmd_submit_pdu(struct usbip_header *pdup, struct urb *urb) | |||
47 | static struct vhci_priv *dequeue_from_priv_tx(struct vhci_device *vdev) | 47 | static struct vhci_priv *dequeue_from_priv_tx(struct vhci_device *vdev) |
48 | { | 48 | { |
49 | struct vhci_priv *priv, *tmp; | 49 | struct vhci_priv *priv, *tmp; |
50 | unsigned long flags; | ||
50 | 51 | ||
51 | spin_lock(&vdev->priv_lock); | 52 | spin_lock_irqsave(&vdev->priv_lock, flags); |
52 | 53 | ||
53 | list_for_each_entry_safe(priv, tmp, &vdev->priv_tx, list) { | 54 | list_for_each_entry_safe(priv, tmp, &vdev->priv_tx, list) { |
54 | list_move_tail(&priv->list, &vdev->priv_rx); | 55 | list_move_tail(&priv->list, &vdev->priv_rx); |
55 | spin_unlock(&vdev->priv_lock); | 56 | spin_unlock_irqrestore(&vdev->priv_lock, flags); |
56 | return priv; | 57 | return priv; |
57 | } | 58 | } |
58 | 59 | ||
59 | spin_unlock(&vdev->priv_lock); | 60 | spin_unlock_irqrestore(&vdev->priv_lock, flags); |
60 | 61 | ||
61 | return NULL; | 62 | return NULL; |
62 | } | 63 | } |
@@ -82,7 +83,8 @@ static int vhci_send_cmd_submit(struct vhci_device *vdev) | |||
82 | memset(&msg, 0, sizeof(msg)); | 83 | memset(&msg, 0, sizeof(msg)); |
83 | memset(&iov, 0, sizeof(iov)); | 84 | memset(&iov, 0, sizeof(iov)); |
84 | 85 | ||
85 | usbip_dbg_vhci_tx("setup txdata urb %p\n", urb); | 86 | usbip_dbg_vhci_tx("setup txdata urb seqnum %lu\n", |
87 | priv->seqnum); | ||
86 | 88 | ||
87 | /* 1. setup usbip_header */ | 89 | /* 1. setup usbip_header */ |
88 | setup_cmd_submit_pdu(&pdu_header, urb); | 90 | setup_cmd_submit_pdu(&pdu_header, urb); |
@@ -136,16 +138,17 @@ static int vhci_send_cmd_submit(struct vhci_device *vdev) | |||
136 | static struct vhci_unlink *dequeue_from_unlink_tx(struct vhci_device *vdev) | 138 | static struct vhci_unlink *dequeue_from_unlink_tx(struct vhci_device *vdev) |
137 | { | 139 | { |
138 | struct vhci_unlink *unlink, *tmp; | 140 | struct vhci_unlink *unlink, *tmp; |
141 | unsigned long flags; | ||
139 | 142 | ||
140 | spin_lock(&vdev->priv_lock); | 143 | spin_lock_irqsave(&vdev->priv_lock, flags); |
141 | 144 | ||
142 | list_for_each_entry_safe(unlink, tmp, &vdev->unlink_tx, list) { | 145 | list_for_each_entry_safe(unlink, tmp, &vdev->unlink_tx, list) { |
143 | list_move_tail(&unlink->list, &vdev->unlink_rx); | 146 | list_move_tail(&unlink->list, &vdev->unlink_rx); |
144 | spin_unlock(&vdev->priv_lock); | 147 | spin_unlock_irqrestore(&vdev->priv_lock, flags); |
145 | return unlink; | 148 | return unlink; |
146 | } | 149 | } |
147 | 150 | ||
148 | spin_unlock(&vdev->priv_lock); | 151 | spin_unlock_irqrestore(&vdev->priv_lock, flags); |
149 | 152 | ||
150 | return NULL; | 153 | return NULL; |
151 | } | 154 | } |
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 9eda69e40678..44a5a8777053 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c | |||
@@ -981,6 +981,7 @@ static long vhost_net_reset_owner(struct vhost_net *n) | |||
981 | } | 981 | } |
982 | vhost_net_stop(n, &tx_sock, &rx_sock); | 982 | vhost_net_stop(n, &tx_sock, &rx_sock); |
983 | vhost_net_flush(n); | 983 | vhost_net_flush(n); |
984 | vhost_dev_stop(&n->dev); | ||
984 | vhost_dev_reset_owner(&n->dev, memory); | 985 | vhost_dev_reset_owner(&n->dev, memory); |
985 | vhost_net_vq_reset(n); | 986 | vhost_net_vq_reset(n); |
986 | done: | 987 | done: |
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c index d7a592877311..fbe72ca385bc 100644 --- a/drivers/video/backlight/pwm_bl.c +++ b/drivers/video/backlight/pwm_bl.c | |||
@@ -79,14 +79,17 @@ static void pwm_backlight_power_off(struct pwm_bl_data *pb) | |||
79 | static int compute_duty_cycle(struct pwm_bl_data *pb, int brightness) | 79 | static int compute_duty_cycle(struct pwm_bl_data *pb, int brightness) |
80 | { | 80 | { |
81 | unsigned int lth = pb->lth_brightness; | 81 | unsigned int lth = pb->lth_brightness; |
82 | int duty_cycle; | 82 | u64 duty_cycle; |
83 | 83 | ||
84 | if (pb->levels) | 84 | if (pb->levels) |
85 | duty_cycle = pb->levels[brightness]; | 85 | duty_cycle = pb->levels[brightness]; |
86 | else | 86 | else |
87 | duty_cycle = brightness; | 87 | duty_cycle = brightness; |
88 | 88 | ||
89 | return (duty_cycle * (pb->period - lth) / pb->scale) + lth; | 89 | duty_cycle *= pb->period - lth; |
90 | do_div(duty_cycle, pb->scale); | ||
91 | |||
92 | return duty_cycle + lth; | ||
90 | } | 93 | } |
91 | 94 | ||
92 | static int pwm_backlight_update_status(struct backlight_device *bl) | 95 | static int pwm_backlight_update_status(struct backlight_device *bl) |
diff --git a/drivers/video/console/dummycon.c b/drivers/video/console/dummycon.c index 0efc52f11ad0..b30e7d87804b 100644 --- a/drivers/video/console/dummycon.c +++ b/drivers/video/console/dummycon.c | |||
@@ -68,7 +68,6 @@ const struct consw dummy_con = { | |||
68 | .con_switch = DUMMY, | 68 | .con_switch = DUMMY, |
69 | .con_blank = DUMMY, | 69 | .con_blank = DUMMY, |
70 | .con_font_set = DUMMY, | 70 | .con_font_set = DUMMY, |
71 | .con_font_get = DUMMY, | ||
72 | .con_font_default = DUMMY, | 71 | .con_font_default = DUMMY, |
73 | .con_font_copy = DUMMY, | 72 | .con_font_copy = DUMMY, |
74 | .con_set_palette = DUMMY, | 73 | .con_set_palette = DUMMY, |
diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c index 19eb42b57d87..a6da82648c92 100644 --- a/drivers/video/fbdev/atmel_lcdfb.c +++ b/drivers/video/fbdev/atmel_lcdfb.c | |||
@@ -1120,7 +1120,7 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo) | |||
1120 | goto put_display_node; | 1120 | goto put_display_node; |
1121 | } | 1121 | } |
1122 | 1122 | ||
1123 | timings_np = of_find_node_by_name(display_np, "display-timings"); | 1123 | timings_np = of_get_child_by_name(display_np, "display-timings"); |
1124 | if (!timings_np) { | 1124 | if (!timings_np) { |
1125 | dev_err(dev, "failed to find display-timings node\n"); | 1125 | dev_err(dev, "failed to find display-timings node\n"); |
1126 | ret = -ENODEV; | 1126 | ret = -ENODEV; |
@@ -1141,6 +1141,12 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo) | |||
1141 | fb_add_videomode(&fb_vm, &info->modelist); | 1141 | fb_add_videomode(&fb_vm, &info->modelist); |
1142 | } | 1142 | } |
1143 | 1143 | ||
1144 | /* | ||
1145 | * FIXME: Make sure we are not referencing any fields in display_np | ||
1146 | * and timings_np and drop our references to them before returning to | ||
1147 | * avoid leaking the nodes on probe deferral and driver unbind. | ||
1148 | */ | ||
1149 | |||
1144 | return 0; | 1150 | return 0; |
1145 | 1151 | ||
1146 | put_timings_node: | 1152 | put_timings_node: |
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c index 29ef719a6a3c..d69ab1e28d7d 100644 --- a/drivers/watchdog/imx2_wdt.c +++ b/drivers/watchdog/imx2_wdt.c | |||
@@ -161,15 +161,21 @@ static void imx2_wdt_timer_ping(unsigned long arg) | |||
161 | mod_timer(&wdev->timer, jiffies + wdog->timeout * HZ / 2); | 161 | mod_timer(&wdev->timer, jiffies + wdog->timeout * HZ / 2); |
162 | } | 162 | } |
163 | 163 | ||
164 | static int imx2_wdt_set_timeout(struct watchdog_device *wdog, | 164 | static void __imx2_wdt_set_timeout(struct watchdog_device *wdog, |
165 | unsigned int new_timeout) | 165 | unsigned int new_timeout) |
166 | { | 166 | { |
167 | struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog); | 167 | struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog); |
168 | 168 | ||
169 | wdog->timeout = new_timeout; | ||
170 | |||
171 | regmap_update_bits(wdev->regmap, IMX2_WDT_WCR, IMX2_WDT_WCR_WT, | 169 | regmap_update_bits(wdev->regmap, IMX2_WDT_WCR, IMX2_WDT_WCR_WT, |
172 | WDOG_SEC_TO_COUNT(new_timeout)); | 170 | WDOG_SEC_TO_COUNT(new_timeout)); |
171 | } | ||
172 | |||
173 | static int imx2_wdt_set_timeout(struct watchdog_device *wdog, | ||
174 | unsigned int new_timeout) | ||
175 | { | ||
176 | __imx2_wdt_set_timeout(wdog, new_timeout); | ||
177 | |||
178 | wdog->timeout = new_timeout; | ||
173 | return 0; | 179 | return 0; |
174 | } | 180 | } |
175 | 181 | ||
@@ -353,7 +359,11 @@ static int imx2_wdt_suspend(struct device *dev) | |||
353 | 359 | ||
354 | /* The watchdog IP block is running */ | 360 | /* The watchdog IP block is running */ |
355 | if (imx2_wdt_is_running(wdev)) { | 361 | if (imx2_wdt_is_running(wdev)) { |
356 | imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME); | 362 | /* |
363 | * Don't update wdog->timeout, we'll restore the current value | ||
364 | * during resume. | ||
365 | */ | ||
366 | __imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME); | ||
357 | imx2_wdt_ping(wdog); | 367 | imx2_wdt_ping(wdog); |
358 | 368 | ||
359 | /* The watchdog is not active */ | 369 | /* The watchdog is not active */ |
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index cfe99bec49de..45934deacfd7 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c | |||
@@ -1258,7 +1258,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, | |||
1258 | /* Lock all pages first so we can lock the extent safely. */ | 1258 | /* Lock all pages first so we can lock the extent safely. */ |
1259 | ret = io_ctl_prepare_pages(io_ctl, inode, 0); | 1259 | ret = io_ctl_prepare_pages(io_ctl, inode, 0); |
1260 | if (ret) | 1260 | if (ret) |
1261 | goto out; | 1261 | goto out_unlock; |
1262 | 1262 | ||
1263 | lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, | 1263 | lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, |
1264 | 0, &cached_state); | 1264 | 0, &cached_state); |
@@ -1351,6 +1351,7 @@ out_nospc_locked: | |||
1351 | out_nospc: | 1351 | out_nospc: |
1352 | cleanup_write_cache_enospc(inode, io_ctl, &cached_state, &bitmap_list); | 1352 | cleanup_write_cache_enospc(inode, io_ctl, &cached_state, &bitmap_list); |
1353 | 1353 | ||
1354 | out_unlock: | ||
1354 | if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) | 1355 | if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) |
1355 | up_write(&block_group->data_rwsem); | 1356 | up_write(&block_group->data_rwsem); |
1356 | 1357 | ||
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index af1da85da509..81b5a461d94e 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -1292,8 +1292,11 @@ next_slot: | |||
1292 | leaf = path->nodes[0]; | 1292 | leaf = path->nodes[0]; |
1293 | if (path->slots[0] >= btrfs_header_nritems(leaf)) { | 1293 | if (path->slots[0] >= btrfs_header_nritems(leaf)) { |
1294 | ret = btrfs_next_leaf(root, path); | 1294 | ret = btrfs_next_leaf(root, path); |
1295 | if (ret < 0) | 1295 | if (ret < 0) { |
1296 | if (cow_start != (u64)-1) | ||
1297 | cur_offset = cow_start; | ||
1296 | goto error; | 1298 | goto error; |
1299 | } | ||
1297 | if (ret > 0) | 1300 | if (ret > 0) |
1298 | break; | 1301 | break; |
1299 | leaf = path->nodes[0]; | 1302 | leaf = path->nodes[0]; |
@@ -2015,7 +2018,15 @@ again: | |||
2015 | goto out; | 2018 | goto out; |
2016 | } | 2019 | } |
2017 | 2020 | ||
2018 | btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state); | 2021 | ret = btrfs_set_extent_delalloc(inode, page_start, page_end, |
2022 | &cached_state); | ||
2023 | if (ret) { | ||
2024 | mapping_set_error(page->mapping, ret); | ||
2025 | end_extent_writepage(page, ret, page_start, page_end); | ||
2026 | ClearPageChecked(page); | ||
2027 | goto out; | ||
2028 | } | ||
2029 | |||
2019 | ClearPageChecked(page); | 2030 | ClearPageChecked(page); |
2020 | set_page_dirty(page); | 2031 | set_page_dirty(page); |
2021 | out: | 2032 | out: |
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index ee7832e2d39d..d6359af9789d 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include "print-tree.h" | 26 | #include "print-tree.h" |
27 | #include "backref.h" | 27 | #include "backref.h" |
28 | #include "hash.h" | 28 | #include "hash.h" |
29 | #include "inode-map.h" | ||
29 | 30 | ||
30 | /* magic values for the inode_only field in btrfs_log_inode: | 31 | /* magic values for the inode_only field in btrfs_log_inode: |
31 | * | 32 | * |
@@ -2445,6 +2446,9 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans, | |||
2445 | next); | 2446 | next); |
2446 | btrfs_wait_tree_block_writeback(next); | 2447 | btrfs_wait_tree_block_writeback(next); |
2447 | btrfs_tree_unlock(next); | 2448 | btrfs_tree_unlock(next); |
2449 | } else { | ||
2450 | if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags)) | ||
2451 | clear_extent_buffer_dirty(next); | ||
2448 | } | 2452 | } |
2449 | 2453 | ||
2450 | WARN_ON(root_owner != | 2454 | WARN_ON(root_owner != |
@@ -2524,6 +2528,9 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans, | |||
2524 | next); | 2528 | next); |
2525 | btrfs_wait_tree_block_writeback(next); | 2529 | btrfs_wait_tree_block_writeback(next); |
2526 | btrfs_tree_unlock(next); | 2530 | btrfs_tree_unlock(next); |
2531 | } else { | ||
2532 | if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags)) | ||
2533 | clear_extent_buffer_dirty(next); | ||
2527 | } | 2534 | } |
2528 | 2535 | ||
2529 | WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID); | 2536 | WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID); |
@@ -2600,6 +2607,9 @@ static int walk_log_tree(struct btrfs_trans_handle *trans, | |||
2600 | clean_tree_block(trans, log->fs_info, next); | 2607 | clean_tree_block(trans, log->fs_info, next); |
2601 | btrfs_wait_tree_block_writeback(next); | 2608 | btrfs_wait_tree_block_writeback(next); |
2602 | btrfs_tree_unlock(next); | 2609 | btrfs_tree_unlock(next); |
2610 | } else { | ||
2611 | if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags)) | ||
2612 | clear_extent_buffer_dirty(next); | ||
2603 | } | 2613 | } |
2604 | 2614 | ||
2605 | WARN_ON(log->root_key.objectid != | 2615 | WARN_ON(log->root_key.objectid != |
@@ -5514,6 +5524,23 @@ again: | |||
5514 | path); | 5524 | path); |
5515 | } | 5525 | } |
5516 | 5526 | ||
5527 | if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) { | ||
5528 | struct btrfs_root *root = wc.replay_dest; | ||
5529 | |||
5530 | btrfs_release_path(path); | ||
5531 | |||
5532 | /* | ||
5533 | * We have just replayed everything, and the highest | ||
5534 | * objectid of fs roots probably has changed in case | ||
5535 | * some inode_item's got replayed. | ||
5536 | * | ||
5537 | * root->objectid_mutex is not acquired as log replay | ||
5538 | * could only happen during mount. | ||
5539 | */ | ||
5540 | ret = btrfs_find_highest_objectid(root, | ||
5541 | &root->highest_objectid); | ||
5542 | } | ||
5543 | |||
5517 | key.offset = found_key.offset - 1; | 5544 | key.offset = found_key.offset - 1; |
5518 | wc.replay_dest->log_root = NULL; | 5545 | wc.replay_dest->log_root = NULL; |
5519 | free_extent_buffer(log->node); | 5546 | free_extent_buffer(log->node); |
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index 4acbc390a7d6..1d707a67f8ac 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c | |||
@@ -306,9 +306,8 @@ int calc_lanman_hash(const char *password, const char *cryptkey, bool encrypt, | |||
306 | { | 306 | { |
307 | int i; | 307 | int i; |
308 | int rc; | 308 | int rc; |
309 | char password_with_pad[CIFS_ENCPWD_SIZE]; | 309 | char password_with_pad[CIFS_ENCPWD_SIZE] = {0}; |
310 | 310 | ||
311 | memset(password_with_pad, 0, CIFS_ENCPWD_SIZE); | ||
312 | if (password) | 311 | if (password) |
313 | strncpy(password_with_pad, password, CIFS_ENCPWD_SIZE); | 312 | strncpy(password_with_pad, password, CIFS_ENCPWD_SIZE); |
314 | 313 | ||
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 0a2bf9462637..077ad3a06c9a 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
@@ -1695,7 +1695,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, | |||
1695 | tmp_end++; | 1695 | tmp_end++; |
1696 | if (!(tmp_end < end && tmp_end[1] == delim)) { | 1696 | if (!(tmp_end < end && tmp_end[1] == delim)) { |
1697 | /* No it is not. Set the password to NULL */ | 1697 | /* No it is not. Set the password to NULL */ |
1698 | kfree(vol->password); | 1698 | kzfree(vol->password); |
1699 | vol->password = NULL; | 1699 | vol->password = NULL; |
1700 | break; | 1700 | break; |
1701 | } | 1701 | } |
@@ -1733,7 +1733,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, | |||
1733 | options = end; | 1733 | options = end; |
1734 | } | 1734 | } |
1735 | 1735 | ||
1736 | kfree(vol->password); | 1736 | kzfree(vol->password); |
1737 | /* Now build new password string */ | 1737 | /* Now build new password string */ |
1738 | temp_len = strlen(value); | 1738 | temp_len = strlen(value); |
1739 | vol->password = kzalloc(temp_len+1, GFP_KERNEL); | 1739 | vol->password = kzalloc(temp_len+1, GFP_KERNEL); |
@@ -4148,7 +4148,7 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid) | |||
4148 | reset_cifs_unix_caps(0, tcon, NULL, vol_info); | 4148 | reset_cifs_unix_caps(0, tcon, NULL, vol_info); |
4149 | out: | 4149 | out: |
4150 | kfree(vol_info->username); | 4150 | kfree(vol_info->username); |
4151 | kfree(vol_info->password); | 4151 | kzfree(vol_info->password); |
4152 | kfree(vol_info); | 4152 | kfree(vol_info); |
4153 | 4153 | ||
4154 | return tcon; | 4154 | return tcon; |
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index ec2d07bb9beb..744be3c146f5 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
@@ -3241,20 +3241,18 @@ static const struct vm_operations_struct cifs_file_vm_ops = { | |||
3241 | 3241 | ||
3242 | int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma) | 3242 | int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma) |
3243 | { | 3243 | { |
3244 | int rc, xid; | 3244 | int xid, rc = 0; |
3245 | struct inode *inode = file_inode(file); | 3245 | struct inode *inode = file_inode(file); |
3246 | 3246 | ||
3247 | xid = get_xid(); | 3247 | xid = get_xid(); |
3248 | 3248 | ||
3249 | if (!CIFS_CACHE_READ(CIFS_I(inode))) { | 3249 | if (!CIFS_CACHE_READ(CIFS_I(inode))) |
3250 | rc = cifs_zap_mapping(inode); | 3250 | rc = cifs_zap_mapping(inode); |
3251 | if (rc) | 3251 | if (!rc) |
3252 | return rc; | 3252 | rc = generic_file_mmap(file, vma); |
3253 | } | 3253 | if (!rc) |
3254 | |||
3255 | rc = generic_file_mmap(file, vma); | ||
3256 | if (rc == 0) | ||
3257 | vma->vm_ops = &cifs_file_vm_ops; | 3254 | vma->vm_ops = &cifs_file_vm_ops; |
3255 | |||
3258 | free_xid(xid); | 3256 | free_xid(xid); |
3259 | return rc; | 3257 | return rc; |
3260 | } | 3258 | } |
@@ -3264,16 +3262,16 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma) | |||
3264 | int rc, xid; | 3262 | int rc, xid; |
3265 | 3263 | ||
3266 | xid = get_xid(); | 3264 | xid = get_xid(); |
3265 | |||
3267 | rc = cifs_revalidate_file(file); | 3266 | rc = cifs_revalidate_file(file); |
3268 | if (rc) { | 3267 | if (rc) |
3269 | cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n", | 3268 | cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n", |
3270 | rc); | 3269 | rc); |
3271 | free_xid(xid); | 3270 | if (!rc) |
3272 | return rc; | 3271 | rc = generic_file_mmap(file, vma); |
3273 | } | 3272 | if (!rc) |
3274 | rc = generic_file_mmap(file, vma); | ||
3275 | if (rc == 0) | ||
3276 | vma->vm_ops = &cifs_file_vm_ops; | 3273 | vma->vm_ops = &cifs_file_vm_ops; |
3274 | |||
3277 | free_xid(xid); | 3275 | free_xid(xid); |
3278 | return rc; | 3276 | return rc; |
3279 | } | 3277 | } |
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index 2396ab099849..0cc699d9b932 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c | |||
@@ -99,14 +99,11 @@ sesInfoFree(struct cifs_ses *buf_to_free) | |||
99 | kfree(buf_to_free->serverOS); | 99 | kfree(buf_to_free->serverOS); |
100 | kfree(buf_to_free->serverDomain); | 100 | kfree(buf_to_free->serverDomain); |
101 | kfree(buf_to_free->serverNOS); | 101 | kfree(buf_to_free->serverNOS); |
102 | if (buf_to_free->password) { | 102 | kzfree(buf_to_free->password); |
103 | memset(buf_to_free->password, 0, strlen(buf_to_free->password)); | ||
104 | kfree(buf_to_free->password); | ||
105 | } | ||
106 | kfree(buf_to_free->user_name); | 103 | kfree(buf_to_free->user_name); |
107 | kfree(buf_to_free->domainName); | 104 | kfree(buf_to_free->domainName); |
108 | kfree(buf_to_free->auth_key.response); | 105 | kzfree(buf_to_free->auth_key.response); |
109 | kfree(buf_to_free); | 106 | kzfree(buf_to_free); |
110 | } | 107 | } |
111 | 108 | ||
112 | struct cifs_tcon * | 109 | struct cifs_tcon * |
@@ -137,10 +134,7 @@ tconInfoFree(struct cifs_tcon *buf_to_free) | |||
137 | } | 134 | } |
138 | atomic_dec(&tconInfoAllocCount); | 135 | atomic_dec(&tconInfoAllocCount); |
139 | kfree(buf_to_free->nativeFileSystem); | 136 | kfree(buf_to_free->nativeFileSystem); |
140 | if (buf_to_free->password) { | 137 | kzfree(buf_to_free->password); |
141 | memset(buf_to_free->password, 0, strlen(buf_to_free->password)); | ||
142 | kfree(buf_to_free->password); | ||
143 | } | ||
144 | kfree(buf_to_free); | 138 | kfree(buf_to_free); |
145 | } | 139 | } |
146 | 140 | ||
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index f2ff60e58ec8..84614a5edb87 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c | |||
@@ -580,8 +580,7 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon) | |||
580 | } | 580 | } |
581 | 581 | ||
582 | /* check validate negotiate info response matches what we got earlier */ | 582 | /* check validate negotiate info response matches what we got earlier */ |
583 | if (pneg_rsp->Dialect != | 583 | if (pneg_rsp->Dialect != cpu_to_le16(tcon->ses->server->dialect)) |
584 | cpu_to_le16(tcon->ses->server->vals->protocol_id)) | ||
585 | goto vneg_out; | 584 | goto vneg_out; |
586 | 585 | ||
587 | if (pneg_rsp->SecurityMode != cpu_to_le16(tcon->ses->server->sec_mode)) | 586 | if (pneg_rsp->SecurityMode != cpu_to_le16(tcon->ses->server->sec_mode)) |
diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c index d6aeb84e90b6..d882d873c5a3 100644 --- a/fs/ext2/acl.c +++ b/fs/ext2/acl.c | |||
@@ -178,11 +178,8 @@ ext2_get_acl(struct inode *inode, int type) | |||
178 | return acl; | 178 | return acl; |
179 | } | 179 | } |
180 | 180 | ||
181 | /* | 181 | static int |
182 | * inode->i_mutex: down | 182 | __ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type) |
183 | */ | ||
184 | int | ||
185 | ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type) | ||
186 | { | 183 | { |
187 | int name_index; | 184 | int name_index; |
188 | void *value = NULL; | 185 | void *value = NULL; |
@@ -192,13 +189,6 @@ ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type) | |||
192 | switch(type) { | 189 | switch(type) { |
193 | case ACL_TYPE_ACCESS: | 190 | case ACL_TYPE_ACCESS: |
194 | name_index = EXT2_XATTR_INDEX_POSIX_ACL_ACCESS; | 191 | name_index = EXT2_XATTR_INDEX_POSIX_ACL_ACCESS; |
195 | if (acl) { | ||
196 | error = posix_acl_update_mode(inode, &inode->i_mode, &acl); | ||
197 | if (error) | ||
198 | return error; | ||
199 | inode->i_ctime = CURRENT_TIME_SEC; | ||
200 | mark_inode_dirty(inode); | ||
201 | } | ||
202 | break; | 192 | break; |
203 | 193 | ||
204 | case ACL_TYPE_DEFAULT: | 194 | case ACL_TYPE_DEFAULT: |
@@ -225,6 +215,24 @@ ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type) | |||
225 | } | 215 | } |
226 | 216 | ||
227 | /* | 217 | /* |
218 | * inode->i_mutex: down | ||
219 | */ | ||
220 | int | ||
221 | ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type) | ||
222 | { | ||
223 | int error; | ||
224 | |||
225 | if (type == ACL_TYPE_ACCESS && acl) { | ||
226 | error = posix_acl_update_mode(inode, &inode->i_mode, &acl); | ||
227 | if (error) | ||
228 | return error; | ||
229 | inode->i_ctime = CURRENT_TIME_SEC; | ||
230 | mark_inode_dirty(inode); | ||
231 | } | ||
232 | return __ext2_set_acl(inode, acl, type); | ||
233 | } | ||
234 | |||
235 | /* | ||
228 | * Initialize the ACLs of a new inode. Called from ext2_new_inode. | 236 | * Initialize the ACLs of a new inode. Called from ext2_new_inode. |
229 | * | 237 | * |
230 | * dir->i_mutex: down | 238 | * dir->i_mutex: down |
@@ -241,12 +249,12 @@ ext2_init_acl(struct inode *inode, struct inode *dir) | |||
241 | return error; | 249 | return error; |
242 | 250 | ||
243 | if (default_acl) { | 251 | if (default_acl) { |
244 | error = ext2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT); | 252 | error = __ext2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT); |
245 | posix_acl_release(default_acl); | 253 | posix_acl_release(default_acl); |
246 | } | 254 | } |
247 | if (acl) { | 255 | if (acl) { |
248 | if (!error) | 256 | if (!error) |
249 | error = ext2_set_acl(inode, acl, ACL_TYPE_ACCESS); | 257 | error = __ext2_set_acl(inode, acl, ACL_TYPE_ACCESS); |
250 | posix_acl_release(acl); | 258 | posix_acl_release(acl); |
251 | } | 259 | } |
252 | return error; | 260 | return error; |
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 530e790f22e0..19f08e6b20a7 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
@@ -4471,6 +4471,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) | |||
4471 | inode->i_op = &ext4_symlink_inode_operations; | 4471 | inode->i_op = &ext4_symlink_inode_operations; |
4472 | ext4_set_aops(inode); | 4472 | ext4_set_aops(inode); |
4473 | } | 4473 | } |
4474 | inode_nohighmem(inode); | ||
4474 | } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || | 4475 | } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || |
4475 | S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { | 4476 | S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { |
4476 | inode->i_op = &ext4_special_inode_operations; | 4477 | inode->i_op = &ext4_special_inode_operations; |
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 4c36dca486cc..32960b3ecd4f 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c | |||
@@ -3151,6 +3151,7 @@ static int ext4_symlink(struct inode *dir, | |||
3151 | if ((disk_link.len > EXT4_N_BLOCKS * 4)) { | 3151 | if ((disk_link.len > EXT4_N_BLOCKS * 4)) { |
3152 | if (!encryption_required) | 3152 | if (!encryption_required) |
3153 | inode->i_op = &ext4_symlink_inode_operations; | 3153 | inode->i_op = &ext4_symlink_inode_operations; |
3154 | inode_nohighmem(inode); | ||
3154 | ext4_set_aops(inode); | 3155 | ext4_set_aops(inode); |
3155 | /* | 3156 | /* |
3156 | * We cannot call page_symlink() with transaction started | 3157 | * We cannot call page_symlink() with transaction started |
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index d3cbdbc8ad33..bc79e2ca4adb 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c | |||
@@ -688,6 +688,7 @@ __acquires(bitlock) | |||
688 | } | 688 | } |
689 | 689 | ||
690 | ext4_unlock_group(sb, grp); | 690 | ext4_unlock_group(sb, grp); |
691 | ext4_commit_super(sb, 1); | ||
691 | ext4_handle_error(sb); | 692 | ext4_handle_error(sb); |
692 | /* | 693 | /* |
693 | * We only get here in the ERRORS_RO case; relocking the group | 694 | * We only get here in the ERRORS_RO case; relocking the group |
diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c index e8e7af62ac95..287c3980fa0b 100644 --- a/fs/ext4/symlink.c +++ b/fs/ext4/symlink.c | |||
@@ -45,7 +45,7 @@ static const char *ext4_encrypted_follow_link(struct dentry *dentry, void **cook | |||
45 | cpage = read_mapping_page(inode->i_mapping, 0, NULL); | 45 | cpage = read_mapping_page(inode->i_mapping, 0, NULL); |
46 | if (IS_ERR(cpage)) | 46 | if (IS_ERR(cpage)) |
47 | return ERR_CAST(cpage); | 47 | return ERR_CAST(cpage); |
48 | caddr = kmap(cpage); | 48 | caddr = page_address(cpage); |
49 | caddr[size] = 0; | 49 | caddr[size] = 0; |
50 | } | 50 | } |
51 | 51 | ||
@@ -75,16 +75,12 @@ static const char *ext4_encrypted_follow_link(struct dentry *dentry, void **cook | |||
75 | /* Null-terminate the name */ | 75 | /* Null-terminate the name */ |
76 | if (res <= plen) | 76 | if (res <= plen) |
77 | paddr[res] = '\0'; | 77 | paddr[res] = '\0'; |
78 | if (cpage) { | 78 | if (cpage) |
79 | kunmap(cpage); | ||
80 | page_cache_release(cpage); | 79 | page_cache_release(cpage); |
81 | } | ||
82 | return *cookie = paddr; | 80 | return *cookie = paddr; |
83 | errout: | 81 | errout: |
84 | if (cpage) { | 82 | if (cpage) |
85 | kunmap(cpage); | ||
86 | page_cache_release(cpage); | 83 | page_cache_release(cpage); |
87 | } | ||
88 | kfree(paddr); | 84 | kfree(paddr); |
89 | return ERR_PTR(res); | 85 | return ERR_PTR(res); |
90 | } | 86 | } |
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c index 97e20decacb4..5528801a5baf 100644 --- a/fs/f2fs/inode.c +++ b/fs/f2fs/inode.c | |||
@@ -202,6 +202,7 @@ make_now: | |||
202 | inode->i_op = &f2fs_encrypted_symlink_inode_operations; | 202 | inode->i_op = &f2fs_encrypted_symlink_inode_operations; |
203 | else | 203 | else |
204 | inode->i_op = &f2fs_symlink_inode_operations; | 204 | inode->i_op = &f2fs_symlink_inode_operations; |
205 | inode_nohighmem(inode); | ||
205 | inode->i_mapping->a_ops = &f2fs_dblock_aops; | 206 | inode->i_mapping->a_ops = &f2fs_dblock_aops; |
206 | } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || | 207 | } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || |
207 | S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { | 208 | S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { |
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index 2c32110f9fc0..484df6850747 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c | |||
@@ -351,6 +351,7 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry, | |||
351 | inode->i_op = &f2fs_encrypted_symlink_inode_operations; | 351 | inode->i_op = &f2fs_encrypted_symlink_inode_operations; |
352 | else | 352 | else |
353 | inode->i_op = &f2fs_symlink_inode_operations; | 353 | inode->i_op = &f2fs_symlink_inode_operations; |
354 | inode_nohighmem(inode); | ||
354 | inode->i_mapping->a_ops = &f2fs_dblock_aops; | 355 | inode->i_mapping->a_ops = &f2fs_dblock_aops; |
355 | 356 | ||
356 | f2fs_lock_op(sbi); | 357 | f2fs_lock_op(sbi); |
@@ -942,7 +943,7 @@ static const char *f2fs_encrypted_follow_link(struct dentry *dentry, void **cook | |||
942 | cpage = read_mapping_page(inode->i_mapping, 0, NULL); | 943 | cpage = read_mapping_page(inode->i_mapping, 0, NULL); |
943 | if (IS_ERR(cpage)) | 944 | if (IS_ERR(cpage)) |
944 | return ERR_CAST(cpage); | 945 | return ERR_CAST(cpage); |
945 | caddr = kmap(cpage); | 946 | caddr = page_address(cpage); |
946 | caddr[size] = 0; | 947 | caddr[size] = 0; |
947 | 948 | ||
948 | /* Symlink is encrypted */ | 949 | /* Symlink is encrypted */ |
@@ -982,13 +983,11 @@ static const char *f2fs_encrypted_follow_link(struct dentry *dentry, void **cook | |||
982 | /* Null-terminate the name */ | 983 | /* Null-terminate the name */ |
983 | paddr[res] = '\0'; | 984 | paddr[res] = '\0'; |
984 | 985 | ||
985 | kunmap(cpage); | ||
986 | page_cache_release(cpage); | 986 | page_cache_release(cpage); |
987 | return *cookie = paddr; | 987 | return *cookie = paddr; |
988 | errout: | 988 | errout: |
989 | kfree(cstr.name); | 989 | kfree(cstr.name); |
990 | f2fs_fname_crypto_free_buffer(&pstr); | 990 | f2fs_fname_crypto_free_buffer(&pstr); |
991 | kunmap(cpage); | ||
992 | page_cache_release(cpage); | 991 | page_cache_release(cpage); |
993 | return ERR_PTR(res); | 992 | return ERR_PTR(res); |
994 | } | 993 | } |
diff --git a/fs/fcntl.c b/fs/fcntl.c index 62376451bbce..5df914943d96 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c | |||
@@ -113,6 +113,10 @@ void f_setown(struct file *filp, unsigned long arg, int force) | |||
113 | int who = arg; | 113 | int who = arg; |
114 | type = PIDTYPE_PID; | 114 | type = PIDTYPE_PID; |
115 | if (who < 0) { | 115 | if (who < 0) { |
116 | /* avoid overflow below */ | ||
117 | if (who == INT_MIN) | ||
118 | return; | ||
119 | |||
116 | type = PIDTYPE_PGID; | 120 | type = PIDTYPE_PGID; |
117 | who = -who; | 121 | who = -who; |
118 | } | 122 | } |
diff --git a/fs/inode.c b/fs/inode.c index 6a7234f0afea..bd16497b3bba 100644 --- a/fs/inode.c +++ b/fs/inode.c | |||
@@ -2034,3 +2034,9 @@ void inode_set_flags(struct inode *inode, unsigned int flags, | |||
2034 | new_flags) != old_flags)); | 2034 | new_flags) != old_flags)); |
2035 | } | 2035 | } |
2036 | EXPORT_SYMBOL(inode_set_flags); | 2036 | EXPORT_SYMBOL(inode_set_flags); |
2037 | |||
2038 | void inode_nohighmem(struct inode *inode) | ||
2039 | { | ||
2040 | mapping_set_gfp_mask(inode->i_mapping, GFP_USER); | ||
2041 | } | ||
2042 | EXPORT_SYMBOL(inode_nohighmem); | ||
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c index 6e9a912d394c..6875bd5d35f6 100644 --- a/fs/kernfs/file.c +++ b/fs/kernfs/file.c | |||
@@ -272,7 +272,7 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf, | |||
272 | { | 272 | { |
273 | struct kernfs_open_file *of = kernfs_of(file); | 273 | struct kernfs_open_file *of = kernfs_of(file); |
274 | const struct kernfs_ops *ops; | 274 | const struct kernfs_ops *ops; |
275 | size_t len; | 275 | ssize_t len; |
276 | char *buf; | 276 | char *buf; |
277 | 277 | ||
278 | if (of->atomic_write_len) { | 278 | if (of->atomic_write_len) { |
diff --git a/fs/locks.c b/fs/locks.c index 8eddae23e10b..b515e65f1376 100644 --- a/fs/locks.c +++ b/fs/locks.c | |||
@@ -2220,10 +2220,12 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd, | |||
2220 | error = do_lock_file_wait(filp, cmd, file_lock); | 2220 | error = do_lock_file_wait(filp, cmd, file_lock); |
2221 | 2221 | ||
2222 | /* | 2222 | /* |
2223 | * Attempt to detect a close/fcntl race and recover by | 2223 | * Attempt to detect a close/fcntl race and recover by releasing the |
2224 | * releasing the lock that was just acquired. | 2224 | * lock that was just acquired. There is no need to do that when we're |
2225 | * unlocking though, or for OFD locks. | ||
2225 | */ | 2226 | */ |
2226 | if (!error && file_lock->fl_type != F_UNLCK) { | 2227 | if (!error && file_lock->fl_type != F_UNLCK && |
2228 | !(file_lock->fl_flags & FL_OFDLCK)) { | ||
2227 | /* | 2229 | /* |
2228 | * We need that spin_lock here - it prevents reordering between | 2230 | * We need that spin_lock here - it prevents reordering between |
2229 | * update of i_flctx->flc_posix and check for it done in | 2231 | * update of i_flctx->flc_posix and check for it done in |
@@ -2362,10 +2364,12 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd, | |||
2362 | error = do_lock_file_wait(filp, cmd, file_lock); | 2364 | error = do_lock_file_wait(filp, cmd, file_lock); |
2363 | 2365 | ||
2364 | /* | 2366 | /* |
2365 | * Attempt to detect a close/fcntl race and recover by | 2367 | * Attempt to detect a close/fcntl race and recover by releasing the |
2366 | * releasing the lock that was just acquired. | 2368 | * lock that was just acquired. There is no need to do that when we're |
2369 | * unlocking though, or for OFD locks. | ||
2367 | */ | 2370 | */ |
2368 | if (!error && file_lock->fl_type != F_UNLCK) { | 2371 | if (!error && file_lock->fl_type != F_UNLCK && |
2372 | !(file_lock->fl_flags & FL_OFDLCK)) { | ||
2369 | /* | 2373 | /* |
2370 | * We need that spin_lock here - it prevents reordering between | 2374 | * We need that spin_lock here - it prevents reordering between |
2371 | * update of i_flctx->flc_posix and check for it done in | 2375 | * update of i_flctx->flc_posix and check for it done in |
diff --git a/fs/namei.c b/fs/namei.c index f8eeea956503..c54aaa759ed1 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -2015,6 +2015,9 @@ static const char *path_init(struct nameidata *nd, unsigned flags) | |||
2015 | int retval = 0; | 2015 | int retval = 0; |
2016 | const char *s = nd->name->name; | 2016 | const char *s = nd->name->name; |
2017 | 2017 | ||
2018 | if (!*s) | ||
2019 | flags &= ~LOOKUP_RCU; | ||
2020 | |||
2018 | nd->last_type = LAST_ROOT; /* if there are only slashes... */ | 2021 | nd->last_type = LAST_ROOT; /* if there are only slashes... */ |
2019 | nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT; | 2022 | nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT; |
2020 | nd->depth = 0; | 2023 | nd->depth = 0; |
diff --git a/fs/namespace.c b/fs/namespace.c index 15b91b36ecab..7e14bf1c851c 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
@@ -227,6 +227,7 @@ static struct mount *alloc_vfsmnt(const char *name) | |||
227 | mnt->mnt_count = 1; | 227 | mnt->mnt_count = 1; |
228 | mnt->mnt_writers = 0; | 228 | mnt->mnt_writers = 0; |
229 | #endif | 229 | #endif |
230 | mnt->mnt.data = NULL; | ||
230 | 231 | ||
231 | INIT_HLIST_NODE(&mnt->mnt_hash); | 232 | INIT_HLIST_NODE(&mnt->mnt_hash); |
232 | INIT_LIST_HEAD(&mnt->mnt_child); | 233 | INIT_LIST_HEAD(&mnt->mnt_child); |
@@ -976,7 +977,6 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void | |||
976 | if (!mnt) | 977 | if (!mnt) |
977 | return ERR_PTR(-ENOMEM); | 978 | return ERR_PTR(-ENOMEM); |
978 | 979 | ||
979 | mnt->mnt.data = NULL; | ||
980 | if (type->alloc_mnt_data) { | 980 | if (type->alloc_mnt_data) { |
981 | mnt->mnt.data = type->alloc_mnt_data(); | 981 | mnt->mnt.data = type->alloc_mnt_data(); |
982 | if (!mnt->mnt.data) { | 982 | if (!mnt->mnt.data) { |
@@ -990,7 +990,6 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void | |||
990 | 990 | ||
991 | root = mount_fs(type, flags, name, &mnt->mnt, data); | 991 | root = mount_fs(type, flags, name, &mnt->mnt, data); |
992 | if (IS_ERR(root)) { | 992 | if (IS_ERR(root)) { |
993 | kfree(mnt->mnt.data); | ||
994 | mnt_free_id(mnt); | 993 | mnt_free_id(mnt); |
995 | free_vfsmnt(mnt); | 994 | free_vfsmnt(mnt); |
996 | return ERR_CAST(root); | 995 | return ERR_CAST(root); |
@@ -1094,7 +1093,6 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root, | |||
1094 | return mnt; | 1093 | return mnt; |
1095 | 1094 | ||
1096 | out_free: | 1095 | out_free: |
1097 | kfree(mnt->mnt.data); | ||
1098 | mnt_free_id(mnt); | 1096 | mnt_free_id(mnt); |
1099 | free_vfsmnt(mnt); | 1097 | free_vfsmnt(mnt); |
1100 | return ERR_PTR(err); | 1098 | return ERR_PTR(err); |
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 4b1d08f56aba..5fd3cf54b2b3 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c | |||
@@ -787,10 +787,8 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr) | |||
787 | 787 | ||
788 | spin_lock(&dreq->lock); | 788 | spin_lock(&dreq->lock); |
789 | 789 | ||
790 | if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) { | 790 | if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) |
791 | dreq->flags = 0; | ||
792 | dreq->error = hdr->error; | 791 | dreq->error = hdr->error; |
793 | } | ||
794 | if (dreq->error == 0) { | 792 | if (dreq->error == 0) { |
795 | nfs_direct_good_bytes(dreq, hdr); | 793 | nfs_direct_good_bytes(dreq, hdr); |
796 | if (nfs_write_need_commit(hdr)) { | 794 | if (nfs_write_need_commit(hdr)) { |
diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c index 5ba22c6b0ffa..1ee62e62ea76 100644 --- a/fs/nfs/nfs4idmap.c +++ b/fs/nfs/nfs4idmap.c | |||
@@ -567,9 +567,13 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons, | |||
567 | struct idmap_msg *im; | 567 | struct idmap_msg *im; |
568 | struct idmap *idmap = (struct idmap *)aux; | 568 | struct idmap *idmap = (struct idmap *)aux; |
569 | struct key *key = cons->key; | 569 | struct key *key = cons->key; |
570 | int ret = -ENOMEM; | 570 | int ret = -ENOKEY; |
571 | |||
572 | if (!aux) | ||
573 | goto out1; | ||
571 | 574 | ||
572 | /* msg and im are freed in idmap_pipe_destroy_msg */ | 575 | /* msg and im are freed in idmap_pipe_destroy_msg */ |
576 | ret = -ENOMEM; | ||
573 | data = kzalloc(sizeof(*data), GFP_KERNEL); | 577 | data = kzalloc(sizeof(*data), GFP_KERNEL); |
574 | if (!data) | 578 | if (!data) |
575 | goto out1; | 579 | goto out1; |
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 7af7bedd7c02..c8e75e5e6a67 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c | |||
@@ -1943,7 +1943,7 @@ pnfs_write_through_mds(struct nfs_pageio_descriptor *desc, | |||
1943 | nfs_pageio_reset_write_mds(desc); | 1943 | nfs_pageio_reset_write_mds(desc); |
1944 | mirror->pg_recoalesce = 1; | 1944 | mirror->pg_recoalesce = 1; |
1945 | } | 1945 | } |
1946 | hdr->release(hdr); | 1946 | hdr->completion_ops->completion(hdr); |
1947 | } | 1947 | } |
1948 | 1948 | ||
1949 | static enum pnfs_try_status | 1949 | static enum pnfs_try_status |
@@ -2058,7 +2058,7 @@ pnfs_read_through_mds(struct nfs_pageio_descriptor *desc, | |||
2058 | nfs_pageio_reset_read_mds(desc); | 2058 | nfs_pageio_reset_read_mds(desc); |
2059 | mirror->pg_recoalesce = 1; | 2059 | mirror->pg_recoalesce = 1; |
2060 | } | 2060 | } |
2061 | hdr->release(hdr); | 2061 | hdr->completion_ops->completion(hdr); |
2062 | } | 2062 | } |
2063 | 2063 | ||
2064 | /* | 2064 | /* |
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 7a9b6e347249..6e81a5b5858e 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
@@ -1746,6 +1746,8 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data) | |||
1746 | set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags); | 1746 | set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags); |
1747 | next: | 1747 | next: |
1748 | nfs_unlock_and_release_request(req); | 1748 | nfs_unlock_and_release_request(req); |
1749 | /* Latency breaker */ | ||
1750 | cond_resched(); | ||
1749 | } | 1751 | } |
1750 | nfss = NFS_SERVER(data->inode); | 1752 | nfss = NFS_SERVER(data->inode); |
1751 | if (atomic_long_read(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) | 1753 | if (atomic_long_read(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) |
diff --git a/fs/nfs_common/grace.c b/fs/nfs_common/grace.c index fd8c9a5bcac4..77d136ac8909 100644 --- a/fs/nfs_common/grace.c +++ b/fs/nfs_common/grace.c | |||
@@ -30,7 +30,11 @@ locks_start_grace(struct net *net, struct lock_manager *lm) | |||
30 | struct list_head *grace_list = net_generic(net, grace_net_id); | 30 | struct list_head *grace_list = net_generic(net, grace_net_id); |
31 | 31 | ||
32 | spin_lock(&grace_lock); | 32 | spin_lock(&grace_lock); |
33 | list_add(&lm->list, grace_list); | 33 | if (list_empty(&lm->list)) |
34 | list_add(&lm->list, grace_list); | ||
35 | else | ||
36 | WARN(1, "double list_add attempt detected in net %x %s\n", | ||
37 | net->ns.inum, (net == &init_net) ? "(init_net)" : ""); | ||
34 | spin_unlock(&grace_lock); | 38 | spin_unlock(&grace_lock); |
35 | } | 39 | } |
36 | EXPORT_SYMBOL_GPL(locks_start_grace); | 40 | EXPORT_SYMBOL_GPL(locks_start_grace); |
@@ -104,7 +108,9 @@ grace_exit_net(struct net *net) | |||
104 | { | 108 | { |
105 | struct list_head *grace_list = net_generic(net, grace_net_id); | 109 | struct list_head *grace_list = net_generic(net, grace_net_id); |
106 | 110 | ||
107 | BUG_ON(!list_empty(grace_list)); | 111 | WARN_ONCE(!list_empty(grace_list), |
112 | "net %x %s: grace_list is not empty\n", | ||
113 | net->ns.inum, __func__); | ||
108 | } | 114 | } |
109 | 115 | ||
110 | static struct pernet_operations grace_net_ops = { | 116 | static struct pernet_operations grace_net_ops = { |
diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c index 9d46a0bdd9f9..67eb154af881 100644 --- a/fs/nfsd/auth.c +++ b/fs/nfsd/auth.c | |||
@@ -59,7 +59,11 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp) | |||
59 | GROUP_AT(gi, i) = exp->ex_anon_gid; | 59 | GROUP_AT(gi, i) = exp->ex_anon_gid; |
60 | else | 60 | else |
61 | GROUP_AT(gi, i) = GROUP_AT(rqgi, i); | 61 | GROUP_AT(gi, i) = GROUP_AT(rqgi, i); |
62 | |||
62 | } | 63 | } |
64 | |||
65 | /* Each thread allocates its own gi, no race */ | ||
66 | groups_sort(gi); | ||
63 | } else { | 67 | } else { |
64 | gi = get_group_info(rqgi); | 68 | gi = get_group_info(rqgi); |
65 | } | 69 | } |
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 11c67e8b939d..ba27a5ff8677 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c | |||
@@ -63,12 +63,16 @@ static const stateid_t zero_stateid = { | |||
63 | static const stateid_t currentstateid = { | 63 | static const stateid_t currentstateid = { |
64 | .si_generation = 1, | 64 | .si_generation = 1, |
65 | }; | 65 | }; |
66 | static const stateid_t close_stateid = { | ||
67 | .si_generation = 0xffffffffU, | ||
68 | }; | ||
66 | 69 | ||
67 | static u64 current_sessionid = 1; | 70 | static u64 current_sessionid = 1; |
68 | 71 | ||
69 | #define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t))) | 72 | #define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t))) |
70 | #define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t))) | 73 | #define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t))) |
71 | #define CURRENT_STATEID(stateid) (!memcmp((stateid), ¤tstateid, sizeof(stateid_t))) | 74 | #define CURRENT_STATEID(stateid) (!memcmp((stateid), ¤tstateid, sizeof(stateid_t))) |
75 | #define CLOSE_STATEID(stateid) (!memcmp((stateid), &close_stateid, sizeof(stateid_t))) | ||
72 | 76 | ||
73 | /* forward declarations */ | 77 | /* forward declarations */ |
74 | static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner); | 78 | static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner); |
@@ -4701,7 +4705,8 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid) | |||
4701 | struct nfs4_stid *s; | 4705 | struct nfs4_stid *s; |
4702 | __be32 status = nfserr_bad_stateid; | 4706 | __be32 status = nfserr_bad_stateid; |
4703 | 4707 | ||
4704 | if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) | 4708 | if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) || |
4709 | CLOSE_STATEID(stateid)) | ||
4705 | return status; | 4710 | return status; |
4706 | /* Client debugging aid. */ | 4711 | /* Client debugging aid. */ |
4707 | if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) { | 4712 | if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) { |
@@ -4759,7 +4764,8 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate, | |||
4759 | else if (typemask & NFS4_DELEG_STID) | 4764 | else if (typemask & NFS4_DELEG_STID) |
4760 | typemask |= NFS4_REVOKED_DELEG_STID; | 4765 | typemask |= NFS4_REVOKED_DELEG_STID; |
4761 | 4766 | ||
4762 | if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) | 4767 | if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) || |
4768 | CLOSE_STATEID(stateid)) | ||
4763 | return nfserr_bad_stateid; | 4769 | return nfserr_bad_stateid; |
4764 | status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn); | 4770 | status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn); |
4765 | if (status == nfserr_stale_clientid) { | 4771 | if (status == nfserr_stale_clientid) { |
@@ -5011,15 +5017,9 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_ | |||
5011 | status = nfsd4_check_seqid(cstate, sop, seqid); | 5017 | status = nfsd4_check_seqid(cstate, sop, seqid); |
5012 | if (status) | 5018 | if (status) |
5013 | return status; | 5019 | return status; |
5014 | if (stp->st_stid.sc_type == NFS4_CLOSED_STID | 5020 | status = nfsd4_lock_ol_stateid(stp); |
5015 | || stp->st_stid.sc_type == NFS4_REVOKED_DELEG_STID) | 5021 | if (status != nfs_ok) |
5016 | /* | 5022 | return status; |
5017 | * "Closed" stateid's exist *only* to return | ||
5018 | * nfserr_replay_me from the previous step, and | ||
5019 | * revoked delegations are kept only for free_stateid. | ||
5020 | */ | ||
5021 | return nfserr_bad_stateid; | ||
5022 | mutex_lock(&stp->st_mutex); | ||
5023 | status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate)); | 5023 | status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate)); |
5024 | if (status == nfs_ok) | 5024 | if (status == nfs_ok) |
5025 | status = nfs4_check_fh(current_fh, &stp->st_stid); | 5025 | status = nfs4_check_fh(current_fh, &stp->st_stid); |
@@ -5243,6 +5243,11 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
5243 | nfsd4_close_open_stateid(stp); | 5243 | nfsd4_close_open_stateid(stp); |
5244 | mutex_unlock(&stp->st_mutex); | 5244 | mutex_unlock(&stp->st_mutex); |
5245 | 5245 | ||
5246 | /* See RFC5661 sectionm 18.2.4 */ | ||
5247 | if (stp->st_stid.sc_client->cl_minorversion) | ||
5248 | memcpy(&close->cl_stateid, &close_stateid, | ||
5249 | sizeof(close->cl_stateid)); | ||
5250 | |||
5246 | /* put reference from nfs4_preprocess_seqid_op */ | 5251 | /* put reference from nfs4_preprocess_seqid_op */ |
5247 | nfs4_put_stid(&stp->st_stid); | 5252 | nfs4_put_stid(&stp->st_stid); |
5248 | out: | 5253 | out: |
@@ -6787,6 +6792,10 @@ static int nfs4_state_create_net(struct net *net) | |||
6787 | INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]); | 6792 | INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]); |
6788 | nn->conf_name_tree = RB_ROOT; | 6793 | nn->conf_name_tree = RB_ROOT; |
6789 | nn->unconf_name_tree = RB_ROOT; | 6794 | nn->unconf_name_tree = RB_ROOT; |
6795 | nn->boot_time = get_seconds(); | ||
6796 | nn->grace_ended = false; | ||
6797 | nn->nfsd4_manager.block_opens = true; | ||
6798 | INIT_LIST_HEAD(&nn->nfsd4_manager.list); | ||
6790 | INIT_LIST_HEAD(&nn->client_lru); | 6799 | INIT_LIST_HEAD(&nn->client_lru); |
6791 | INIT_LIST_HEAD(&nn->close_lru); | 6800 | INIT_LIST_HEAD(&nn->close_lru); |
6792 | INIT_LIST_HEAD(&nn->del_recall_lru); | 6801 | INIT_LIST_HEAD(&nn->del_recall_lru); |
@@ -6841,9 +6850,6 @@ nfs4_state_start_net(struct net *net) | |||
6841 | ret = nfs4_state_create_net(net); | 6850 | ret = nfs4_state_create_net(net); |
6842 | if (ret) | 6851 | if (ret) |
6843 | return ret; | 6852 | return ret; |
6844 | nn->boot_time = get_seconds(); | ||
6845 | nn->grace_ended = false; | ||
6846 | nn->nfsd4_manager.block_opens = true; | ||
6847 | locks_start_grace(net, &nn->nfsd4_manager); | 6853 | locks_start_grace(net, &nn->nfsd4_manager); |
6848 | nfsd4_client_tracking_init(net); | 6854 | nfsd4_client_tracking_init(net); |
6849 | printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n", | 6855 | printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n", |
@@ -95,6 +95,7 @@ slow: | |||
95 | return ERR_PTR(-ENOMEM); | 95 | return ERR_PTR(-ENOMEM); |
96 | } | 96 | } |
97 | d_instantiate(dentry, inode); | 97 | d_instantiate(dentry, inode); |
98 | dentry->d_flags |= DCACHE_RCUACCESS; | ||
98 | dentry->d_fsdata = (void *)ns_ops; | 99 | dentry->d_fsdata = (void *)ns_ops; |
99 | d = atomic_long_cmpxchg(&ns->stashed, 0, (unsigned long)dentry); | 100 | d = atomic_long_cmpxchg(&ns->stashed, 0, (unsigned long)dentry); |
100 | if (d) { | 101 | if (d) { |
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c index adcb1398c481..299a6e1d6b77 100644 --- a/fs/overlayfs/readdir.c +++ b/fs/overlayfs/readdir.c | |||
@@ -441,10 +441,14 @@ static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end, | |||
441 | struct dentry *dentry = file->f_path.dentry; | 441 | struct dentry *dentry = file->f_path.dentry; |
442 | struct file *realfile = od->realfile; | 442 | struct file *realfile = od->realfile; |
443 | 443 | ||
444 | /* Nothing to sync for lower */ | ||
445 | if (!OVL_TYPE_UPPER(ovl_path_type(dentry))) | ||
446 | return 0; | ||
447 | |||
444 | /* | 448 | /* |
445 | * Need to check if we started out being a lower dir, but got copied up | 449 | * Need to check if we started out being a lower dir, but got copied up |
446 | */ | 450 | */ |
447 | if (!od->is_upper && OVL_TYPE_UPPER(ovl_path_type(dentry))) { | 451 | if (!od->is_upper) { |
448 | struct inode *inode = file_inode(file); | 452 | struct inode *inode = file_inode(file); |
449 | 453 | ||
450 | realfile = lockless_dereference(od->upperfile); | 454 | realfile = lockless_dereference(od->upperfile); |
@@ -1001,6 +1001,9 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages) | |||
1001 | { | 1001 | { |
1002 | struct pipe_buffer *bufs; | 1002 | struct pipe_buffer *bufs; |
1003 | 1003 | ||
1004 | if (!nr_pages) | ||
1005 | return -EINVAL; | ||
1006 | |||
1004 | /* | 1007 | /* |
1005 | * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't | 1008 | * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't |
1006 | * expect a lot of shrink+grow operations, just free and allocate | 1009 | * expect a lot of shrink+grow operations, just free and allocate |
@@ -1045,13 +1048,19 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages) | |||
1045 | 1048 | ||
1046 | /* | 1049 | /* |
1047 | * Currently we rely on the pipe array holding a power-of-2 number | 1050 | * Currently we rely on the pipe array holding a power-of-2 number |
1048 | * of pages. | 1051 | * of pages. Returns 0 on error. |
1049 | */ | 1052 | */ |
1050 | static inline unsigned int round_pipe_size(unsigned int size) | 1053 | static inline unsigned int round_pipe_size(unsigned int size) |
1051 | { | 1054 | { |
1052 | unsigned long nr_pages; | 1055 | unsigned long nr_pages; |
1053 | 1056 | ||
1057 | if (size < pipe_min_size) | ||
1058 | size = pipe_min_size; | ||
1059 | |||
1054 | nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; | 1060 | nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; |
1061 | if (nr_pages == 0) | ||
1062 | return 0; | ||
1063 | |||
1055 | return roundup_pow_of_two(nr_pages) << PAGE_SHIFT; | 1064 | return roundup_pow_of_two(nr_pages) << PAGE_SHIFT; |
1056 | } | 1065 | } |
1057 | 1066 | ||
@@ -1062,13 +1071,18 @@ static inline unsigned int round_pipe_size(unsigned int size) | |||
1062 | int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf, | 1071 | int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf, |
1063 | size_t *lenp, loff_t *ppos) | 1072 | size_t *lenp, loff_t *ppos) |
1064 | { | 1073 | { |
1074 | unsigned int rounded_pipe_max_size; | ||
1065 | int ret; | 1075 | int ret; |
1066 | 1076 | ||
1067 | ret = proc_dointvec_minmax(table, write, buf, lenp, ppos); | 1077 | ret = proc_dointvec_minmax(table, write, buf, lenp, ppos); |
1068 | if (ret < 0 || !write) | 1078 | if (ret < 0 || !write) |
1069 | return ret; | 1079 | return ret; |
1070 | 1080 | ||
1071 | pipe_max_size = round_pipe_size(pipe_max_size); | 1081 | rounded_pipe_max_size = round_pipe_size(pipe_max_size); |
1082 | if (rounded_pipe_max_size == 0) | ||
1083 | return -EINVAL; | ||
1084 | |||
1085 | pipe_max_size = rounded_pipe_max_size; | ||
1072 | return ret; | 1086 | return ret; |
1073 | } | 1087 | } |
1074 | 1088 | ||
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 353ff31dcee1..1cb1d02c5937 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c | |||
@@ -2919,7 +2919,8 @@ static int __init dquot_init(void) | |||
2919 | pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld," | 2919 | pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld," |
2920 | " %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order)); | 2920 | " %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order)); |
2921 | 2921 | ||
2922 | register_shrinker(&dqcache_shrinker); | 2922 | if (register_shrinker(&dqcache_shrinker)) |
2923 | panic("Cannot register dquot shrinker"); | ||
2923 | 2924 | ||
2924 | return 0; | 2925 | return 0; |
2925 | } | 2926 | } |
diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c index dc198bc64c61..edc8ef78b63f 100644 --- a/fs/reiserfs/bitmap.c +++ b/fs/reiserfs/bitmap.c | |||
@@ -513,9 +513,17 @@ static void __discard_prealloc(struct reiserfs_transaction_handle *th, | |||
513 | "inode has negative prealloc blocks count."); | 513 | "inode has negative prealloc blocks count."); |
514 | #endif | 514 | #endif |
515 | while (ei->i_prealloc_count > 0) { | 515 | while (ei->i_prealloc_count > 0) { |
516 | reiserfs_free_prealloc_block(th, inode, ei->i_prealloc_block); | 516 | b_blocknr_t block_to_free; |
517 | ei->i_prealloc_block++; | 517 | |
518 | /* | ||
519 | * reiserfs_free_prealloc_block can drop the write lock, | ||
520 | * which could allow another caller to free the same block. | ||
521 | * We can protect against it by modifying the prealloc | ||
522 | * state before calling it. | ||
523 | */ | ||
524 | block_to_free = ei->i_prealloc_block++; | ||
518 | ei->i_prealloc_count--; | 525 | ei->i_prealloc_count--; |
526 | reiserfs_free_prealloc_block(th, inode, block_to_free); | ||
519 | dirty = 1; | 527 | dirty = 1; |
520 | } | 528 | } |
521 | if (dirty) | 529 | if (dirty) |
@@ -1128,7 +1136,7 @@ static int determine_prealloc_size(reiserfs_blocknr_hint_t * hint) | |||
1128 | hint->prealloc_size = 0; | 1136 | hint->prealloc_size = 0; |
1129 | 1137 | ||
1130 | if (!hint->formatted_node && hint->preallocate) { | 1138 | if (!hint->formatted_node && hint->preallocate) { |
1131 | if (S_ISREG(hint->inode->i_mode) | 1139 | if (S_ISREG(hint->inode->i_mode) && !IS_PRIVATE(hint->inode) |
1132 | && hint->inode->i_size >= | 1140 | && hint->inode->i_size >= |
1133 | REISERFS_SB(hint->th->t_super)->s_alloc_options. | 1141 | REISERFS_SB(hint->th->t_super)->s_alloc_options. |
1134 | preallocmin * hint->inode->i_sb->s_blocksize) | 1142 | preallocmin * hint->inode->i_sb->s_blocksize) |
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c index 9b1824f35501..91b036902a17 100644 --- a/fs/reiserfs/xattr_acl.c +++ b/fs/reiserfs/xattr_acl.c | |||
@@ -37,7 +37,14 @@ reiserfs_set_acl(struct inode *inode, struct posix_acl *acl, int type) | |||
37 | error = journal_begin(&th, inode->i_sb, jcreate_blocks); | 37 | error = journal_begin(&th, inode->i_sb, jcreate_blocks); |
38 | reiserfs_write_unlock(inode->i_sb); | 38 | reiserfs_write_unlock(inode->i_sb); |
39 | if (error == 0) { | 39 | if (error == 0) { |
40 | if (type == ACL_TYPE_ACCESS && acl) { | ||
41 | error = posix_acl_update_mode(inode, &inode->i_mode, | ||
42 | &acl); | ||
43 | if (error) | ||
44 | goto unlock; | ||
45 | } | ||
40 | error = __reiserfs_set_acl(&th, inode, type, acl); | 46 | error = __reiserfs_set_acl(&th, inode, type, acl); |
47 | unlock: | ||
41 | reiserfs_write_lock(inode->i_sb); | 48 | reiserfs_write_lock(inode->i_sb); |
42 | error2 = journal_end(&th); | 49 | error2 = journal_end(&th); |
43 | reiserfs_write_unlock(inode->i_sb); | 50 | reiserfs_write_unlock(inode->i_sb); |
@@ -245,11 +252,6 @@ __reiserfs_set_acl(struct reiserfs_transaction_handle *th, struct inode *inode, | |||
245 | switch (type) { | 252 | switch (type) { |
246 | case ACL_TYPE_ACCESS: | 253 | case ACL_TYPE_ACCESS: |
247 | name = POSIX_ACL_XATTR_ACCESS; | 254 | name = POSIX_ACL_XATTR_ACCESS; |
248 | if (acl) { | ||
249 | error = posix_acl_update_mode(inode, &inode->i_mode, &acl); | ||
250 | if (error) | ||
251 | return error; | ||
252 | } | ||
253 | break; | 255 | break; |
254 | case ACL_TYPE_DEFAULT: | 256 | case ACL_TYPE_DEFAULT: |
255 | name = POSIX_ACL_XATTR_DEFAULT; | 257 | name = POSIX_ACL_XATTR_DEFAULT; |
diff --git a/fs/select.c b/fs/select.c index 09e71a00a9b8..3d38808dbcb6 100644 --- a/fs/select.c +++ b/fs/select.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/sched/rt.h> | 29 | #include <linux/sched/rt.h> |
30 | #include <linux/freezer.h> | 30 | #include <linux/freezer.h> |
31 | #include <net/busy_poll.h> | 31 | #include <net/busy_poll.h> |
32 | #include <linux/vmalloc.h> | ||
32 | 33 | ||
33 | #include <asm/uaccess.h> | 34 | #include <asm/uaccess.h> |
34 | 35 | ||
@@ -550,7 +551,7 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp, | |||
550 | fd_set_bits fds; | 551 | fd_set_bits fds; |
551 | void *bits; | 552 | void *bits; |
552 | int ret, max_fds; | 553 | int ret, max_fds; |
553 | unsigned int size; | 554 | size_t size, alloc_size; |
554 | struct fdtable *fdt; | 555 | struct fdtable *fdt; |
555 | /* Allocate small arguments on the stack to save memory and be faster */ | 556 | /* Allocate small arguments on the stack to save memory and be faster */ |
556 | long stack_fds[SELECT_STACK_ALLOC/sizeof(long)]; | 557 | long stack_fds[SELECT_STACK_ALLOC/sizeof(long)]; |
@@ -577,7 +578,14 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp, | |||
577 | if (size > sizeof(stack_fds) / 6) { | 578 | if (size > sizeof(stack_fds) / 6) { |
578 | /* Not enough space in on-stack array; must use kmalloc */ | 579 | /* Not enough space in on-stack array; must use kmalloc */ |
579 | ret = -ENOMEM; | 580 | ret = -ENOMEM; |
580 | bits = kmalloc(6 * size, GFP_KERNEL); | 581 | if (size > (SIZE_MAX / 6)) |
582 | goto out_nofds; | ||
583 | |||
584 | alloc_size = 6 * size; | ||
585 | bits = kmalloc(alloc_size, GFP_KERNEL|__GFP_NOWARN); | ||
586 | if (!bits && alloc_size > PAGE_SIZE) | ||
587 | bits = vmalloc(alloc_size); | ||
588 | |||
581 | if (!bits) | 589 | if (!bits) |
582 | goto out_nofds; | 590 | goto out_nofds; |
583 | } | 591 | } |
@@ -614,7 +622,7 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp, | |||
614 | 622 | ||
615 | out: | 623 | out: |
616 | if (bits != stack_fds) | 624 | if (bits != stack_fds) |
617 | kfree(bits); | 625 | kvfree(bits); |
618 | out_nofds: | 626 | out_nofds: |
619 | return ret; | 627 | return ret; |
620 | } | 628 | } |
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index a9063ac50c4e..da72090b9ce7 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c | |||
@@ -310,7 +310,7 @@ xfs_map_blocks( | |||
310 | (ip->i_df.if_flags & XFS_IFEXTENTS)); | 310 | (ip->i_df.if_flags & XFS_IFEXTENTS)); |
311 | ASSERT(offset <= mp->m_super->s_maxbytes); | 311 | ASSERT(offset <= mp->m_super->s_maxbytes); |
312 | 312 | ||
313 | if (offset + count > mp->m_super->s_maxbytes) | 313 | if ((xfs_ufsize_t)offset + count > mp->m_super->s_maxbytes) |
314 | count = mp->m_super->s_maxbytes - offset; | 314 | count = mp->m_super->s_maxbytes - offset; |
315 | end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count); | 315 | end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count); |
316 | offset_fsb = XFS_B_TO_FSBT(mp, offset); | 316 | offset_fsb = XFS_B_TO_FSBT(mp, offset); |
@@ -1360,7 +1360,7 @@ xfs_map_trim_size( | |||
1360 | if (mapping_size > size) | 1360 | if (mapping_size > size) |
1361 | mapping_size = size; | 1361 | mapping_size = size; |
1362 | if (offset < i_size_read(inode) && | 1362 | if (offset < i_size_read(inode) && |
1363 | offset + mapping_size >= i_size_read(inode)) { | 1363 | (xfs_ufsize_t)offset + mapping_size >= i_size_read(inode)) { |
1364 | /* limit mapping to block that spans EOF */ | 1364 | /* limit mapping to block that spans EOF */ |
1365 | mapping_size = roundup_64(i_size_read(inode) - offset, | 1365 | mapping_size = roundup_64(i_size_read(inode) - offset, |
1366 | i_blocksize(inode)); | 1366 | i_blocksize(inode)); |
@@ -1416,7 +1416,7 @@ __xfs_get_blocks( | |||
1416 | } | 1416 | } |
1417 | 1417 | ||
1418 | ASSERT(offset <= mp->m_super->s_maxbytes); | 1418 | ASSERT(offset <= mp->m_super->s_maxbytes); |
1419 | if (offset + size > mp->m_super->s_maxbytes) | 1419 | if ((xfs_ufsize_t)offset + size > mp->m_super->s_maxbytes) |
1420 | size = mp->m_super->s_maxbytes - offset; | 1420 | size = mp->m_super->s_maxbytes - offset; |
1421 | end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size); | 1421 | end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size); |
1422 | offset_fsb = XFS_B_TO_FSBT(mp, offset); | 1422 | offset_fsb = XFS_B_TO_FSBT(mp, offset); |
diff --git a/include/asm-generic/asm-prototypes.h b/include/asm-generic/asm-prototypes.h new file mode 100644 index 000000000000..df13637e4017 --- /dev/null +++ b/include/asm-generic/asm-prototypes.h | |||
@@ -0,0 +1,7 @@ | |||
1 | #include <linux/bitops.h> | ||
2 | extern void *__memset(void *, int, __kernel_size_t); | ||
3 | extern void *__memcpy(void *, const void *, __kernel_size_t); | ||
4 | extern void *__memmove(void *, const void *, __kernel_size_t); | ||
5 | extern void *memset(void *, int, __kernel_size_t); | ||
6 | extern void *memcpy(void *, const void *, __kernel_size_t); | ||
7 | extern void *memmove(void *, const void *, __kernel_size_t); | ||
diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h new file mode 100644 index 000000000000..43199a049da5 --- /dev/null +++ b/include/asm-generic/export.h | |||
@@ -0,0 +1,94 @@ | |||
1 | #ifndef __ASM_GENERIC_EXPORT_H | ||
2 | #define __ASM_GENERIC_EXPORT_H | ||
3 | |||
4 | #ifndef KSYM_FUNC | ||
5 | #define KSYM_FUNC(x) x | ||
6 | #endif | ||
7 | #ifdef CONFIG_64BIT | ||
8 | #define __put .quad | ||
9 | #ifndef KSYM_ALIGN | ||
10 | #define KSYM_ALIGN 8 | ||
11 | #endif | ||
12 | #ifndef KCRC_ALIGN | ||
13 | #define KCRC_ALIGN 8 | ||
14 | #endif | ||
15 | #else | ||
16 | #define __put .long | ||
17 | #ifndef KSYM_ALIGN | ||
18 | #define KSYM_ALIGN 4 | ||
19 | #endif | ||
20 | #ifndef KCRC_ALIGN | ||
21 | #define KCRC_ALIGN 4 | ||
22 | #endif | ||
23 | #endif | ||
24 | |||
25 | #ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX | ||
26 | #define KSYM(name) _##name | ||
27 | #else | ||
28 | #define KSYM(name) name | ||
29 | #endif | ||
30 | |||
31 | /* | ||
32 | * note on .section use: @progbits vs %progbits nastiness doesn't matter, | ||
33 | * since we immediately emit into those sections anyway. | ||
34 | */ | ||
35 | .macro ___EXPORT_SYMBOL name,val,sec | ||
36 | #ifdef CONFIG_MODULES | ||
37 | .globl KSYM(__ksymtab_\name) | ||
38 | .section ___ksymtab\sec+\name,"a" | ||
39 | .balign KSYM_ALIGN | ||
40 | KSYM(__ksymtab_\name): | ||
41 | __put \val, KSYM(__kstrtab_\name) | ||
42 | .previous | ||
43 | .section __ksymtab_strings,"a" | ||
44 | KSYM(__kstrtab_\name): | ||
45 | #ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX | ||
46 | .asciz "_\name" | ||
47 | #else | ||
48 | .asciz "\name" | ||
49 | #endif | ||
50 | .previous | ||
51 | #ifdef CONFIG_MODVERSIONS | ||
52 | .section ___kcrctab\sec+\name,"a" | ||
53 | .balign KCRC_ALIGN | ||
54 | KSYM(__kcrctab_\name): | ||
55 | __put KSYM(__crc_\name) | ||
56 | .weak KSYM(__crc_\name) | ||
57 | .previous | ||
58 | #endif | ||
59 | #endif | ||
60 | .endm | ||
61 | #undef __put | ||
62 | |||
63 | #if defined(__KSYM_DEPS__) | ||
64 | |||
65 | #define __EXPORT_SYMBOL(sym, val, sec) === __KSYM_##sym === | ||
66 | |||
67 | #elif defined(CONFIG_TRIM_UNUSED_KSYMS) | ||
68 | |||
69 | #include <linux/kconfig.h> | ||
70 | #include <generated/autoksyms.h> | ||
71 | |||
72 | #define __EXPORT_SYMBOL(sym, val, sec) \ | ||
73 | __cond_export_sym(sym, val, sec, config_enabled(__KSYM_##sym)) | ||
74 | #define __cond_export_sym(sym, val, sec, conf) \ | ||
75 | ___cond_export_sym(sym, val, sec, conf) | ||
76 | #define ___cond_export_sym(sym, val, sec, enabled) \ | ||
77 | __cond_export_sym_##enabled(sym, val, sec) | ||
78 | #define __cond_export_sym_1(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec | ||
79 | #define __cond_export_sym_0(sym, val, sec) /* nothing */ | ||
80 | |||
81 | #else | ||
82 | #define __EXPORT_SYMBOL(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec | ||
83 | #endif | ||
84 | |||
85 | #define EXPORT_SYMBOL(name) \ | ||
86 | __EXPORT_SYMBOL(name, KSYM_FUNC(KSYM(name)),) | ||
87 | #define EXPORT_SYMBOL_GPL(name) \ | ||
88 | __EXPORT_SYMBOL(name, KSYM_FUNC(KSYM(name)), _gpl) | ||
89 | #define EXPORT_DATA_SYMBOL(name) \ | ||
90 | __EXPORT_SYMBOL(name, KSYM(name),) | ||
91 | #define EXPORT_DATA_SYMBOL_GPL(name) \ | ||
92 | __EXPORT_SYMBOL(name, KSYM(name),_gpl) | ||
93 | |||
94 | #endif | ||
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index a65eedc15e93..265e4f1493c3 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h | |||
@@ -734,7 +734,14 @@ | |||
734 | */ | 734 | */ |
735 | #define PERCPU_INPUT(cacheline) \ | 735 | #define PERCPU_INPUT(cacheline) \ |
736 | VMLINUX_SYMBOL(__per_cpu_start) = .; \ | 736 | VMLINUX_SYMBOL(__per_cpu_start) = .; \ |
737 | VMLINUX_SYMBOL(__per_cpu_user_mapped_start) = .; \ | ||
737 | *(.data..percpu..first) \ | 738 | *(.data..percpu..first) \ |
739 | . = ALIGN(cacheline); \ | ||
740 | *(.data..percpu..user_mapped) \ | ||
741 | *(.data..percpu..user_mapped..shared_aligned) \ | ||
742 | . = ALIGN(PAGE_SIZE); \ | ||
743 | *(.data..percpu..user_mapped..page_aligned) \ | ||
744 | VMLINUX_SYMBOL(__per_cpu_user_mapped_end) = .; \ | ||
738 | . = ALIGN(PAGE_SIZE); \ | 745 | . = ALIGN(PAGE_SIZE); \ |
739 | *(.data..percpu..page_aligned) \ | 746 | *(.data..percpu..page_aligned) \ |
740 | . = ALIGN(cacheline); \ | 747 | . = ALIGN(cacheline); \ |
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index 0ebdb4f2f0c8..90aa5cb7ea82 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h | |||
@@ -91,6 +91,8 @@ static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg) | |||
91 | return alg->setkey != shash_no_setkey; | 91 | return alg->setkey != shash_no_setkey; |
92 | } | 92 | } |
93 | 93 | ||
94 | bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg); | ||
95 | |||
94 | int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn, | 96 | int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn, |
95 | struct hash_alg_common *alg, | 97 | struct hash_alg_common *alg, |
96 | struct crypto_instance *inst); | 98 | struct crypto_instance *inst); |
diff --git a/include/crypto/mcryptd.h b/include/crypto/mcryptd.h index c23ee1f7ee80..c2ff077168d3 100644 --- a/include/crypto/mcryptd.h +++ b/include/crypto/mcryptd.h | |||
@@ -26,6 +26,7 @@ static inline struct mcryptd_ahash *__mcryptd_ahash_cast( | |||
26 | 26 | ||
27 | struct mcryptd_cpu_queue { | 27 | struct mcryptd_cpu_queue { |
28 | struct crypto_queue queue; | 28 | struct crypto_queue queue; |
29 | spinlock_t q_lock; | ||
29 | struct work_struct work; | 30 | struct work_struct work; |
30 | }; | 31 | }; |
31 | 32 | ||
diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h index 894df59b74e4..d586f741cab5 100644 --- a/include/crypto/poly1305.h +++ b/include/crypto/poly1305.h | |||
@@ -30,8 +30,6 @@ struct poly1305_desc_ctx { | |||
30 | }; | 30 | }; |
31 | 31 | ||
32 | int crypto_poly1305_init(struct shash_desc *desc); | 32 | int crypto_poly1305_init(struct shash_desc *desc); |
33 | int crypto_poly1305_setkey(struct crypto_shash *tfm, | ||
34 | const u8 *key, unsigned int keylen); | ||
35 | unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, | 33 | unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, |
36 | const u8 *src, unsigned int srclen); | 34 | const u8 *src, unsigned int srclen); |
37 | int crypto_poly1305_update(struct shash_desc *desc, | 35 | int crypto_poly1305_update(struct shash_desc *desc, |
diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 4f6d29c8e3d8..132585a7fbd8 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h | |||
@@ -31,16 +31,25 @@ struct bpf_map_ops { | |||
31 | }; | 31 | }; |
32 | 32 | ||
33 | struct bpf_map { | 33 | struct bpf_map { |
34 | atomic_t refcnt; | 34 | /* 1st cacheline with read-mostly members of which some |
35 | * are also accessed in fast-path (e.g. ops, max_entries). | ||
36 | */ | ||
37 | const struct bpf_map_ops *ops ____cacheline_aligned; | ||
35 | enum bpf_map_type map_type; | 38 | enum bpf_map_type map_type; |
36 | u32 key_size; | 39 | u32 key_size; |
37 | u32 value_size; | 40 | u32 value_size; |
38 | u32 max_entries; | 41 | u32 max_entries; |
39 | u32 pages; | 42 | u32 pages; |
40 | struct user_struct *user; | 43 | bool unpriv_array; |
41 | const struct bpf_map_ops *ops; | 44 | /* 7 bytes hole */ |
42 | struct work_struct work; | 45 | |
46 | /* 2nd cacheline with misc members to avoid false sharing | ||
47 | * particularly with refcounting. | ||
48 | */ | ||
49 | struct user_struct *user ____cacheline_aligned; | ||
50 | atomic_t refcnt; | ||
43 | atomic_t usercnt; | 51 | atomic_t usercnt; |
52 | struct work_struct work; | ||
44 | }; | 53 | }; |
45 | 54 | ||
46 | struct bpf_map_type_list { | 55 | struct bpf_map_type_list { |
@@ -141,6 +150,7 @@ struct bpf_prog_aux { | |||
141 | struct bpf_array { | 150 | struct bpf_array { |
142 | struct bpf_map map; | 151 | struct bpf_map map; |
143 | u32 elem_size; | 152 | u32 elem_size; |
153 | u32 index_mask; | ||
144 | /* 'ownership' of prog_array is claimed by the first program that | 154 | /* 'ownership' of prog_array is claimed by the first program that |
145 | * is going to use this map or by the first program which FD is stored | 155 | * is going to use this map or by the first program which FD is stored |
146 | * in the map to make sure that all callers and callees have the same | 156 | * in the map to make sure that all callers and callees have the same |
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h index 2189935075b4..a951fd10aaaa 100644 --- a/include/linux/cacheinfo.h +++ b/include/linux/cacheinfo.h | |||
@@ -71,6 +71,7 @@ struct cpu_cacheinfo { | |||
71 | struct cacheinfo *info_list; | 71 | struct cacheinfo *info_list; |
72 | unsigned int num_levels; | 72 | unsigned int num_levels; |
73 | unsigned int num_leaves; | 73 | unsigned int num_leaves; |
74 | bool cpu_map_populated; | ||
74 | }; | 75 | }; |
75 | 76 | ||
76 | /* | 77 | /* |
diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 1c95ef5a6ed9..99f7f5a13d92 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h | |||
@@ -40,6 +40,13 @@ extern void cpu_remove_dev_attr(struct device_attribute *attr); | |||
40 | extern int cpu_add_dev_attr_group(struct attribute_group *attrs); | 40 | extern int cpu_add_dev_attr_group(struct attribute_group *attrs); |
41 | extern void cpu_remove_dev_attr_group(struct attribute_group *attrs); | 41 | extern void cpu_remove_dev_attr_group(struct attribute_group *attrs); |
42 | 42 | ||
43 | extern ssize_t cpu_show_meltdown(struct device *dev, | ||
44 | struct device_attribute *attr, char *buf); | ||
45 | extern ssize_t cpu_show_spectre_v1(struct device *dev, | ||
46 | struct device_attribute *attr, char *buf); | ||
47 | extern ssize_t cpu_show_spectre_v2(struct device *dev, | ||
48 | struct device_attribute *attr, char *buf); | ||
49 | |||
43 | extern __printf(4, 5) | 50 | extern __printf(4, 5) |
44 | struct device *cpu_device_create(struct device *parent, void *drvdata, | 51 | struct device *cpu_device_create(struct device *parent, void *drvdata, |
45 | const struct attribute_group **groups, | 52 | const struct attribute_group **groups, |
diff --git a/include/linux/cred.h b/include/linux/cred.h index 257db64562e5..9e120c92551b 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h | |||
@@ -87,6 +87,7 @@ extern int set_current_groups(struct group_info *); | |||
87 | extern void set_groups(struct cred *, struct group_info *); | 87 | extern void set_groups(struct cred *, struct group_info *); |
88 | extern int groups_search(const struct group_info *, kgid_t); | 88 | extern int groups_search(const struct group_info *, kgid_t); |
89 | extern bool may_setgroups(void); | 89 | extern bool may_setgroups(void); |
90 | extern void groups_sort(struct group_info *); | ||
90 | 91 | ||
91 | /* access the groups "array" with this macro */ | 92 | /* access the groups "array" with this macro */ |
92 | #define GROUP_AT(gi, i) \ | 93 | #define GROUP_AT(gi, i) \ |
diff --git a/include/linux/filter.h b/include/linux/filter.h index ccb98b459c59..677fa3b42194 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h | |||
@@ -466,6 +466,9 @@ u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); | |||
466 | void bpf_int_jit_compile(struct bpf_prog *fp); | 466 | void bpf_int_jit_compile(struct bpf_prog *fp); |
467 | bool bpf_helper_changes_skb_data(void *func); | 467 | bool bpf_helper_changes_skb_data(void *func); |
468 | 468 | ||
469 | struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, | ||
470 | const struct bpf_insn *patch, u32 len); | ||
471 | |||
469 | #ifdef CONFIG_BPF_JIT | 472 | #ifdef CONFIG_BPF_JIT |
470 | typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size); | 473 | typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size); |
471 | 474 | ||
diff --git a/include/linux/fs.h b/include/linux/fs.h index bff4ce57b77e..c05b126b57a2 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -3088,5 +3088,6 @@ static inline bool dir_relax(struct inode *inode) | |||
3088 | } | 3088 | } |
3089 | 3089 | ||
3090 | extern bool path_noexec(const struct path *path); | 3090 | extern bool path_noexec(const struct path *path); |
3091 | extern void inode_nohighmem(struct inode *inode); | ||
3091 | 3092 | ||
3092 | #endif /* _LINUX_FS_H */ | 3093 | #endif /* _LINUX_FS_H */ |
diff --git a/include/linux/fscache.h b/include/linux/fscache.h index 115bb81912cc..94a8aae8f9e2 100644 --- a/include/linux/fscache.h +++ b/include/linux/fscache.h | |||
@@ -764,7 +764,7 @@ bool fscache_maybe_release_page(struct fscache_cookie *cookie, | |||
764 | { | 764 | { |
765 | if (fscache_cookie_valid(cookie) && PageFsCache(page)) | 765 | if (fscache_cookie_valid(cookie) && PageFsCache(page)) |
766 | return __fscache_maybe_release_page(cookie, page, gfp); | 766 | return __fscache_maybe_release_page(cookie, page, gfp); |
767 | return false; | 767 | return true; |
768 | } | 768 | } |
769 | 769 | ||
770 | /** | 770 | /** |
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index a0fc3cf932af..e4e22ed3fc0c 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h | |||
@@ -218,7 +218,8 @@ struct ipv6_pinfo { | |||
218 | * 100: prefer care-of address | 218 | * 100: prefer care-of address |
219 | */ | 219 | */ |
220 | dontfrag:1, | 220 | dontfrag:1, |
221 | autoflowlabel:1; | 221 | autoflowlabel:1, |
222 | autoflowlabel_set:1; | ||
222 | __u8 min_hopcount; | 223 | __u8 min_hopcount; |
223 | __u8 tclass; | 224 | __u8 tclass; |
224 | __be32 rcv_flowinfo; | 225 | __be32 rcv_flowinfo; |
diff --git a/include/linux/kaiser.h b/include/linux/kaiser.h new file mode 100644 index 000000000000..b56c19010480 --- /dev/null +++ b/include/linux/kaiser.h | |||
@@ -0,0 +1,52 @@ | |||
1 | #ifndef _LINUX_KAISER_H | ||
2 | #define _LINUX_KAISER_H | ||
3 | |||
4 | #ifdef CONFIG_PAGE_TABLE_ISOLATION | ||
5 | #include <asm/kaiser.h> | ||
6 | |||
7 | static inline int kaiser_map_thread_stack(void *stack) | ||
8 | { | ||
9 | /* | ||
10 | * Map that page of kernel stack on which we enter from user context. | ||
11 | */ | ||
12 | return kaiser_add_mapping((unsigned long)stack + | ||
13 | THREAD_SIZE - PAGE_SIZE, PAGE_SIZE, __PAGE_KERNEL); | ||
14 | } | ||
15 | |||
16 | static inline void kaiser_unmap_thread_stack(void *stack) | ||
17 | { | ||
18 | /* | ||
19 | * Note: may be called even when kaiser_map_thread_stack() failed. | ||
20 | */ | ||
21 | kaiser_remove_mapping((unsigned long)stack + | ||
22 | THREAD_SIZE - PAGE_SIZE, PAGE_SIZE); | ||
23 | } | ||
24 | #else | ||
25 | |||
26 | /* | ||
27 | * These stubs are used whenever CONFIG_PAGE_TABLE_ISOLATION is off, which | ||
28 | * includes architectures that support KAISER, but have it disabled. | ||
29 | */ | ||
30 | |||
31 | static inline void kaiser_init(void) | ||
32 | { | ||
33 | } | ||
34 | static inline int kaiser_add_mapping(unsigned long addr, | ||
35 | unsigned long size, u64 flags) | ||
36 | { | ||
37 | return 0; | ||
38 | } | ||
39 | static inline void kaiser_remove_mapping(unsigned long start, | ||
40 | unsigned long size) | ||
41 | { | ||
42 | } | ||
43 | static inline int kaiser_map_thread_stack(void *stack) | ||
44 | { | ||
45 | return 0; | ||
46 | } | ||
47 | static inline void kaiser_unmap_thread_stack(void *stack) | ||
48 | { | ||
49 | } | ||
50 | |||
51 | #endif /* !CONFIG_PAGE_TABLE_ISOLATION */ | ||
52 | #endif /* _LINUX_KAISER_H */ | ||
diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h index b33c7797eb57..a94b5bf57f51 100644 --- a/include/linux/kconfig.h +++ b/include/linux/kconfig.h | |||
@@ -17,10 +17,11 @@ | |||
17 | * the last step cherry picks the 2nd arg, we get a zero. | 17 | * the last step cherry picks the 2nd arg, we get a zero. |
18 | */ | 18 | */ |
19 | #define __ARG_PLACEHOLDER_1 0, | 19 | #define __ARG_PLACEHOLDER_1 0, |
20 | #define config_enabled(cfg) _config_enabled(cfg) | 20 | #define config_enabled(cfg) ___is_defined(cfg) |
21 | #define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value) | 21 | #define __is_defined(x) ___is_defined(x) |
22 | #define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0) | 22 | #define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val) |
23 | #define ___config_enabled(__ignored, val, ...) val | 23 | #define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0) |
24 | #define __take_second_arg(__ignored, val, ...) val | ||
24 | 25 | ||
25 | /* | 26 | /* |
26 | * IS_BUILTIN(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y', 0 | 27 | * IS_BUILTIN(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y', 0 |
@@ -42,7 +43,7 @@ | |||
42 | * built-in code when CONFIG_FOO is set to 'm'. | 43 | * built-in code when CONFIG_FOO is set to 'm'. |
43 | */ | 44 | */ |
44 | #define IS_REACHABLE(option) (config_enabled(option) || \ | 45 | #define IS_REACHABLE(option) (config_enabled(option) || \ |
45 | (config_enabled(option##_MODULE) && config_enabled(MODULE))) | 46 | (config_enabled(option##_MODULE) && __is_defined(MODULE))) |
46 | 47 | ||
47 | /* | 48 | /* |
48 | * IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm', | 49 | * IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm', |
diff --git a/include/linux/kexec.h b/include/linux/kexec.h index d86f17cf3530..b7bb5c49e85a 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h | |||
@@ -241,8 +241,6 @@ extern void crash_kexec(struct pt_regs *); | |||
241 | int kexec_should_crash(struct task_struct *); | 241 | int kexec_should_crash(struct task_struct *); |
242 | void crash_save_cpu(struct pt_regs *regs, int cpu); | 242 | void crash_save_cpu(struct pt_regs *regs, int cpu); |
243 | void crash_save_vmcoreinfo(void); | 243 | void crash_save_vmcoreinfo(void); |
244 | void crash_map_reserved_pages(void); | ||
245 | void crash_unmap_reserved_pages(void); | ||
246 | void arch_crash_save_vmcoreinfo(void); | 244 | void arch_crash_save_vmcoreinfo(void); |
247 | __printf(1, 2) | 245 | __printf(1, 2) |
248 | void vmcoreinfo_append_str(const char *fmt, ...); | 246 | void vmcoreinfo_append_str(const char *fmt, ...); |
@@ -328,6 +326,8 @@ int __weak arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr, | |||
328 | Elf_Shdr *sechdrs, unsigned int relsec); | 326 | Elf_Shdr *sechdrs, unsigned int relsec); |
329 | int __weak arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, | 327 | int __weak arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, |
330 | unsigned int relsec); | 328 | unsigned int relsec); |
329 | void arch_kexec_protect_crashkres(void); | ||
330 | void arch_kexec_unprotect_crashkres(void); | ||
331 | 331 | ||
332 | #else /* !CONFIG_KEXEC_CORE */ | 332 | #else /* !CONFIG_KEXEC_CORE */ |
333 | struct pt_regs; | 333 | struct pt_regs; |
diff --git a/include/linux/ktime.h b/include/linux/ktime.h index 2b6a204bd8d4..3ffc69ebe967 100644 --- a/include/linux/ktime.h +++ b/include/linux/ktime.h | |||
@@ -64,6 +64,13 @@ static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs) | |||
64 | ({ (ktime_t){ .tv64 = (lhs).tv64 + (rhs).tv64 }; }) | 64 | ({ (ktime_t){ .tv64 = (lhs).tv64 + (rhs).tv64 }; }) |
65 | 65 | ||
66 | /* | 66 | /* |
67 | * Same as ktime_add(), but avoids undefined behaviour on overflow; however, | ||
68 | * this means that you must check the result for overflow yourself. | ||
69 | */ | ||
70 | #define ktime_add_unsafe(lhs, rhs) \ | ||
71 | ({ (ktime_t){ .tv64 = (u64) (lhs).tv64 + (rhs).tv64 }; }) | ||
72 | |||
73 | /* | ||
67 | * Add a ktime_t variable and a scalar nanosecond value. | 74 | * Add a ktime_t variable and a scalar nanosecond value. |
68 | * res = kt + nsval: | 75 | * res = kt + nsval: |
69 | */ | 76 | */ |
diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h index 637522da6082..0a941fca6b9b 100644 --- a/include/linux/mfd/palmas.h +++ b/include/linux/mfd/palmas.h | |||
@@ -3706,6 +3706,9 @@ enum usb_irq_events { | |||
3706 | #define TPS65917_REGEN3_CTRL_MODE_ACTIVE 0x01 | 3706 | #define TPS65917_REGEN3_CTRL_MODE_ACTIVE 0x01 |
3707 | #define TPS65917_REGEN3_CTRL_MODE_ACTIVE_SHIFT 0x00 | 3707 | #define TPS65917_REGEN3_CTRL_MODE_ACTIVE_SHIFT 0x00 |
3708 | 3708 | ||
3709 | /* POWERHOLD Mask field for PRIMARY_SECONDARY_PAD2 register */ | ||
3710 | #define TPS65917_PRIMARY_SECONDARY_PAD2_GPIO_5_MASK 0xC | ||
3711 | |||
3709 | /* Registers for function RESOURCE */ | 3712 | /* Registers for function RESOURCE */ |
3710 | #define TPS65917_REGEN1_CTRL 0x2 | 3713 | #define TPS65917_REGEN1_CTRL 0x2 |
3711 | #define TPS65917_PLLEN_CTRL 0x3 | 3714 | #define TPS65917_PLLEN_CTRL 0x3 |
diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h index 70fffeba7495..a4441784503b 100644 --- a/include/linux/mmu_context.h +++ b/include/linux/mmu_context.h | |||
@@ -1,9 +1,16 @@ | |||
1 | #ifndef _LINUX_MMU_CONTEXT_H | 1 | #ifndef _LINUX_MMU_CONTEXT_H |
2 | #define _LINUX_MMU_CONTEXT_H | 2 | #define _LINUX_MMU_CONTEXT_H |
3 | 3 | ||
4 | #include <asm/mmu_context.h> | ||
5 | |||
4 | struct mm_struct; | 6 | struct mm_struct; |
5 | 7 | ||
6 | void use_mm(struct mm_struct *mm); | 8 | void use_mm(struct mm_struct *mm); |
7 | void unuse_mm(struct mm_struct *mm); | 9 | void unuse_mm(struct mm_struct *mm); |
8 | 10 | ||
11 | /* Architectures that care about IRQ state in switch_mm can override this. */ | ||
12 | #ifndef switch_mm_irqs_off | ||
13 | # define switch_mm_irqs_off switch_mm | ||
14 | #endif | ||
15 | |||
9 | #endif | 16 | #endif |
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 5fab999b320b..5d8b0349fb26 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -133,8 +133,9 @@ enum zone_stat_item { | |||
133 | NR_SLAB_RECLAIMABLE, | 133 | NR_SLAB_RECLAIMABLE, |
134 | NR_SLAB_UNRECLAIMABLE, | 134 | NR_SLAB_UNRECLAIMABLE, |
135 | NR_PAGETABLE, /* used for pagetables */ | 135 | NR_PAGETABLE, /* used for pagetables */ |
136 | NR_KERNEL_STACK, | ||
137 | /* Second 128 byte cacheline */ | 136 | /* Second 128 byte cacheline */ |
137 | NR_KERNEL_STACK, | ||
138 | NR_KAISERTABLE, | ||
138 | NR_UNSTABLE_NFS, /* NFS unstable pages */ | 139 | NR_UNSTABLE_NFS, /* NFS unstable pages */ |
139 | NR_BOUNCE, | 140 | NR_BOUNCE, |
140 | NR_VMSCAN_WRITE, | 141 | NR_VMSCAN_WRITE, |
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h index 806d0ab845e0..676d3d2a1a0a 100644 --- a/include/linux/mtd/map.h +++ b/include/linux/mtd/map.h | |||
@@ -265,75 +265,67 @@ void map_destroy(struct mtd_info *mtd); | |||
265 | #define INVALIDATE_CACHED_RANGE(map, from, size) \ | 265 | #define INVALIDATE_CACHED_RANGE(map, from, size) \ |
266 | do { if (map->inval_cache) map->inval_cache(map, from, size); } while (0) | 266 | do { if (map->inval_cache) map->inval_cache(map, from, size); } while (0) |
267 | 267 | ||
268 | 268 | #define map_word_equal(map, val1, val2) \ | |
269 | static inline int map_word_equal(struct map_info *map, map_word val1, map_word val2) | 269 | ({ \ |
270 | { | 270 | int i, ret = 1; \ |
271 | int i; | 271 | for (i = 0; i < map_words(map); i++) \ |
272 | 272 | if ((val1).x[i] != (val2).x[i]) { \ | |
273 | for (i = 0; i < map_words(map); i++) { | 273 | ret = 0; \ |
274 | if (val1.x[i] != val2.x[i]) | 274 | break; \ |
275 | return 0; | 275 | } \ |
276 | } | 276 | ret; \ |
277 | 277 | }) | |
278 | return 1; | 278 | |
279 | } | 279 | #define map_word_and(map, val1, val2) \ |
280 | 280 | ({ \ | |
281 | static inline map_word map_word_and(struct map_info *map, map_word val1, map_word val2) | 281 | map_word r; \ |
282 | { | 282 | int i; \ |
283 | map_word r; | 283 | for (i = 0; i < map_words(map); i++) \ |
284 | int i; | 284 | r.x[i] = (val1).x[i] & (val2).x[i]; \ |
285 | 285 | r; \ | |
286 | for (i = 0; i < map_words(map); i++) | 286 | }) |
287 | r.x[i] = val1.x[i] & val2.x[i]; | 287 | |
288 | 288 | #define map_word_clr(map, val1, val2) \ | |
289 | return r; | 289 | ({ \ |
290 | } | 290 | map_word r; \ |
291 | 291 | int i; \ | |
292 | static inline map_word map_word_clr(struct map_info *map, map_word val1, map_word val2) | 292 | for (i = 0; i < map_words(map); i++) \ |
293 | { | 293 | r.x[i] = (val1).x[i] & ~(val2).x[i]; \ |
294 | map_word r; | 294 | r; \ |
295 | int i; | 295 | }) |
296 | 296 | ||
297 | for (i = 0; i < map_words(map); i++) | 297 | #define map_word_or(map, val1, val2) \ |
298 | r.x[i] = val1.x[i] & ~val2.x[i]; | 298 | ({ \ |
299 | 299 | map_word r; \ | |
300 | return r; | 300 | int i; \ |
301 | } | 301 | for (i = 0; i < map_words(map); i++) \ |
302 | 302 | r.x[i] = (val1).x[i] | (val2).x[i]; \ | |
303 | static inline map_word map_word_or(struct map_info *map, map_word val1, map_word val2) | 303 | r; \ |
304 | { | 304 | }) |
305 | map_word r; | 305 | |
306 | int i; | 306 | #define map_word_andequal(map, val1, val2, val3) \ |
307 | 307 | ({ \ | |
308 | for (i = 0; i < map_words(map); i++) | 308 | int i, ret = 1; \ |
309 | r.x[i] = val1.x[i] | val2.x[i]; | 309 | for (i = 0; i < map_words(map); i++) { \ |
310 | 310 | if (((val1).x[i] & (val2).x[i]) != (val2).x[i]) { \ | |
311 | return r; | 311 | ret = 0; \ |
312 | } | 312 | break; \ |
313 | 313 | } \ | |
314 | static inline int map_word_andequal(struct map_info *map, map_word val1, map_word val2, map_word val3) | 314 | } \ |
315 | { | 315 | ret; \ |
316 | int i; | 316 | }) |
317 | 317 | ||
318 | for (i = 0; i < map_words(map); i++) { | 318 | #define map_word_bitsset(map, val1, val2) \ |
319 | if ((val1.x[i] & val2.x[i]) != val3.x[i]) | 319 | ({ \ |
320 | return 0; | 320 | int i, ret = 0; \ |
321 | } | 321 | for (i = 0; i < map_words(map); i++) { \ |
322 | 322 | if ((val1).x[i] & (val2).x[i]) { \ | |
323 | return 1; | 323 | ret = 1; \ |
324 | } | 324 | break; \ |
325 | 325 | } \ | |
326 | static inline int map_word_bitsset(struct map_info *map, map_word val1, map_word val2) | 326 | } \ |
327 | { | 327 | ret; \ |
328 | int i; | 328 | }) |
329 | |||
330 | for (i = 0; i < map_words(map); i++) { | ||
331 | if (val1.x[i] & val2.x[i]) | ||
332 | return 1; | ||
333 | } | ||
334 | |||
335 | return 0; | ||
336 | } | ||
337 | 329 | ||
338 | static inline map_word map_word_load(struct map_info *map, const void *ptr) | 330 | static inline map_word map_word_load(struct map_info *map, const void *ptr) |
339 | { | 331 | { |
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 04078e8a4803..d6c53fce006b 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h | |||
@@ -243,6 +243,10 @@ int xt_check_entry_offsets(const void *base, const char *elems, | |||
243 | unsigned int target_offset, | 243 | unsigned int target_offset, |
244 | unsigned int next_offset); | 244 | unsigned int next_offset); |
245 | 245 | ||
246 | unsigned int *xt_alloc_entry_offsets(unsigned int size); | ||
247 | bool xt_find_jump_offset(const unsigned int *offsets, | ||
248 | unsigned int target, unsigned int size); | ||
249 | |||
246 | int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto, | 250 | int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto, |
247 | bool inv_proto); | 251 | bool inv_proto); |
248 | int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto, | 252 | int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto, |
@@ -377,16 +381,16 @@ static inline unsigned long ifname_compare_aligned(const char *_a, | |||
377 | * allows us to return 0 for single core systems without forcing | 381 | * allows us to return 0 for single core systems without forcing |
378 | * callers to deal with SMP vs. NONSMP issues. | 382 | * callers to deal with SMP vs. NONSMP issues. |
379 | */ | 383 | */ |
380 | static inline u64 xt_percpu_counter_alloc(void) | 384 | static inline unsigned long xt_percpu_counter_alloc(void) |
381 | { | 385 | { |
382 | if (nr_cpu_ids > 1) { | 386 | if (nr_cpu_ids > 1) { |
383 | void __percpu *res = __alloc_percpu(sizeof(struct xt_counters), | 387 | void __percpu *res = __alloc_percpu(sizeof(struct xt_counters), |
384 | sizeof(struct xt_counters)); | 388 | sizeof(struct xt_counters)); |
385 | 389 | ||
386 | if (res == NULL) | 390 | if (res == NULL) |
387 | return (u64) -ENOMEM; | 391 | return -ENOMEM; |
388 | 392 | ||
389 | return (u64) (__force unsigned long) res; | 393 | return (__force unsigned long) res; |
390 | } | 394 | } |
391 | 395 | ||
392 | return 0; | 396 | return 0; |
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h index 8f16299ca068..8902f23bb770 100644 --- a/include/linux/percpu-defs.h +++ b/include/linux/percpu-defs.h | |||
@@ -35,6 +35,12 @@ | |||
35 | 35 | ||
36 | #endif | 36 | #endif |
37 | 37 | ||
38 | #ifdef CONFIG_PAGE_TABLE_ISOLATION | ||
39 | #define USER_MAPPED_SECTION "..user_mapped" | ||
40 | #else | ||
41 | #define USER_MAPPED_SECTION "" | ||
42 | #endif | ||
43 | |||
38 | /* | 44 | /* |
39 | * Base implementations of per-CPU variable declarations and definitions, where | 45 | * Base implementations of per-CPU variable declarations and definitions, where |
40 | * the section in which the variable is to be placed is provided by the | 46 | * the section in which the variable is to be placed is provided by the |
@@ -115,6 +121,12 @@ | |||
115 | #define DEFINE_PER_CPU(type, name) \ | 121 | #define DEFINE_PER_CPU(type, name) \ |
116 | DEFINE_PER_CPU_SECTION(type, name, "") | 122 | DEFINE_PER_CPU_SECTION(type, name, "") |
117 | 123 | ||
124 | #define DECLARE_PER_CPU_USER_MAPPED(type, name) \ | ||
125 | DECLARE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION) | ||
126 | |||
127 | #define DEFINE_PER_CPU_USER_MAPPED(type, name) \ | ||
128 | DEFINE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION) | ||
129 | |||
118 | /* | 130 | /* |
119 | * Declaration/definition used for per-CPU variables that must come first in | 131 | * Declaration/definition used for per-CPU variables that must come first in |
120 | * the set of variables. | 132 | * the set of variables. |
@@ -144,6 +156,14 @@ | |||
144 | DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \ | 156 | DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \ |
145 | ____cacheline_aligned_in_smp | 157 | ____cacheline_aligned_in_smp |
146 | 158 | ||
159 | #define DECLARE_PER_CPU_SHARED_ALIGNED_USER_MAPPED(type, name) \ | ||
160 | DECLARE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION PER_CPU_SHARED_ALIGNED_SECTION) \ | ||
161 | ____cacheline_aligned_in_smp | ||
162 | |||
163 | #define DEFINE_PER_CPU_SHARED_ALIGNED_USER_MAPPED(type, name) \ | ||
164 | DEFINE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION PER_CPU_SHARED_ALIGNED_SECTION) \ | ||
165 | ____cacheline_aligned_in_smp | ||
166 | |||
147 | #define DECLARE_PER_CPU_ALIGNED(type, name) \ | 167 | #define DECLARE_PER_CPU_ALIGNED(type, name) \ |
148 | DECLARE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION) \ | 168 | DECLARE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION) \ |
149 | ____cacheline_aligned | 169 | ____cacheline_aligned |
@@ -162,11 +182,21 @@ | |||
162 | #define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \ | 182 | #define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \ |
163 | DEFINE_PER_CPU_SECTION(type, name, "..page_aligned") \ | 183 | DEFINE_PER_CPU_SECTION(type, name, "..page_aligned") \ |
164 | __aligned(PAGE_SIZE) | 184 | __aligned(PAGE_SIZE) |
185 | /* | ||
186 | * Declaration/definition used for per-CPU variables that must be page aligned and need to be mapped in user mode. | ||
187 | */ | ||
188 | #define DECLARE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(type, name) \ | ||
189 | DECLARE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION"..page_aligned") \ | ||
190 | __aligned(PAGE_SIZE) | ||
191 | |||
192 | #define DEFINE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(type, name) \ | ||
193 | DEFINE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION"..page_aligned") \ | ||
194 | __aligned(PAGE_SIZE) | ||
165 | 195 | ||
166 | /* | 196 | /* |
167 | * Declaration/definition used for per-CPU variables that must be read mostly. | 197 | * Declaration/definition used for per-CPU variables that must be read mostly. |
168 | */ | 198 | */ |
169 | #define DECLARE_PER_CPU_READ_MOSTLY(type, name) \ | 199 | #define DECLARE_PER_CPU_READ_MOSTLY(type, name) \ |
170 | DECLARE_PER_CPU_SECTION(type, name, "..read_mostly") | 200 | DECLARE_PER_CPU_SECTION(type, name, "..read_mostly") |
171 | 201 | ||
172 | #define DEFINE_PER_CPU_READ_MOSTLY(type, name) \ | 202 | #define DEFINE_PER_CPU_READ_MOSTLY(type, name) \ |
diff --git a/include/linux/phy.h b/include/linux/phy.h index 5bc4b9d563a9..dbfd5ce9350f 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h | |||
@@ -683,6 +683,17 @@ static inline bool phy_is_internal(struct phy_device *phydev) | |||
683 | } | 683 | } |
684 | 684 | ||
685 | /** | 685 | /** |
686 | * phy_interface_mode_is_rgmii - Convenience function for testing if a | ||
687 | * PHY interface mode is RGMII (all variants) | ||
688 | * @mode: the phy_interface_t enum | ||
689 | */ | ||
690 | static inline bool phy_interface_mode_is_rgmii(phy_interface_t mode) | ||
691 | { | ||
692 | return mode >= PHY_INTERFACE_MODE_RGMII && | ||
693 | mode <= PHY_INTERFACE_MODE_RGMII_TXID; | ||
694 | }; | ||
695 | |||
696 | /** | ||
686 | * phy_interface_is_rgmii - Convenience function for testing if a PHY interface | 697 | * phy_interface_is_rgmii - Convenience function for testing if a PHY interface |
687 | * is RGMII (all variants) | 698 | * is RGMII (all variants) |
688 | * @phydev: the phy_device struct | 699 | * @phydev: the phy_device struct |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 1cb21ffe1fb0..2414b7c0990c 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -1453,6 +1453,7 @@ struct sched_dl_entity { | |||
1453 | u64 dl_deadline; /* relative deadline of each instance */ | 1453 | u64 dl_deadline; /* relative deadline of each instance */ |
1454 | u64 dl_period; /* separation of two instances (period) */ | 1454 | u64 dl_period; /* separation of two instances (period) */ |
1455 | u64 dl_bw; /* dl_runtime / dl_deadline */ | 1455 | u64 dl_bw; /* dl_runtime / dl_deadline */ |
1456 | u64 dl_density; /* dl_runtime / dl_deadline */ | ||
1456 | 1457 | ||
1457 | /* | 1458 | /* |
1458 | * Actual scheduling parameters. Initialized with the values above, | 1459 | * Actual scheduling parameters. Initialized with the values above, |
diff --git a/include/linux/sh_eth.h b/include/linux/sh_eth.h index 8c9131db2b25..b050ef51e27e 100644 --- a/include/linux/sh_eth.h +++ b/include/linux/sh_eth.h | |||
@@ -16,7 +16,6 @@ struct sh_eth_plat_data { | |||
16 | unsigned char mac_addr[ETH_ALEN]; | 16 | unsigned char mac_addr[ETH_ALEN]; |
17 | unsigned no_ether_link:1; | 17 | unsigned no_ether_link:1; |
18 | unsigned ether_link_active_low:1; | 18 | unsigned ether_link_active_low:1; |
19 | unsigned needs_init:1; | ||
20 | }; | 19 | }; |
21 | 20 | ||
22 | #endif | 21 | #endif |
diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 318c24612458..2260f92f1492 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h | |||
@@ -29,9 +29,14 @@ static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb) | |||
29 | return (struct tcphdr *)skb_transport_header(skb); | 29 | return (struct tcphdr *)skb_transport_header(skb); |
30 | } | 30 | } |
31 | 31 | ||
32 | static inline unsigned int __tcp_hdrlen(const struct tcphdr *th) | ||
33 | { | ||
34 | return th->doff * 4; | ||
35 | } | ||
36 | |||
32 | static inline unsigned int tcp_hdrlen(const struct sk_buff *skb) | 37 | static inline unsigned int tcp_hdrlen(const struct sk_buff *skb) |
33 | { | 38 | { |
34 | return tcp_hdr(skb)->doff * 4; | 39 | return __tcp_hdrlen(tcp_hdr(skb)); |
35 | } | 40 | } |
36 | 41 | ||
37 | static inline struct tcphdr *inner_tcp_hdr(const struct sk_buff *skb) | 42 | static inline struct tcphdr *inner_tcp_hdr(const struct sk_buff *skb) |
diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h index 8bb60e38b8a3..8d3934a42654 100644 --- a/include/linux/usb/otg.h +++ b/include/linux/usb/otg.h | |||
@@ -164,6 +164,7 @@ int usb_otg_unregister(struct device *dev); | |||
164 | int usb_otg_register_hcd(struct usb_hcd *hcd, unsigned int irqnum, | 164 | int usb_otg_register_hcd(struct usb_hcd *hcd, unsigned int irqnum, |
165 | unsigned long irqflags, struct otg_hcd_ops *ops); | 165 | unsigned long irqflags, struct otg_hcd_ops *ops); |
166 | int usb_otg_unregister_hcd(struct usb_hcd *hcd); | 166 | int usb_otg_unregister_hcd(struct usb_hcd *hcd); |
167 | int usb_otg_shutdown_hcd(struct usb_hcd *hcd); | ||
167 | int usb_otg_register_gadget(struct usb_gadget *gadget, | 168 | int usb_otg_register_gadget(struct usb_gadget *gadget, |
168 | struct otg_gadget_ops *ops); | 169 | struct otg_gadget_ops *ops); |
169 | int usb_otg_unregister_gadget(struct usb_gadget *gadget); | 170 | int usb_otg_unregister_gadget(struct usb_gadget *gadget); |
@@ -198,6 +199,11 @@ static inline int usb_otg_unregister_hcd(struct usb_hcd *hcd) | |||
198 | return -ENOTSUPP; | 199 | return -ENOTSUPP; |
199 | } | 200 | } |
200 | 201 | ||
202 | static inline int usb_otg_shutdown_hcd(struct usb_hcd *hcd) | ||
203 | { | ||
204 | return -ENOTSUPP; | ||
205 | } | ||
206 | |||
201 | static inline int usb_otg_register_gadget(struct usb_gadget *gadget, | 207 | static inline int usb_otg_register_gadget(struct usb_gadget *gadget, |
202 | struct otg_gadget_ops *ops) | 208 | struct otg_gadget_ops *ops) |
203 | { | 209 | { |
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index e623d392db0c..8ef3a61fdc74 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h | |||
@@ -80,10 +80,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, | |||
80 | #endif | 80 | #endif |
81 | #endif | 81 | #endif |
82 | #ifdef CONFIG_DEBUG_TLBFLUSH | 82 | #ifdef CONFIG_DEBUG_TLBFLUSH |
83 | #ifdef CONFIG_SMP | ||
84 | NR_TLB_REMOTE_FLUSH, /* cpu tried to flush others' tlbs */ | 83 | NR_TLB_REMOTE_FLUSH, /* cpu tried to flush others' tlbs */ |
85 | NR_TLB_REMOTE_FLUSH_RECEIVED,/* cpu received ipi for flush */ | 84 | NR_TLB_REMOTE_FLUSH_RECEIVED,/* cpu received ipi for flush */ |
86 | #endif /* CONFIG_SMP */ | ||
87 | NR_TLB_LOCAL_FLUSH_ALL, | 85 | NR_TLB_LOCAL_FLUSH_ALL, |
88 | NR_TLB_LOCAL_FLUSH_ONE, | 86 | NR_TLB_LOCAL_FLUSH_ONE, |
89 | #endif /* CONFIG_DEBUG_TLBFLUSH */ | 87 | #endif /* CONFIG_DEBUG_TLBFLUSH */ |
diff --git a/include/net/arp.h b/include/net/arp.h index 5e0f891d476c..1b3f86981757 100644 --- a/include/net/arp.h +++ b/include/net/arp.h | |||
@@ -19,6 +19,9 @@ static inline u32 arp_hashfn(const void *pkey, const struct net_device *dev, u32 | |||
19 | 19 | ||
20 | static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key) | 20 | static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key) |
21 | { | 21 | { |
22 | if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT)) | ||
23 | key = INADDR_ANY; | ||
24 | |||
22 | return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev); | 25 | return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev); |
23 | } | 26 | } |
24 | 27 | ||
diff --git a/include/net/ip.h b/include/net/ip.h index c5d8ee796b38..119c6cae8380 100644 --- a/include/net/ip.h +++ b/include/net/ip.h | |||
@@ -33,6 +33,8 @@ | |||
33 | #include <net/flow.h> | 33 | #include <net/flow.h> |
34 | #include <net/flow_dissector.h> | 34 | #include <net/flow_dissector.h> |
35 | 35 | ||
36 | #define IPV4_MIN_MTU 68 /* RFC 791 */ | ||
37 | |||
36 | struct sock; | 38 | struct sock; |
37 | 39 | ||
38 | struct inet_skb_parm { | 40 | struct inet_skb_parm { |
diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 7a8066b90289..84f0d0602433 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h | |||
@@ -281,6 +281,7 @@ int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq, | |||
281 | int flags); | 281 | int flags); |
282 | int ip6_flowlabel_init(void); | 282 | int ip6_flowlabel_init(void); |
283 | void ip6_flowlabel_cleanup(void); | 283 | void ip6_flowlabel_cleanup(void); |
284 | bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np); | ||
284 | 285 | ||
285 | static inline void fl6_sock_release(struct ip6_flowlabel *fl) | 286 | static inline void fl6_sock_release(struct ip6_flowlabel *fl) |
286 | { | 287 | { |
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 2dcea635ecce..93328c61934a 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h | |||
@@ -209,6 +209,11 @@ int net_eq(const struct net *net1, const struct net *net2) | |||
209 | return net1 == net2; | 209 | return net1 == net2; |
210 | } | 210 | } |
211 | 211 | ||
212 | static inline int check_net(const struct net *net) | ||
213 | { | ||
214 | return atomic_read(&net->count) != 0; | ||
215 | } | ||
216 | |||
212 | void net_drop_ns(void *); | 217 | void net_drop_ns(void *); |
213 | 218 | ||
214 | #else | 219 | #else |
@@ -233,6 +238,11 @@ int net_eq(const struct net *net1, const struct net *net2) | |||
233 | return 1; | 238 | return 1; |
234 | } | 239 | } |
235 | 240 | ||
241 | static inline int check_net(const struct net *net) | ||
242 | { | ||
243 | return 1; | ||
244 | } | ||
245 | |||
236 | #define net_drop_ns NULL | 246 | #define net_drop_ns NULL |
237 | #endif | 247 | #endif |
238 | 248 | ||
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h index 9c5638ad872e..0dbce55437f2 100644 --- a/include/net/netfilter/nf_queue.h +++ b/include/net/netfilter/nf_queue.h | |||
@@ -28,8 +28,8 @@ struct nf_queue_handler { | |||
28 | struct nf_hook_ops *ops); | 28 | struct nf_hook_ops *ops); |
29 | }; | 29 | }; |
30 | 30 | ||
31 | void nf_register_queue_handler(const struct nf_queue_handler *qh); | 31 | void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh); |
32 | void nf_unregister_queue_handler(void); | 32 | void nf_unregister_queue_handler(struct net *net); |
33 | void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict); | 33 | void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict); |
34 | 34 | ||
35 | void nf_queue_entry_get_refs(struct nf_queue_entry *entry); | 35 | void nf_queue_entry_get_refs(struct nf_queue_entry *entry); |
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h index 38aa4983e2a9..36d723579af2 100644 --- a/include/net/netns/netfilter.h +++ b/include/net/netns/netfilter.h | |||
@@ -5,11 +5,13 @@ | |||
5 | 5 | ||
6 | struct proc_dir_entry; | 6 | struct proc_dir_entry; |
7 | struct nf_logger; | 7 | struct nf_logger; |
8 | struct nf_queue_handler; | ||
8 | 9 | ||
9 | struct netns_nf { | 10 | struct netns_nf { |
10 | #if defined CONFIG_PROC_FS | 11 | #if defined CONFIG_PROC_FS |
11 | struct proc_dir_entry *proc_netfilter; | 12 | struct proc_dir_entry *proc_netfilter; |
12 | #endif | 13 | #endif |
14 | const struct nf_queue_handler __rcu *queue_handler; | ||
13 | const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO]; | 15 | const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO]; |
14 | #ifdef CONFIG_SYSCTL | 16 | #ifdef CONFIG_SYSCTL |
15 | struct ctl_table_header *nf_log_dir_header; | 17 | struct ctl_table_header *nf_log_dir_header; |
diff --git a/include/scsi/sg.h b/include/scsi/sg.h index 3afec7032448..20bc71c3e0b8 100644 --- a/include/scsi/sg.h +++ b/include/scsi/sg.h | |||
@@ -197,7 +197,6 @@ typedef struct sg_req_info { /* used by SG_GET_REQUEST_TABLE ioctl() */ | |||
197 | #define SG_DEFAULT_RETRIES 0 | 197 | #define SG_DEFAULT_RETRIES 0 |
198 | 198 | ||
199 | /* Defaults, commented if they differ from original sg driver */ | 199 | /* Defaults, commented if they differ from original sg driver */ |
200 | #define SG_DEF_FORCE_LOW_DMA 0 /* was 1 -> memory below 16MB on i386 */ | ||
201 | #define SG_DEF_FORCE_PACK_ID 0 | 200 | #define SG_DEF_FORCE_PACK_ID 0 |
202 | #define SG_DEF_KEEP_ORPHAN 0 | 201 | #define SG_DEF_KEEP_ORPHAN 0 |
203 | #define SG_DEF_RESERVED_SIZE SG_SCATTER_SZ /* load time option */ | 202 | #define SG_DEF_RESERVED_SIZE SG_SCATTER_SZ /* load time option */ |
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 9982a2bcb880..0eed9fd79ea5 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h | |||
@@ -496,6 +496,7 @@ struct se_cmd { | |||
496 | #define CMD_T_BUSY (1 << 9) | 496 | #define CMD_T_BUSY (1 << 9) |
497 | #define CMD_T_TAS (1 << 10) | 497 | #define CMD_T_TAS (1 << 10) |
498 | #define CMD_T_FABRIC_STOP (1 << 11) | 498 | #define CMD_T_FABRIC_STOP (1 << 11) |
499 | #define CMD_T_PRE_EXECUTE (1 << 12) | ||
499 | spinlock_t t_state_lock; | 500 | spinlock_t t_state_lock; |
500 | struct kref cmd_kref; | 501 | struct kref cmd_kref; |
501 | struct completion t_transport_stop_comp; | 502 | struct completion t_transport_stop_comp; |
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h index d6f83222a6a1..67ff6555967f 100644 --- a/include/trace/events/kvm.h +++ b/include/trace/events/kvm.h | |||
@@ -204,7 +204,7 @@ TRACE_EVENT(kvm_ack_irq, | |||
204 | { KVM_TRACE_MMIO_WRITE, "write" } | 204 | { KVM_TRACE_MMIO_WRITE, "write" } |
205 | 205 | ||
206 | TRACE_EVENT(kvm_mmio, | 206 | TRACE_EVENT(kvm_mmio, |
207 | TP_PROTO(int type, int len, u64 gpa, u64 val), | 207 | TP_PROTO(int type, int len, u64 gpa, void *val), |
208 | TP_ARGS(type, len, gpa, val), | 208 | TP_ARGS(type, len, gpa, val), |
209 | 209 | ||
210 | TP_STRUCT__entry( | 210 | TP_STRUCT__entry( |
@@ -218,7 +218,10 @@ TRACE_EVENT(kvm_mmio, | |||
218 | __entry->type = type; | 218 | __entry->type = type; |
219 | __entry->len = len; | 219 | __entry->len = len; |
220 | __entry->gpa = gpa; | 220 | __entry->gpa = gpa; |
221 | __entry->val = val; | 221 | __entry->val = 0; |
222 | if (val) | ||
223 | memcpy(&__entry->val, val, | ||
224 | min_t(u32, sizeof(__entry->val), len)); | ||
222 | ), | 225 | ), |
223 | 226 | ||
224 | TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx", | 227 | TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx", |
diff --git a/include/uapi/linux/eventpoll.h b/include/uapi/linux/eventpoll.h index bc81fb2e1f0e..6f04cb419115 100644 --- a/include/uapi/linux/eventpoll.h +++ b/include/uapi/linux/eventpoll.h | |||
@@ -26,6 +26,19 @@ | |||
26 | #define EPOLL_CTL_DEL 2 | 26 | #define EPOLL_CTL_DEL 2 |
27 | #define EPOLL_CTL_MOD 3 | 27 | #define EPOLL_CTL_MOD 3 |
28 | 28 | ||
29 | /* Epoll event masks */ | ||
30 | #define EPOLLIN 0x00000001 | ||
31 | #define EPOLLPRI 0x00000002 | ||
32 | #define EPOLLOUT 0x00000004 | ||
33 | #define EPOLLERR 0x00000008 | ||
34 | #define EPOLLHUP 0x00000010 | ||
35 | #define EPOLLRDNORM 0x00000040 | ||
36 | #define EPOLLRDBAND 0x00000080 | ||
37 | #define EPOLLWRNORM 0x00000100 | ||
38 | #define EPOLLWRBAND 0x00000200 | ||
39 | #define EPOLLMSG 0x00000400 | ||
40 | #define EPOLLRDHUP 0x00002000 | ||
41 | |||
29 | /* | 42 | /* |
30 | * Request the handling of system wakeup events so as to prevent system suspends | 43 | * Request the handling of system wakeup events so as to prevent system suspends |
31 | * from happening while those events are being processed. | 44 | * from happening while those events are being processed. |
diff --git a/init/Kconfig b/init/Kconfig index f5500e552254..a35e73a4e55d 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
@@ -1630,6 +1630,13 @@ config BPF_SYSCALL | |||
1630 | Enable the bpf() system call that allows to manipulate eBPF | 1630 | Enable the bpf() system call that allows to manipulate eBPF |
1631 | programs and maps via file descriptors. | 1631 | programs and maps via file descriptors. |
1632 | 1632 | ||
1633 | config BPF_JIT_ALWAYS_ON | ||
1634 | bool "Permanently enable BPF JIT and remove BPF interpreter" | ||
1635 | depends on BPF_SYSCALL && HAVE_EBPF_JIT && BPF_JIT | ||
1636 | help | ||
1637 | Enables BPF JIT and removes BPF interpreter to avoid | ||
1638 | speculative execution of BPF instructions by the interpreter | ||
1639 | |||
1633 | config SHMEM | 1640 | config SHMEM |
1634 | bool "Use full shmem filesystem" if EXPERT | 1641 | bool "Use full shmem filesystem" if EXPERT |
1635 | default y | 1642 | default y |
diff --git a/init/main.c b/init/main.c index 86f5ce9ede86..db6b6cbb846b 100644 --- a/init/main.c +++ b/init/main.c | |||
@@ -81,6 +81,7 @@ | |||
81 | #include <linux/integrity.h> | 81 | #include <linux/integrity.h> |
82 | #include <linux/proc_ns.h> | 82 | #include <linux/proc_ns.h> |
83 | #include <linux/io.h> | 83 | #include <linux/io.h> |
84 | #include <linux/kaiser.h> | ||
84 | 85 | ||
85 | #include <asm/io.h> | 86 | #include <asm/io.h> |
86 | #include <asm/bugs.h> | 87 | #include <asm/bugs.h> |
@@ -489,6 +490,7 @@ static void __init mm_init(void) | |||
489 | pgtable_init(); | 490 | pgtable_init(); |
490 | vmalloc_init(); | 491 | vmalloc_init(); |
491 | ioremap_huge_init(); | 492 | ioremap_huge_init(); |
493 | kaiser_init(); | ||
492 | } | 494 | } |
493 | 495 | ||
494 | asmlinkage __visible void __init start_kernel(void) | 496 | asmlinkage __visible void __init start_kernel(void) |
@@ -742,7 +742,10 @@ static inline int convert_mode(long *msgtyp, int msgflg) | |||
742 | if (*msgtyp == 0) | 742 | if (*msgtyp == 0) |
743 | return SEARCH_ANY; | 743 | return SEARCH_ANY; |
744 | if (*msgtyp < 0) { | 744 | if (*msgtyp < 0) { |
745 | *msgtyp = -*msgtyp; | 745 | if (*msgtyp == LONG_MIN) /* -LONG_MIN is undefined */ |
746 | *msgtyp = LONG_MAX; | ||
747 | else | ||
748 | *msgtyp = -*msgtyp; | ||
746 | return SEARCH_LESSEQUAL; | 749 | return SEARCH_LESSEQUAL; |
747 | } | 750 | } |
748 | if (msgflg & MSG_EXCEPT) | 751 | if (msgflg & MSG_EXCEPT) |
diff --git a/kernel/acct.c b/kernel/acct.c index 74963d192c5d..37f1dc696fbd 100644 --- a/kernel/acct.c +++ b/kernel/acct.c | |||
@@ -99,7 +99,7 @@ static int check_free_space(struct bsd_acct_struct *acct) | |||
99 | { | 99 | { |
100 | struct kstatfs sbuf; | 100 | struct kstatfs sbuf; |
101 | 101 | ||
102 | if (time_is_before_jiffies(acct->needcheck)) | 102 | if (time_is_after_jiffies(acct->needcheck)) |
103 | goto out; | 103 | goto out; |
104 | 104 | ||
105 | /* May block */ | 105 | /* May block */ |
diff --git a/kernel/async.c b/kernel/async.c index 4c3773c0bf63..f1fd155abff6 100644 --- a/kernel/async.c +++ b/kernel/async.c | |||
@@ -84,20 +84,24 @@ static atomic_t entry_count; | |||
84 | 84 | ||
85 | static async_cookie_t lowest_in_progress(struct async_domain *domain) | 85 | static async_cookie_t lowest_in_progress(struct async_domain *domain) |
86 | { | 86 | { |
87 | struct list_head *pending; | 87 | struct async_entry *first = NULL; |
88 | async_cookie_t ret = ASYNC_COOKIE_MAX; | 88 | async_cookie_t ret = ASYNC_COOKIE_MAX; |
89 | unsigned long flags; | 89 | unsigned long flags; |
90 | 90 | ||
91 | spin_lock_irqsave(&async_lock, flags); | 91 | spin_lock_irqsave(&async_lock, flags); |
92 | 92 | ||
93 | if (domain) | 93 | if (domain) { |
94 | pending = &domain->pending; | 94 | if (!list_empty(&domain->pending)) |
95 | else | 95 | first = list_first_entry(&domain->pending, |
96 | pending = &async_global_pending; | 96 | struct async_entry, domain_list); |
97 | } else { | ||
98 | if (!list_empty(&async_global_pending)) | ||
99 | first = list_first_entry(&async_global_pending, | ||
100 | struct async_entry, global_list); | ||
101 | } | ||
97 | 102 | ||
98 | if (!list_empty(pending)) | 103 | if (first) |
99 | ret = list_first_entry(pending, struct async_entry, | 104 | ret = first->cookie; |
100 | domain_list)->cookie; | ||
101 | 105 | ||
102 | spin_unlock_irqrestore(&async_lock, flags); | 106 | spin_unlock_irqrestore(&async_lock, flags); |
103 | return ret; | 107 | return ret; |
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index b0799bced518..3608fa1aec8a 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c | |||
@@ -20,8 +20,10 @@ | |||
20 | /* Called from syscall */ | 20 | /* Called from syscall */ |
21 | static struct bpf_map *array_map_alloc(union bpf_attr *attr) | 21 | static struct bpf_map *array_map_alloc(union bpf_attr *attr) |
22 | { | 22 | { |
23 | u32 elem_size, array_size, index_mask, max_entries; | ||
24 | bool unpriv = !capable(CAP_SYS_ADMIN); | ||
23 | struct bpf_array *array; | 25 | struct bpf_array *array; |
24 | u32 elem_size, array_size; | 26 | u64 mask64; |
25 | 27 | ||
26 | /* check sanity of attributes */ | 28 | /* check sanity of attributes */ |
27 | if (attr->max_entries == 0 || attr->key_size != 4 || | 29 | if (attr->max_entries == 0 || attr->key_size != 4 || |
@@ -36,12 +38,33 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr) | |||
36 | 38 | ||
37 | elem_size = round_up(attr->value_size, 8); | 39 | elem_size = round_up(attr->value_size, 8); |
38 | 40 | ||
41 | max_entries = attr->max_entries; | ||
42 | |||
43 | /* On 32 bit archs roundup_pow_of_two() with max_entries that has | ||
44 | * upper most bit set in u32 space is undefined behavior due to | ||
45 | * resulting 1U << 32, so do it manually here in u64 space. | ||
46 | */ | ||
47 | mask64 = fls_long(max_entries - 1); | ||
48 | mask64 = 1ULL << mask64; | ||
49 | mask64 -= 1; | ||
50 | |||
51 | index_mask = mask64; | ||
52 | if (unpriv) { | ||
53 | /* round up array size to nearest power of 2, | ||
54 | * since cpu will speculate within index_mask limits | ||
55 | */ | ||
56 | max_entries = index_mask + 1; | ||
57 | /* Check for overflows. */ | ||
58 | if (max_entries < attr->max_entries) | ||
59 | return ERR_PTR(-E2BIG); | ||
60 | } | ||
61 | |||
39 | /* check round_up into zero and u32 overflow */ | 62 | /* check round_up into zero and u32 overflow */ |
40 | if (elem_size == 0 || | 63 | if (elem_size == 0 || |
41 | attr->max_entries > (U32_MAX - PAGE_SIZE - sizeof(*array)) / elem_size) | 64 | max_entries > (U32_MAX - PAGE_SIZE - sizeof(*array)) / elem_size) |
42 | return ERR_PTR(-ENOMEM); | 65 | return ERR_PTR(-ENOMEM); |
43 | 66 | ||
44 | array_size = sizeof(*array) + attr->max_entries * elem_size; | 67 | array_size = sizeof(*array) + max_entries * elem_size; |
45 | 68 | ||
46 | /* allocate all map elements and zero-initialize them */ | 69 | /* allocate all map elements and zero-initialize them */ |
47 | array = kzalloc(array_size, GFP_USER | __GFP_NOWARN); | 70 | array = kzalloc(array_size, GFP_USER | __GFP_NOWARN); |
@@ -50,6 +73,8 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr) | |||
50 | if (!array) | 73 | if (!array) |
51 | return ERR_PTR(-ENOMEM); | 74 | return ERR_PTR(-ENOMEM); |
52 | } | 75 | } |
76 | array->index_mask = index_mask; | ||
77 | array->map.unpriv_array = unpriv; | ||
53 | 78 | ||
54 | /* copy mandatory map attributes */ | 79 | /* copy mandatory map attributes */ |
55 | array->map.key_size = attr->key_size; | 80 | array->map.key_size = attr->key_size; |
@@ -70,7 +95,7 @@ static void *array_map_lookup_elem(struct bpf_map *map, void *key) | |||
70 | if (index >= array->map.max_entries) | 95 | if (index >= array->map.max_entries) |
71 | return NULL; | 96 | return NULL; |
72 | 97 | ||
73 | return array->value + array->elem_size * index; | 98 | return array->value + array->elem_size * (index & array->index_mask); |
74 | } | 99 | } |
75 | 100 | ||
76 | /* Called from syscall */ | 101 | /* Called from syscall */ |
@@ -111,7 +136,9 @@ static int array_map_update_elem(struct bpf_map *map, void *key, void *value, | |||
111 | /* all elements already exist */ | 136 | /* all elements already exist */ |
112 | return -EEXIST; | 137 | return -EEXIST; |
113 | 138 | ||
114 | memcpy(array->value + array->elem_size * index, value, map->value_size); | 139 | memcpy(array->value + |
140 | array->elem_size * (index & array->index_mask), | ||
141 | value, map->value_size); | ||
115 | return 0; | 142 | return 0; |
116 | } | 143 | } |
117 | 144 | ||
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 334b1bdd572c..eb52d11fdaa7 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c | |||
@@ -137,6 +137,77 @@ void __bpf_prog_free(struct bpf_prog *fp) | |||
137 | } | 137 | } |
138 | EXPORT_SYMBOL_GPL(__bpf_prog_free); | 138 | EXPORT_SYMBOL_GPL(__bpf_prog_free); |
139 | 139 | ||
140 | static bool bpf_is_jmp_and_has_target(const struct bpf_insn *insn) | ||
141 | { | ||
142 | return BPF_CLASS(insn->code) == BPF_JMP && | ||
143 | /* Call and Exit are both special jumps with no | ||
144 | * target inside the BPF instruction image. | ||
145 | */ | ||
146 | BPF_OP(insn->code) != BPF_CALL && | ||
147 | BPF_OP(insn->code) != BPF_EXIT; | ||
148 | } | ||
149 | |||
150 | static void bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta) | ||
151 | { | ||
152 | struct bpf_insn *insn = prog->insnsi; | ||
153 | u32 i, insn_cnt = prog->len; | ||
154 | |||
155 | for (i = 0; i < insn_cnt; i++, insn++) { | ||
156 | if (!bpf_is_jmp_and_has_target(insn)) | ||
157 | continue; | ||
158 | |||
159 | /* Adjust offset of jmps if we cross boundaries. */ | ||
160 | if (i < pos && i + insn->off + 1 > pos) | ||
161 | insn->off += delta; | ||
162 | else if (i > pos + delta && i + insn->off + 1 <= pos + delta) | ||
163 | insn->off -= delta; | ||
164 | } | ||
165 | } | ||
166 | |||
167 | struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, | ||
168 | const struct bpf_insn *patch, u32 len) | ||
169 | { | ||
170 | u32 insn_adj_cnt, insn_rest, insn_delta = len - 1; | ||
171 | struct bpf_prog *prog_adj; | ||
172 | |||
173 | /* Since our patchlet doesn't expand the image, we're done. */ | ||
174 | if (insn_delta == 0) { | ||
175 | memcpy(prog->insnsi + off, patch, sizeof(*patch)); | ||
176 | return prog; | ||
177 | } | ||
178 | |||
179 | insn_adj_cnt = prog->len + insn_delta; | ||
180 | |||
181 | /* Several new instructions need to be inserted. Make room | ||
182 | * for them. Likely, there's no need for a new allocation as | ||
183 | * last page could have large enough tailroom. | ||
184 | */ | ||
185 | prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt), | ||
186 | GFP_USER); | ||
187 | if (!prog_adj) | ||
188 | return NULL; | ||
189 | |||
190 | prog_adj->len = insn_adj_cnt; | ||
191 | |||
192 | /* Patching happens in 3 steps: | ||
193 | * | ||
194 | * 1) Move over tail of insnsi from next instruction onwards, | ||
195 | * so we can patch the single target insn with one or more | ||
196 | * new ones (patching is always from 1 to n insns, n > 0). | ||
197 | * 2) Inject new instructions at the target location. | ||
198 | * 3) Adjust branch offsets if necessary. | ||
199 | */ | ||
200 | insn_rest = insn_adj_cnt - off - len; | ||
201 | |||
202 | memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1, | ||
203 | sizeof(*patch) * insn_rest); | ||
204 | memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len); | ||
205 | |||
206 | bpf_adj_branches(prog_adj, off, insn_delta); | ||
207 | |||
208 | return prog_adj; | ||
209 | } | ||
210 | |||
140 | #ifdef CONFIG_BPF_JIT | 211 | #ifdef CONFIG_BPF_JIT |
141 | struct bpf_binary_header * | 212 | struct bpf_binary_header * |
142 | bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, | 213 | bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, |
@@ -185,6 +256,7 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) | |||
185 | } | 256 | } |
186 | EXPORT_SYMBOL_GPL(__bpf_call_base); | 257 | EXPORT_SYMBOL_GPL(__bpf_call_base); |
187 | 258 | ||
259 | #ifndef CONFIG_BPF_JIT_ALWAYS_ON | ||
188 | /** | 260 | /** |
189 | * __bpf_prog_run - run eBPF program on a given context | 261 | * __bpf_prog_run - run eBPF program on a given context |
190 | * @ctx: is the data we are operating on | 262 | * @ctx: is the data we are operating on |
@@ -372,7 +444,7 @@ select_insn: | |||
372 | DST = tmp; | 444 | DST = tmp; |
373 | CONT; | 445 | CONT; |
374 | ALU_MOD_X: | 446 | ALU_MOD_X: |
375 | if (unlikely(SRC == 0)) | 447 | if (unlikely((u32)SRC == 0)) |
376 | return 0; | 448 | return 0; |
377 | tmp = (u32) DST; | 449 | tmp = (u32) DST; |
378 | DST = do_div(tmp, (u32) SRC); | 450 | DST = do_div(tmp, (u32) SRC); |
@@ -391,7 +463,7 @@ select_insn: | |||
391 | DST = div64_u64(DST, SRC); | 463 | DST = div64_u64(DST, SRC); |
392 | CONT; | 464 | CONT; |
393 | ALU_DIV_X: | 465 | ALU_DIV_X: |
394 | if (unlikely(SRC == 0)) | 466 | if (unlikely((u32)SRC == 0)) |
395 | return 0; | 467 | return 0; |
396 | tmp = (u32) DST; | 468 | tmp = (u32) DST; |
397 | do_div(tmp, (u32) SRC); | 469 | do_div(tmp, (u32) SRC); |
@@ -446,7 +518,7 @@ select_insn: | |||
446 | struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2; | 518 | struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2; |
447 | struct bpf_array *array = container_of(map, struct bpf_array, map); | 519 | struct bpf_array *array = container_of(map, struct bpf_array, map); |
448 | struct bpf_prog *prog; | 520 | struct bpf_prog *prog; |
449 | u64 index = BPF_R3; | 521 | u32 index = BPF_R3; |
450 | 522 | ||
451 | if (unlikely(index >= array->map.max_entries)) | 523 | if (unlikely(index >= array->map.max_entries)) |
452 | goto out; | 524 | goto out; |
@@ -654,6 +726,13 @@ load_byte: | |||
654 | return 0; | 726 | return 0; |
655 | } | 727 | } |
656 | 728 | ||
729 | #else | ||
730 | static unsigned int __bpf_prog_ret0(void *ctx, const struct bpf_insn *insn) | ||
731 | { | ||
732 | return 0; | ||
733 | } | ||
734 | #endif | ||
735 | |||
657 | bool bpf_prog_array_compatible(struct bpf_array *array, | 736 | bool bpf_prog_array_compatible(struct bpf_array *array, |
658 | const struct bpf_prog *fp) | 737 | const struct bpf_prog *fp) |
659 | { | 738 | { |
@@ -700,9 +779,23 @@ static int bpf_check_tail_call(const struct bpf_prog *fp) | |||
700 | */ | 779 | */ |
701 | int bpf_prog_select_runtime(struct bpf_prog *fp) | 780 | int bpf_prog_select_runtime(struct bpf_prog *fp) |
702 | { | 781 | { |
782 | #ifndef CONFIG_BPF_JIT_ALWAYS_ON | ||
703 | fp->bpf_func = (void *) __bpf_prog_run; | 783 | fp->bpf_func = (void *) __bpf_prog_run; |
704 | 784 | #else | |
785 | fp->bpf_func = (void *) __bpf_prog_ret0; | ||
786 | #endif | ||
787 | |||
788 | /* eBPF JITs can rewrite the program in case constant | ||
789 | * blinding is active. However, in case of error during | ||
790 | * blinding, bpf_int_jit_compile() must always return a | ||
791 | * valid program, which in this case would simply not | ||
792 | * be JITed, but falls back to the interpreter. | ||
793 | */ | ||
705 | bpf_int_jit_compile(fp); | 794 | bpf_int_jit_compile(fp); |
795 | #ifdef CONFIG_BPF_JIT_ALWAYS_ON | ||
796 | if (!fp->jited) | ||
797 | return -ENOTSUPP; | ||
798 | #endif | ||
706 | bpf_prog_lock_ro(fp); | 799 | bpf_prog_lock_ro(fp); |
707 | 800 | ||
708 | /* The tail call compatibility check can only be done at | 801 | /* The tail call compatibility check can only be done at |
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 4e32cc94edd9..424accd20c2d 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c | |||
@@ -447,57 +447,6 @@ void bpf_register_prog_type(struct bpf_prog_type_list *tl) | |||
447 | list_add(&tl->list_node, &bpf_prog_types); | 447 | list_add(&tl->list_node, &bpf_prog_types); |
448 | } | 448 | } |
449 | 449 | ||
450 | /* fixup insn->imm field of bpf_call instructions: | ||
451 | * if (insn->imm == BPF_FUNC_map_lookup_elem) | ||
452 | * insn->imm = bpf_map_lookup_elem - __bpf_call_base; | ||
453 | * else if (insn->imm == BPF_FUNC_map_update_elem) | ||
454 | * insn->imm = bpf_map_update_elem - __bpf_call_base; | ||
455 | * else ... | ||
456 | * | ||
457 | * this function is called after eBPF program passed verification | ||
458 | */ | ||
459 | static void fixup_bpf_calls(struct bpf_prog *prog) | ||
460 | { | ||
461 | const struct bpf_func_proto *fn; | ||
462 | int i; | ||
463 | |||
464 | for (i = 0; i < prog->len; i++) { | ||
465 | struct bpf_insn *insn = &prog->insnsi[i]; | ||
466 | |||
467 | if (insn->code == (BPF_JMP | BPF_CALL)) { | ||
468 | /* we reach here when program has bpf_call instructions | ||
469 | * and it passed bpf_check(), means that | ||
470 | * ops->get_func_proto must have been supplied, check it | ||
471 | */ | ||
472 | BUG_ON(!prog->aux->ops->get_func_proto); | ||
473 | |||
474 | if (insn->imm == BPF_FUNC_get_route_realm) | ||
475 | prog->dst_needed = 1; | ||
476 | if (insn->imm == BPF_FUNC_get_prandom_u32) | ||
477 | bpf_user_rnd_init_once(); | ||
478 | if (insn->imm == BPF_FUNC_tail_call) { | ||
479 | /* mark bpf_tail_call as different opcode | ||
480 | * to avoid conditional branch in | ||
481 | * interpeter for every normal call | ||
482 | * and to prevent accidental JITing by | ||
483 | * JIT compiler that doesn't support | ||
484 | * bpf_tail_call yet | ||
485 | */ | ||
486 | insn->imm = 0; | ||
487 | insn->code |= BPF_X; | ||
488 | continue; | ||
489 | } | ||
490 | |||
491 | fn = prog->aux->ops->get_func_proto(insn->imm); | ||
492 | /* all functions that have prototype and verifier allowed | ||
493 | * programs to call them, must be real in-kernel functions | ||
494 | */ | ||
495 | BUG_ON(!fn->func); | ||
496 | insn->imm = fn->func - __bpf_call_base; | ||
497 | } | ||
498 | } | ||
499 | } | ||
500 | |||
501 | /* drop refcnt on maps used by eBPF program and free auxilary data */ | 450 | /* drop refcnt on maps used by eBPF program and free auxilary data */ |
502 | static void free_used_maps(struct bpf_prog_aux *aux) | 451 | static void free_used_maps(struct bpf_prog_aux *aux) |
503 | { | 452 | { |
@@ -680,9 +629,6 @@ static int bpf_prog_load(union bpf_attr *attr) | |||
680 | if (err < 0) | 629 | if (err < 0) |
681 | goto free_used_maps; | 630 | goto free_used_maps; |
682 | 631 | ||
683 | /* fixup BPF_CALL->imm field */ | ||
684 | fixup_bpf_calls(prog); | ||
685 | |||
686 | /* eBPF program is ready to be JITed */ | 632 | /* eBPF program is ready to be JITed */ |
687 | err = bpf_prog_select_runtime(prog); | 633 | err = bpf_prog_select_runtime(prog); |
688 | if (err < 0) | 634 | if (err < 0) |
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index eb759f5008b8..c14003840bc5 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c | |||
@@ -186,6 +186,14 @@ struct verifier_stack_elem { | |||
186 | struct verifier_stack_elem *next; | 186 | struct verifier_stack_elem *next; |
187 | }; | 187 | }; |
188 | 188 | ||
189 | struct bpf_insn_aux_data { | ||
190 | union { | ||
191 | enum bpf_reg_type ptr_type; /* pointer type for load/store insns */ | ||
192 | struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */ | ||
193 | }; | ||
194 | bool seen; /* this insn was processed by the verifier */ | ||
195 | }; | ||
196 | |||
189 | #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */ | 197 | #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */ |
190 | 198 | ||
191 | /* single container for all structs | 199 | /* single container for all structs |
@@ -200,6 +208,7 @@ struct verifier_env { | |||
200 | struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */ | 208 | struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */ |
201 | u32 used_map_cnt; /* number of used maps */ | 209 | u32 used_map_cnt; /* number of used maps */ |
202 | bool allow_ptr_leaks; | 210 | bool allow_ptr_leaks; |
211 | struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */ | ||
203 | }; | 212 | }; |
204 | 213 | ||
205 | /* verbose verifier prints what it's seeing | 214 | /* verbose verifier prints what it's seeing |
@@ -674,6 +683,13 @@ static bool is_pointer_value(struct verifier_env *env, int regno) | |||
674 | } | 683 | } |
675 | } | 684 | } |
676 | 685 | ||
686 | static bool is_ctx_reg(struct verifier_env *env, int regno) | ||
687 | { | ||
688 | const struct reg_state *reg = &env->cur_state.regs[regno]; | ||
689 | |||
690 | return reg->type == PTR_TO_CTX; | ||
691 | } | ||
692 | |||
677 | /* check whether memory at (regno + off) is accessible for t = (read | write) | 693 | /* check whether memory at (regno + off) is accessible for t = (read | write) |
678 | * if t==write, value_regno is a register which value is stored into memory | 694 | * if t==write, value_regno is a register which value is stored into memory |
679 | * if t==read, value_regno is a register which will receive the value from memory | 695 | * if t==read, value_regno is a register which will receive the value from memory |
@@ -770,6 +786,12 @@ static int check_xadd(struct verifier_env *env, struct bpf_insn *insn) | |||
770 | return -EACCES; | 786 | return -EACCES; |
771 | } | 787 | } |
772 | 788 | ||
789 | if (is_ctx_reg(env, insn->dst_reg)) { | ||
790 | verbose("BPF_XADD stores into R%d context is not allowed\n", | ||
791 | insn->dst_reg); | ||
792 | return -EACCES; | ||
793 | } | ||
794 | |||
773 | /* check whether atomic_add can read the memory */ | 795 | /* check whether atomic_add can read the memory */ |
774 | err = check_mem_access(env, insn->dst_reg, insn->off, | 796 | err = check_mem_access(env, insn->dst_reg, insn->off, |
775 | BPF_SIZE(insn->code), BPF_READ, -1); | 797 | BPF_SIZE(insn->code), BPF_READ, -1); |
@@ -945,7 +967,7 @@ error: | |||
945 | return -EINVAL; | 967 | return -EINVAL; |
946 | } | 968 | } |
947 | 969 | ||
948 | static int check_call(struct verifier_env *env, int func_id) | 970 | static int check_call(struct verifier_env *env, int func_id, int insn_idx) |
949 | { | 971 | { |
950 | struct verifier_state *state = &env->cur_state; | 972 | struct verifier_state *state = &env->cur_state; |
951 | const struct bpf_func_proto *fn = NULL; | 973 | const struct bpf_func_proto *fn = NULL; |
@@ -981,6 +1003,13 @@ static int check_call(struct verifier_env *env, int func_id) | |||
981 | err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &map); | 1003 | err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &map); |
982 | if (err) | 1004 | if (err) |
983 | return err; | 1005 | return err; |
1006 | if (func_id == BPF_FUNC_tail_call) { | ||
1007 | if (map == NULL) { | ||
1008 | verbose("verifier bug\n"); | ||
1009 | return -EINVAL; | ||
1010 | } | ||
1011 | env->insn_aux_data[insn_idx].map_ptr = map; | ||
1012 | } | ||
984 | err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &map); | 1013 | err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &map); |
985 | if (err) | 1014 | if (err) |
986 | return err; | 1015 | return err; |
@@ -1149,6 +1178,11 @@ static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn) | |||
1149 | return -EINVAL; | 1178 | return -EINVAL; |
1150 | } | 1179 | } |
1151 | 1180 | ||
1181 | if (opcode == BPF_ARSH && BPF_CLASS(insn->code) != BPF_ALU64) { | ||
1182 | verbose("BPF_ARSH not supported for 32 bit ALU\n"); | ||
1183 | return -EINVAL; | ||
1184 | } | ||
1185 | |||
1152 | if ((opcode == BPF_LSH || opcode == BPF_RSH || | 1186 | if ((opcode == BPF_LSH || opcode == BPF_RSH || |
1153 | opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { | 1187 | opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { |
1154 | int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; | 1188 | int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; |
@@ -1778,13 +1812,14 @@ static int do_check(struct verifier_env *env) | |||
1778 | print_bpf_insn(env, insn); | 1812 | print_bpf_insn(env, insn); |
1779 | } | 1813 | } |
1780 | 1814 | ||
1815 | env->insn_aux_data[insn_idx].seen = true; | ||
1781 | if (class == BPF_ALU || class == BPF_ALU64) { | 1816 | if (class == BPF_ALU || class == BPF_ALU64) { |
1782 | err = check_alu_op(env, insn); | 1817 | err = check_alu_op(env, insn); |
1783 | if (err) | 1818 | if (err) |
1784 | return err; | 1819 | return err; |
1785 | 1820 | ||
1786 | } else if (class == BPF_LDX) { | 1821 | } else if (class == BPF_LDX) { |
1787 | enum bpf_reg_type src_reg_type; | 1822 | enum bpf_reg_type *prev_src_type, src_reg_type; |
1788 | 1823 | ||
1789 | /* check for reserved fields is already done */ | 1824 | /* check for reserved fields is already done */ |
1790 | 1825 | ||
@@ -1813,16 +1848,18 @@ static int do_check(struct verifier_env *env) | |||
1813 | continue; | 1848 | continue; |
1814 | } | 1849 | } |
1815 | 1850 | ||
1816 | if (insn->imm == 0) { | 1851 | prev_src_type = &env->insn_aux_data[insn_idx].ptr_type; |
1852 | |||
1853 | if (*prev_src_type == NOT_INIT) { | ||
1817 | /* saw a valid insn | 1854 | /* saw a valid insn |
1818 | * dst_reg = *(u32 *)(src_reg + off) | 1855 | * dst_reg = *(u32 *)(src_reg + off) |
1819 | * use reserved 'imm' field to mark this insn | 1856 | * save type to validate intersecting paths |
1820 | */ | 1857 | */ |
1821 | insn->imm = src_reg_type; | 1858 | *prev_src_type = src_reg_type; |
1822 | 1859 | ||
1823 | } else if (src_reg_type != insn->imm && | 1860 | } else if (src_reg_type != *prev_src_type && |
1824 | (src_reg_type == PTR_TO_CTX || | 1861 | (src_reg_type == PTR_TO_CTX || |
1825 | insn->imm == PTR_TO_CTX)) { | 1862 | *prev_src_type == PTR_TO_CTX)) { |
1826 | /* ABuser program is trying to use the same insn | 1863 | /* ABuser program is trying to use the same insn |
1827 | * dst_reg = *(u32*) (src_reg + off) | 1864 | * dst_reg = *(u32*) (src_reg + off) |
1828 | * with different pointer types: | 1865 | * with different pointer types: |
@@ -1835,7 +1872,7 @@ static int do_check(struct verifier_env *env) | |||
1835 | } | 1872 | } |
1836 | 1873 | ||
1837 | } else if (class == BPF_STX) { | 1874 | } else if (class == BPF_STX) { |
1838 | enum bpf_reg_type dst_reg_type; | 1875 | enum bpf_reg_type *prev_dst_type, dst_reg_type; |
1839 | 1876 | ||
1840 | if (BPF_MODE(insn->code) == BPF_XADD) { | 1877 | if (BPF_MODE(insn->code) == BPF_XADD) { |
1841 | err = check_xadd(env, insn); | 1878 | err = check_xadd(env, insn); |
@@ -1863,11 +1900,13 @@ static int do_check(struct verifier_env *env) | |||
1863 | if (err) | 1900 | if (err) |
1864 | return err; | 1901 | return err; |
1865 | 1902 | ||
1866 | if (insn->imm == 0) { | 1903 | prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type; |
1867 | insn->imm = dst_reg_type; | 1904 | |
1868 | } else if (dst_reg_type != insn->imm && | 1905 | if (*prev_dst_type == NOT_INIT) { |
1906 | *prev_dst_type = dst_reg_type; | ||
1907 | } else if (dst_reg_type != *prev_dst_type && | ||
1869 | (dst_reg_type == PTR_TO_CTX || | 1908 | (dst_reg_type == PTR_TO_CTX || |
1870 | insn->imm == PTR_TO_CTX)) { | 1909 | *prev_dst_type == PTR_TO_CTX)) { |
1871 | verbose("same insn cannot be used with different pointers\n"); | 1910 | verbose("same insn cannot be used with different pointers\n"); |
1872 | return -EINVAL; | 1911 | return -EINVAL; |
1873 | } | 1912 | } |
@@ -1883,6 +1922,12 @@ static int do_check(struct verifier_env *env) | |||
1883 | if (err) | 1922 | if (err) |
1884 | return err; | 1923 | return err; |
1885 | 1924 | ||
1925 | if (is_ctx_reg(env, insn->dst_reg)) { | ||
1926 | verbose("BPF_ST stores into R%d context is not allowed\n", | ||
1927 | insn->dst_reg); | ||
1928 | return -EACCES; | ||
1929 | } | ||
1930 | |||
1886 | /* check that memory (dst_reg + off) is writeable */ | 1931 | /* check that memory (dst_reg + off) is writeable */ |
1887 | err = check_mem_access(env, insn->dst_reg, insn->off, | 1932 | err = check_mem_access(env, insn->dst_reg, insn->off, |
1888 | BPF_SIZE(insn->code), BPF_WRITE, | 1933 | BPF_SIZE(insn->code), BPF_WRITE, |
@@ -1902,7 +1947,7 @@ static int do_check(struct verifier_env *env) | |||
1902 | return -EINVAL; | 1947 | return -EINVAL; |
1903 | } | 1948 | } |
1904 | 1949 | ||
1905 | err = check_call(env, insn->imm); | 1950 | err = check_call(env, insn->imm, insn_idx); |
1906 | if (err) | 1951 | if (err) |
1907 | return err; | 1952 | return err; |
1908 | 1953 | ||
@@ -1969,6 +2014,7 @@ process_bpf_exit: | |||
1969 | return err; | 2014 | return err; |
1970 | 2015 | ||
1971 | insn_idx++; | 2016 | insn_idx++; |
2017 | env->insn_aux_data[insn_idx].seen = true; | ||
1972 | } else { | 2018 | } else { |
1973 | verbose("invalid BPF_LD mode\n"); | 2019 | verbose("invalid BPF_LD mode\n"); |
1974 | return -EINVAL; | 2020 | return -EINVAL; |
@@ -2098,23 +2144,60 @@ static void convert_pseudo_ld_imm64(struct verifier_env *env) | |||
2098 | insn->src_reg = 0; | 2144 | insn->src_reg = 0; |
2099 | } | 2145 | } |
2100 | 2146 | ||
2101 | static void adjust_branches(struct bpf_prog *prog, int pos, int delta) | 2147 | /* single env->prog->insni[off] instruction was replaced with the range |
2148 | * insni[off, off + cnt). Adjust corresponding insn_aux_data by copying | ||
2149 | * [0, off) and [off, end) to new locations, so the patched range stays zero | ||
2150 | */ | ||
2151 | static int adjust_insn_aux_data(struct verifier_env *env, u32 prog_len, | ||
2152 | u32 off, u32 cnt) | ||
2102 | { | 2153 | { |
2103 | struct bpf_insn *insn = prog->insnsi; | 2154 | struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data; |
2104 | int insn_cnt = prog->len; | ||
2105 | int i; | 2155 | int i; |
2106 | 2156 | ||
2107 | for (i = 0; i < insn_cnt; i++, insn++) { | 2157 | if (cnt == 1) |
2108 | if (BPF_CLASS(insn->code) != BPF_JMP || | 2158 | return 0; |
2109 | BPF_OP(insn->code) == BPF_CALL || | 2159 | new_data = vzalloc(sizeof(struct bpf_insn_aux_data) * prog_len); |
2110 | BPF_OP(insn->code) == BPF_EXIT) | 2160 | if (!new_data) |
2111 | continue; | 2161 | return -ENOMEM; |
2162 | memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off); | ||
2163 | memcpy(new_data + off + cnt - 1, old_data + off, | ||
2164 | sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); | ||
2165 | for (i = off; i < off + cnt - 1; i++) | ||
2166 | new_data[i].seen = true; | ||
2167 | env->insn_aux_data = new_data; | ||
2168 | vfree(old_data); | ||
2169 | return 0; | ||
2170 | } | ||
2171 | |||
2172 | static struct bpf_prog *bpf_patch_insn_data(struct verifier_env *env, u32 off, | ||
2173 | const struct bpf_insn *patch, u32 len) | ||
2174 | { | ||
2175 | struct bpf_prog *new_prog; | ||
2112 | 2176 | ||
2113 | /* adjust offset of jmps if necessary */ | 2177 | new_prog = bpf_patch_insn_single(env->prog, off, patch, len); |
2114 | if (i < pos && i + insn->off + 1 > pos) | 2178 | if (!new_prog) |
2115 | insn->off += delta; | 2179 | return NULL; |
2116 | else if (i > pos + delta && i + insn->off + 1 <= pos + delta) | 2180 | if (adjust_insn_aux_data(env, new_prog->len, off, len)) |
2117 | insn->off -= delta; | 2181 | return NULL; |
2182 | return new_prog; | ||
2183 | } | ||
2184 | |||
2185 | /* The verifier does more data flow analysis than llvm and will not explore | ||
2186 | * branches that are dead at run time. Malicious programs can have dead code | ||
2187 | * too. Therefore replace all dead at-run-time code with nops. | ||
2188 | */ | ||
2189 | static void sanitize_dead_code(struct verifier_env *env) | ||
2190 | { | ||
2191 | struct bpf_insn_aux_data *aux_data = env->insn_aux_data; | ||
2192 | struct bpf_insn nop = BPF_MOV64_REG(BPF_REG_0, BPF_REG_0); | ||
2193 | struct bpf_insn *insn = env->prog->insnsi; | ||
2194 | const int insn_cnt = env->prog->len; | ||
2195 | int i; | ||
2196 | |||
2197 | for (i = 0; i < insn_cnt; i++) { | ||
2198 | if (aux_data[i].seen) | ||
2199 | continue; | ||
2200 | memcpy(insn + i, &nop, sizeof(nop)); | ||
2118 | } | 2201 | } |
2119 | } | 2202 | } |
2120 | 2203 | ||
@@ -2124,17 +2207,18 @@ static void adjust_branches(struct bpf_prog *prog, int pos, int delta) | |||
2124 | static int convert_ctx_accesses(struct verifier_env *env) | 2207 | static int convert_ctx_accesses(struct verifier_env *env) |
2125 | { | 2208 | { |
2126 | struct bpf_insn *insn = env->prog->insnsi; | 2209 | struct bpf_insn *insn = env->prog->insnsi; |
2127 | int insn_cnt = env->prog->len; | 2210 | const int insn_cnt = env->prog->len; |
2128 | struct bpf_insn insn_buf[16]; | 2211 | struct bpf_insn insn_buf[16]; |
2129 | struct bpf_prog *new_prog; | 2212 | struct bpf_prog *new_prog; |
2130 | u32 cnt; | ||
2131 | int i; | ||
2132 | enum bpf_access_type type; | 2213 | enum bpf_access_type type; |
2214 | int i, delta = 0; | ||
2133 | 2215 | ||
2134 | if (!env->prog->aux->ops->convert_ctx_access) | 2216 | if (!env->prog->aux->ops->convert_ctx_access) |
2135 | return 0; | 2217 | return 0; |
2136 | 2218 | ||
2137 | for (i = 0; i < insn_cnt; i++, insn++) { | 2219 | for (i = 0; i < insn_cnt; i++, insn++) { |
2220 | u32 cnt; | ||
2221 | |||
2138 | if (insn->code == (BPF_LDX | BPF_MEM | BPF_W)) | 2222 | if (insn->code == (BPF_LDX | BPF_MEM | BPF_W)) |
2139 | type = BPF_READ; | 2223 | type = BPF_READ; |
2140 | else if (insn->code == (BPF_STX | BPF_MEM | BPF_W)) | 2224 | else if (insn->code == (BPF_STX | BPF_MEM | BPF_W)) |
@@ -2142,11 +2226,8 @@ static int convert_ctx_accesses(struct verifier_env *env) | |||
2142 | else | 2226 | else |
2143 | continue; | 2227 | continue; |
2144 | 2228 | ||
2145 | if (insn->imm != PTR_TO_CTX) { | 2229 | if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX) |
2146 | /* clear internal mark */ | ||
2147 | insn->imm = 0; | ||
2148 | continue; | 2230 | continue; |
2149 | } | ||
2150 | 2231 | ||
2151 | cnt = env->prog->aux->ops-> | 2232 | cnt = env->prog->aux->ops-> |
2152 | convert_ctx_access(type, insn->dst_reg, insn->src_reg, | 2233 | convert_ctx_access(type, insn->dst_reg, insn->src_reg, |
@@ -2156,34 +2237,107 @@ static int convert_ctx_accesses(struct verifier_env *env) | |||
2156 | return -EINVAL; | 2237 | return -EINVAL; |
2157 | } | 2238 | } |
2158 | 2239 | ||
2159 | if (cnt == 1) { | 2240 | new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); |
2160 | memcpy(insn, insn_buf, sizeof(*insn)); | ||
2161 | continue; | ||
2162 | } | ||
2163 | |||
2164 | /* several new insns need to be inserted. Make room for them */ | ||
2165 | insn_cnt += cnt - 1; | ||
2166 | new_prog = bpf_prog_realloc(env->prog, | ||
2167 | bpf_prog_size(insn_cnt), | ||
2168 | GFP_USER); | ||
2169 | if (!new_prog) | 2241 | if (!new_prog) |
2170 | return -ENOMEM; | 2242 | return -ENOMEM; |
2171 | 2243 | ||
2172 | new_prog->len = insn_cnt; | 2244 | delta += cnt - 1; |
2173 | 2245 | ||
2174 | memmove(new_prog->insnsi + i + cnt, new_prog->insns + i + 1, | 2246 | /* keep walking new program and skip insns we just inserted */ |
2175 | sizeof(*insn) * (insn_cnt - i - cnt)); | 2247 | env->prog = new_prog; |
2248 | insn = new_prog->insnsi + i + delta; | ||
2249 | } | ||
2176 | 2250 | ||
2177 | /* copy substitute insns in place of load instruction */ | 2251 | return 0; |
2178 | memcpy(new_prog->insnsi + i, insn_buf, sizeof(*insn) * cnt); | 2252 | } |
2179 | 2253 | ||
2180 | /* adjust branches in the whole program */ | 2254 | /* fixup insn->imm field of bpf_call instructions |
2181 | adjust_branches(new_prog, i, cnt - 1); | 2255 | * |
2256 | * this function is called after eBPF program passed verification | ||
2257 | */ | ||
2258 | static int fixup_bpf_calls(struct verifier_env *env) | ||
2259 | { | ||
2260 | struct bpf_prog *prog = env->prog; | ||
2261 | struct bpf_insn *insn = prog->insnsi; | ||
2262 | const struct bpf_func_proto *fn; | ||
2263 | const int insn_cnt = prog->len; | ||
2264 | struct bpf_insn insn_buf[16]; | ||
2265 | struct bpf_prog *new_prog; | ||
2266 | struct bpf_map *map_ptr; | ||
2267 | int i, cnt, delta = 0; | ||
2182 | 2268 | ||
2183 | /* keep walking new program and skip insns we just inserted */ | 2269 | for (i = 0; i < insn_cnt; i++, insn++) { |
2184 | env->prog = new_prog; | 2270 | if (insn->code == (BPF_ALU | BPF_MOD | BPF_X) || |
2185 | insn = new_prog->insnsi + i + cnt - 1; | 2271 | insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { |
2186 | i += cnt - 1; | 2272 | /* due to JIT bugs clear upper 32-bits of src register |
2273 | * before div/mod operation | ||
2274 | */ | ||
2275 | insn_buf[0] = BPF_MOV32_REG(insn->src_reg, insn->src_reg); | ||
2276 | insn_buf[1] = *insn; | ||
2277 | cnt = 2; | ||
2278 | new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); | ||
2279 | if (!new_prog) | ||
2280 | return -ENOMEM; | ||
2281 | |||
2282 | delta += cnt - 1; | ||
2283 | env->prog = prog = new_prog; | ||
2284 | insn = new_prog->insnsi + i + delta; | ||
2285 | continue; | ||
2286 | } | ||
2287 | |||
2288 | if (insn->code != (BPF_JMP | BPF_CALL)) | ||
2289 | continue; | ||
2290 | |||
2291 | if (insn->imm == BPF_FUNC_get_route_realm) | ||
2292 | prog->dst_needed = 1; | ||
2293 | if (insn->imm == BPF_FUNC_get_prandom_u32) | ||
2294 | bpf_user_rnd_init_once(); | ||
2295 | if (insn->imm == BPF_FUNC_tail_call) { | ||
2296 | /* mark bpf_tail_call as different opcode to avoid | ||
2297 | * conditional branch in the interpeter for every normal | ||
2298 | * call and to prevent accidental JITing by JIT compiler | ||
2299 | * that doesn't support bpf_tail_call yet | ||
2300 | */ | ||
2301 | insn->imm = 0; | ||
2302 | insn->code |= BPF_X; | ||
2303 | |||
2304 | /* instead of changing every JIT dealing with tail_call | ||
2305 | * emit two extra insns: | ||
2306 | * if (index >= max_entries) goto out; | ||
2307 | * index &= array->index_mask; | ||
2308 | * to avoid out-of-bounds cpu speculation | ||
2309 | */ | ||
2310 | map_ptr = env->insn_aux_data[i + delta].map_ptr; | ||
2311 | if (!map_ptr->unpriv_array) | ||
2312 | continue; | ||
2313 | insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3, | ||
2314 | map_ptr->max_entries, 2); | ||
2315 | insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3, | ||
2316 | container_of(map_ptr, | ||
2317 | struct bpf_array, | ||
2318 | map)->index_mask); | ||
2319 | insn_buf[2] = *insn; | ||
2320 | cnt = 3; | ||
2321 | new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); | ||
2322 | if (!new_prog) | ||
2323 | return -ENOMEM; | ||
2324 | |||
2325 | delta += cnt - 1; | ||
2326 | env->prog = prog = new_prog; | ||
2327 | insn = new_prog->insnsi + i + delta; | ||
2328 | continue; | ||
2329 | } | ||
2330 | |||
2331 | fn = prog->aux->ops->get_func_proto(insn->imm); | ||
2332 | /* all functions that have prototype and verifier allowed | ||
2333 | * programs to call them, must be real in-kernel functions | ||
2334 | */ | ||
2335 | if (!fn->func) { | ||
2336 | verbose("kernel subsystem misconfigured func %d\n", | ||
2337 | insn->imm); | ||
2338 | return -EFAULT; | ||
2339 | } | ||
2340 | insn->imm = fn->func - __bpf_call_base; | ||
2187 | } | 2341 | } |
2188 | 2342 | ||
2189 | return 0; | 2343 | return 0; |
@@ -2227,6 +2381,11 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr) | |||
2227 | if (!env) | 2381 | if (!env) |
2228 | return -ENOMEM; | 2382 | return -ENOMEM; |
2229 | 2383 | ||
2384 | env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) * | ||
2385 | (*prog)->len); | ||
2386 | ret = -ENOMEM; | ||
2387 | if (!env->insn_aux_data) | ||
2388 | goto err_free_env; | ||
2230 | env->prog = *prog; | 2389 | env->prog = *prog; |
2231 | 2390 | ||
2232 | /* grab the mutex to protect few globals used by verifier */ | 2391 | /* grab the mutex to protect few globals used by verifier */ |
@@ -2245,12 +2404,12 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr) | |||
2245 | /* log_* values have to be sane */ | 2404 | /* log_* values have to be sane */ |
2246 | if (log_size < 128 || log_size > UINT_MAX >> 8 || | 2405 | if (log_size < 128 || log_size > UINT_MAX >> 8 || |
2247 | log_level == 0 || log_ubuf == NULL) | 2406 | log_level == 0 || log_ubuf == NULL) |
2248 | goto free_env; | 2407 | goto err_unlock; |
2249 | 2408 | ||
2250 | ret = -ENOMEM; | 2409 | ret = -ENOMEM; |
2251 | log_buf = vmalloc(log_size); | 2410 | log_buf = vmalloc(log_size); |
2252 | if (!log_buf) | 2411 | if (!log_buf) |
2253 | goto free_env; | 2412 | goto err_unlock; |
2254 | } else { | 2413 | } else { |
2255 | log_level = 0; | 2414 | log_level = 0; |
2256 | } | 2415 | } |
@@ -2279,9 +2438,15 @@ skip_full_check: | |||
2279 | free_states(env); | 2438 | free_states(env); |
2280 | 2439 | ||
2281 | if (ret == 0) | 2440 | if (ret == 0) |
2441 | sanitize_dead_code(env); | ||
2442 | |||
2443 | if (ret == 0) | ||
2282 | /* program is valid, convert *(u32*)(ctx + off) accesses */ | 2444 | /* program is valid, convert *(u32*)(ctx + off) accesses */ |
2283 | ret = convert_ctx_accesses(env); | 2445 | ret = convert_ctx_accesses(env); |
2284 | 2446 | ||
2447 | if (ret == 0) | ||
2448 | ret = fixup_bpf_calls(env); | ||
2449 | |||
2285 | if (log_level && log_len >= log_size - 1) { | 2450 | if (log_level && log_len >= log_size - 1) { |
2286 | BUG_ON(log_len >= log_size); | 2451 | BUG_ON(log_len >= log_size); |
2287 | /* verifier log exceeded user supplied buffer */ | 2452 | /* verifier log exceeded user supplied buffer */ |
@@ -2319,14 +2484,16 @@ skip_full_check: | |||
2319 | free_log_buf: | 2484 | free_log_buf: |
2320 | if (log_level) | 2485 | if (log_level) |
2321 | vfree(log_buf); | 2486 | vfree(log_buf); |
2322 | free_env: | ||
2323 | if (!env->prog->aux->used_maps) | 2487 | if (!env->prog->aux->used_maps) |
2324 | /* if we didn't copy map pointers into bpf_prog_info, release | 2488 | /* if we didn't copy map pointers into bpf_prog_info, release |
2325 | * them now. Otherwise free_bpf_prog_info() will release them. | 2489 | * them now. Otherwise free_bpf_prog_info() will release them. |
2326 | */ | 2490 | */ |
2327 | release_maps(env); | 2491 | release_maps(env); |
2328 | *prog = env->prog; | 2492 | *prog = env->prog; |
2329 | kfree(env); | 2493 | err_unlock: |
2330 | mutex_unlock(&bpf_verifier_lock); | 2494 | mutex_unlock(&bpf_verifier_lock); |
2495 | vfree(env->insn_aux_data); | ||
2496 | err_free_env: | ||
2497 | kfree(env); | ||
2331 | return ret; | 2498 | return ret; |
2332 | } | 2499 | } |
diff --git a/kernel/fork.c b/kernel/fork.c index 5ee818516a1c..79178db0d621 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -58,6 +58,7 @@ | |||
58 | #include <linux/tsacct_kern.h> | 58 | #include <linux/tsacct_kern.h> |
59 | #include <linux/cn_proc.h> | 59 | #include <linux/cn_proc.h> |
60 | #include <linux/freezer.h> | 60 | #include <linux/freezer.h> |
61 | #include <linux/kaiser.h> | ||
61 | #include <linux/delayacct.h> | 62 | #include <linux/delayacct.h> |
62 | #include <linux/taskstats_kern.h> | 63 | #include <linux/taskstats_kern.h> |
63 | #include <linux/random.h> | 64 | #include <linux/random.h> |
@@ -171,6 +172,7 @@ static inline void free_thread_stack(unsigned long *stack) | |||
171 | { | 172 | { |
172 | struct page *page = virt_to_page(stack); | 173 | struct page *page = virt_to_page(stack); |
173 | 174 | ||
175 | kaiser_unmap_thread_stack(stack); | ||
174 | __free_kmem_pages(page, THREAD_SIZE_ORDER); | 176 | __free_kmem_pages(page, THREAD_SIZE_ORDER); |
175 | } | 177 | } |
176 | # else | 178 | # else |
@@ -354,6 +356,10 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) | |||
354 | goto free_stack; | 356 | goto free_stack; |
355 | 357 | ||
356 | tsk->stack = stack; | 358 | tsk->stack = stack; |
359 | |||
360 | err = kaiser_map_thread_stack(tsk->stack); | ||
361 | if (err) | ||
362 | goto free_stack; | ||
357 | #ifdef CONFIG_SECCOMP | 363 | #ifdef CONFIG_SECCOMP |
358 | /* | 364 | /* |
359 | * We must handle setting up seccomp filters once we're under | 365 | * We must handle setting up seccomp filters once we're under |
diff --git a/kernel/futex.c b/kernel/futex.c index af29863f3349..a09c1dd1f659 100644 --- a/kernel/futex.c +++ b/kernel/futex.c | |||
@@ -1621,6 +1621,9 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags, | |||
1621 | struct futex_q *this, *next; | 1621 | struct futex_q *this, *next; |
1622 | WAKE_Q(wake_q); | 1622 | WAKE_Q(wake_q); |
1623 | 1623 | ||
1624 | if (nr_wake < 0 || nr_requeue < 0) | ||
1625 | return -EINVAL; | ||
1626 | |||
1624 | if (requeue_pi) { | 1627 | if (requeue_pi) { |
1625 | /* | 1628 | /* |
1626 | * Requeue PI only works on two distinct uaddrs. This | 1629 | * Requeue PI only works on two distinct uaddrs. This |
@@ -1939,8 +1942,12 @@ static int unqueue_me(struct futex_q *q) | |||
1939 | 1942 | ||
1940 | /* In the common case we don't take the spinlock, which is nice. */ | 1943 | /* In the common case we don't take the spinlock, which is nice. */ |
1941 | retry: | 1944 | retry: |
1942 | lock_ptr = q->lock_ptr; | 1945 | /* |
1943 | barrier(); | 1946 | * q->lock_ptr can change between this read and the following spin_lock. |
1947 | * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and | ||
1948 | * optimizing lock_ptr out of the logic below. | ||
1949 | */ | ||
1950 | lock_ptr = READ_ONCE(q->lock_ptr); | ||
1944 | if (lock_ptr != NULL) { | 1951 | if (lock_ptr != NULL) { |
1945 | spin_lock(lock_ptr); | 1952 | spin_lock(lock_ptr); |
1946 | /* | 1953 | /* |
diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig index c92e44855ddd..1276aabaab55 100644 --- a/kernel/gcov/Kconfig +++ b/kernel/gcov/Kconfig | |||
@@ -37,6 +37,7 @@ config ARCH_HAS_GCOV_PROFILE_ALL | |||
37 | 37 | ||
38 | config GCOV_PROFILE_ALL | 38 | config GCOV_PROFILE_ALL |
39 | bool "Profile entire Kernel" | 39 | bool "Profile entire Kernel" |
40 | depends on !COMPILE_TEST | ||
40 | depends on GCOV_KERNEL | 41 | depends on GCOV_KERNEL |
41 | depends on ARCH_HAS_GCOV_PROFILE_ALL | 42 | depends on ARCH_HAS_GCOV_PROFILE_ALL |
42 | default n | 43 | default n |
diff --git a/kernel/groups.c b/kernel/groups.c index 74d431d25251..5ea9847f172f 100644 --- a/kernel/groups.c +++ b/kernel/groups.c | |||
@@ -101,7 +101,7 @@ static int groups_from_user(struct group_info *group_info, | |||
101 | } | 101 | } |
102 | 102 | ||
103 | /* a simple Shell sort */ | 103 | /* a simple Shell sort */ |
104 | static void groups_sort(struct group_info *group_info) | 104 | void groups_sort(struct group_info *group_info) |
105 | { | 105 | { |
106 | int base, max, stride; | 106 | int base, max, stride; |
107 | int gidsetsize = group_info->ngroups; | 107 | int gidsetsize = group_info->ngroups; |
@@ -128,6 +128,7 @@ static void groups_sort(struct group_info *group_info) | |||
128 | stride /= 3; | 128 | stride /= 3; |
129 | } | 129 | } |
130 | } | 130 | } |
131 | EXPORT_SYMBOL(groups_sort); | ||
131 | 132 | ||
132 | /* a simple bsearch */ | 133 | /* a simple bsearch */ |
133 | int groups_search(const struct group_info *group_info, kgid_t grp) | 134 | int groups_search(const struct group_info *group_info, kgid_t grp) |
@@ -159,7 +160,6 @@ int groups_search(const struct group_info *group_info, kgid_t grp) | |||
159 | void set_groups(struct cred *new, struct group_info *group_info) | 160 | void set_groups(struct cred *new, struct group_info *group_info) |
160 | { | 161 | { |
161 | put_group_info(new->group_info); | 162 | put_group_info(new->group_info); |
162 | groups_sort(group_info); | ||
163 | get_group_info(group_info); | 163 | get_group_info(group_info); |
164 | new->group_info = group_info; | 164 | new->group_info = group_info; |
165 | } | 165 | } |
@@ -243,6 +243,7 @@ SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist) | |||
243 | return retval; | 243 | return retval; |
244 | } | 244 | } |
245 | 245 | ||
246 | groups_sort(group_info); | ||
246 | retval = set_current_groups(group_info); | 247 | retval = set_current_groups(group_info); |
247 | put_group_info(group_info); | 248 | put_group_info(group_info); |
248 | 249 | ||
diff --git a/kernel/kexec.c b/kernel/kexec.c index ed42e7365faa..ee51e0321599 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c | |||
@@ -104,6 +104,65 @@ out_free_image: | |||
104 | return ret; | 104 | return ret; |
105 | } | 105 | } |
106 | 106 | ||
107 | static int do_kexec_load(unsigned long entry, unsigned long nr_segments, | ||
108 | struct kexec_segment __user *segments, unsigned long flags) | ||
109 | { | ||
110 | struct kimage **dest_image, *image; | ||
111 | unsigned long i; | ||
112 | int ret; | ||
113 | |||
114 | if (flags & KEXEC_ON_CRASH) { | ||
115 | dest_image = &kexec_crash_image; | ||
116 | if (kexec_crash_image) | ||
117 | arch_kexec_unprotect_crashkres(); | ||
118 | } else { | ||
119 | dest_image = &kexec_image; | ||
120 | } | ||
121 | |||
122 | if (nr_segments == 0) { | ||
123 | /* Uninstall image */ | ||
124 | kimage_free(xchg(dest_image, NULL)); | ||
125 | return 0; | ||
126 | } | ||
127 | if (flags & KEXEC_ON_CRASH) { | ||
128 | /* | ||
129 | * Loading another kernel to switch to if this one | ||
130 | * crashes. Free any current crash dump kernel before | ||
131 | * we corrupt it. | ||
132 | */ | ||
133 | kimage_free(xchg(&kexec_crash_image, NULL)); | ||
134 | } | ||
135 | |||
136 | ret = kimage_alloc_init(&image, entry, nr_segments, segments, flags); | ||
137 | if (ret) | ||
138 | return ret; | ||
139 | |||
140 | if (flags & KEXEC_PRESERVE_CONTEXT) | ||
141 | image->preserve_context = 1; | ||
142 | |||
143 | ret = machine_kexec_prepare(image); | ||
144 | if (ret) | ||
145 | goto out; | ||
146 | |||
147 | for (i = 0; i < nr_segments; i++) { | ||
148 | ret = kimage_load_segment(image, &image->segment[i]); | ||
149 | if (ret) | ||
150 | goto out; | ||
151 | } | ||
152 | |||
153 | kimage_terminate(image); | ||
154 | |||
155 | /* Install the new kernel and uninstall the old */ | ||
156 | image = xchg(dest_image, image); | ||
157 | |||
158 | out: | ||
159 | if ((flags & KEXEC_ON_CRASH) && kexec_crash_image) | ||
160 | arch_kexec_protect_crashkres(); | ||
161 | |||
162 | kimage_free(image); | ||
163 | return ret; | ||
164 | } | ||
165 | |||
107 | /* | 166 | /* |
108 | * Exec Kernel system call: for obvious reasons only root may call it. | 167 | * Exec Kernel system call: for obvious reasons only root may call it. |
109 | * | 168 | * |
@@ -128,7 +187,6 @@ out_free_image: | |||
128 | SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments, | 187 | SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments, |
129 | struct kexec_segment __user *, segments, unsigned long, flags) | 188 | struct kexec_segment __user *, segments, unsigned long, flags) |
130 | { | 189 | { |
131 | struct kimage **dest_image, *image; | ||
132 | int result; | 190 | int result; |
133 | 191 | ||
134 | /* We only trust the superuser with rebooting the system. */ | 192 | /* We only trust the superuser with rebooting the system. */ |
@@ -153,9 +211,6 @@ SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments, | |||
153 | if (nr_segments > KEXEC_SEGMENT_MAX) | 211 | if (nr_segments > KEXEC_SEGMENT_MAX) |
154 | return -EINVAL; | 212 | return -EINVAL; |
155 | 213 | ||
156 | image = NULL; | ||
157 | result = 0; | ||
158 | |||
159 | /* Because we write directly to the reserved memory | 214 | /* Because we write directly to the reserved memory |
160 | * region when loading crash kernels we need a mutex here to | 215 | * region when loading crash kernels we need a mutex here to |
161 | * prevent multiple crash kernels from attempting to load | 216 | * prevent multiple crash kernels from attempting to load |
@@ -167,53 +222,9 @@ SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments, | |||
167 | if (!mutex_trylock(&kexec_mutex)) | 222 | if (!mutex_trylock(&kexec_mutex)) |
168 | return -EBUSY; | 223 | return -EBUSY; |
169 | 224 | ||
170 | dest_image = &kexec_image; | 225 | result = do_kexec_load(entry, nr_segments, segments, flags); |
171 | if (flags & KEXEC_ON_CRASH) | ||
172 | dest_image = &kexec_crash_image; | ||
173 | if (nr_segments > 0) { | ||
174 | unsigned long i; | ||
175 | |||
176 | if (flags & KEXEC_ON_CRASH) { | ||
177 | /* | ||
178 | * Loading another kernel to switch to if this one | ||
179 | * crashes. Free any current crash dump kernel before | ||
180 | * we corrupt it. | ||
181 | */ | ||
182 | |||
183 | kimage_free(xchg(&kexec_crash_image, NULL)); | ||
184 | result = kimage_alloc_init(&image, entry, nr_segments, | ||
185 | segments, flags); | ||
186 | crash_map_reserved_pages(); | ||
187 | } else { | ||
188 | /* Loading another kernel to reboot into. */ | ||
189 | |||
190 | result = kimage_alloc_init(&image, entry, nr_segments, | ||
191 | segments, flags); | ||
192 | } | ||
193 | if (result) | ||
194 | goto out; | ||
195 | |||
196 | if (flags & KEXEC_PRESERVE_CONTEXT) | ||
197 | image->preserve_context = 1; | ||
198 | result = machine_kexec_prepare(image); | ||
199 | if (result) | ||
200 | goto out; | ||
201 | |||
202 | for (i = 0; i < nr_segments; i++) { | ||
203 | result = kimage_load_segment(image, &image->segment[i]); | ||
204 | if (result) | ||
205 | goto out; | ||
206 | } | ||
207 | kimage_terminate(image); | ||
208 | if (flags & KEXEC_ON_CRASH) | ||
209 | crash_unmap_reserved_pages(); | ||
210 | } | ||
211 | /* Install the new kernel, and Uninstall the old */ | ||
212 | image = xchg(dest_image, image); | ||
213 | 226 | ||
214 | out: | ||
215 | mutex_unlock(&kexec_mutex); | 227 | mutex_unlock(&kexec_mutex); |
216 | kimage_free(image); | ||
217 | 228 | ||
218 | return result; | 229 | return result; |
219 | } | 230 | } |
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c index 17987debceec..3be8101eedba 100644 --- a/kernel/kexec_core.c +++ b/kernel/kexec_core.c | |||
@@ -953,7 +953,6 @@ int crash_shrink_memory(unsigned long new_size) | |||
953 | start = roundup(start, KEXEC_CRASH_MEM_ALIGN); | 953 | start = roundup(start, KEXEC_CRASH_MEM_ALIGN); |
954 | end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN); | 954 | end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN); |
955 | 955 | ||
956 | crash_map_reserved_pages(); | ||
957 | crash_free_reserved_phys_range(end, crashk_res.end); | 956 | crash_free_reserved_phys_range(end, crashk_res.end); |
958 | 957 | ||
959 | if ((start == end) && (crashk_res.parent != NULL)) | 958 | if ((start == end) && (crashk_res.parent != NULL)) |
@@ -967,7 +966,6 @@ int crash_shrink_memory(unsigned long new_size) | |||
967 | crashk_res.end = end - 1; | 966 | crashk_res.end = end - 1; |
968 | 967 | ||
969 | insert_resource(&iomem_resource, ram_res); | 968 | insert_resource(&iomem_resource, ram_res); |
970 | crash_unmap_reserved_pages(); | ||
971 | 969 | ||
972 | unlock: | 970 | unlock: |
973 | mutex_unlock(&kexec_mutex); | 971 | mutex_unlock(&kexec_mutex); |
@@ -1549,13 +1547,14 @@ int kernel_kexec(void) | |||
1549 | } | 1547 | } |
1550 | 1548 | ||
1551 | /* | 1549 | /* |
1552 | * Add and remove page tables for crashkernel memory | 1550 | * Protection mechanism for crashkernel reserved memory after |
1551 | * the kdump kernel is loaded. | ||
1553 | * | 1552 | * |
1554 | * Provide an empty default implementation here -- architecture | 1553 | * Provide an empty default implementation here -- architecture |
1555 | * code may override this | 1554 | * code may override this |
1556 | */ | 1555 | */ |
1557 | void __weak crash_map_reserved_pages(void) | 1556 | void __weak arch_kexec_protect_crashkres(void) |
1558 | {} | 1557 | {} |
1559 | 1558 | ||
1560 | void __weak crash_unmap_reserved_pages(void) | 1559 | void __weak arch_kexec_unprotect_crashkres(void) |
1561 | {} | 1560 | {} |
diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c index 6030efd4a188..ef2cf637f840 100644 --- a/kernel/kexec_file.c +++ b/kernel/kexec_file.c | |||
@@ -327,8 +327,11 @@ SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd, | |||
327 | return -EBUSY; | 327 | return -EBUSY; |
328 | 328 | ||
329 | dest_image = &kexec_image; | 329 | dest_image = &kexec_image; |
330 | if (flags & KEXEC_FILE_ON_CRASH) | 330 | if (flags & KEXEC_FILE_ON_CRASH) { |
331 | dest_image = &kexec_crash_image; | 331 | dest_image = &kexec_crash_image; |
332 | if (kexec_crash_image) | ||
333 | arch_kexec_unprotect_crashkres(); | ||
334 | } | ||
332 | 335 | ||
333 | if (flags & KEXEC_FILE_UNLOAD) | 336 | if (flags & KEXEC_FILE_UNLOAD) |
334 | goto exchange; | 337 | goto exchange; |
@@ -377,6 +380,9 @@ SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd, | |||
377 | exchange: | 380 | exchange: |
378 | image = xchg(dest_image, image); | 381 | image = xchg(dest_image, image); |
379 | out: | 382 | out: |
383 | if ((flags & KEXEC_FILE_ON_CRASH) && kexec_crash_image) | ||
384 | arch_kexec_protect_crashkres(); | ||
385 | |||
380 | mutex_unlock(&kexec_mutex); | 386 | mutex_unlock(&kexec_mutex); |
381 | kimage_free(image); | 387 | kimage_free(image); |
382 | return ret; | 388 | return ret; |
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c index f42f83a36506..a70b90db3909 100644 --- a/kernel/locking/mutex.c +++ b/kernel/locking/mutex.c | |||
@@ -719,6 +719,7 @@ static inline void | |||
719 | __mutex_unlock_common_slowpath(struct mutex *lock, int nested) | 719 | __mutex_unlock_common_slowpath(struct mutex *lock, int nested) |
720 | { | 720 | { |
721 | unsigned long flags; | 721 | unsigned long flags; |
722 | WAKE_Q(wake_q); | ||
722 | 723 | ||
723 | /* | 724 | /* |
724 | * As a performance measurement, release the lock before doing other | 725 | * As a performance measurement, release the lock before doing other |
@@ -746,11 +747,11 @@ __mutex_unlock_common_slowpath(struct mutex *lock, int nested) | |||
746 | struct mutex_waiter, list); | 747 | struct mutex_waiter, list); |
747 | 748 | ||
748 | debug_mutex_wake_waiter(lock, waiter); | 749 | debug_mutex_wake_waiter(lock, waiter); |
749 | 750 | wake_q_add(&wake_q, waiter->task); | |
750 | wake_up_process(waiter->task); | ||
751 | } | 751 | } |
752 | 752 | ||
753 | spin_unlock_mutex(&lock->wait_lock, flags); | 753 | spin_unlock_mutex(&lock->wait_lock, flags); |
754 | wake_up_q(&wake_q); | ||
754 | } | 755 | } |
755 | 756 | ||
756 | /* | 757 | /* |
diff --git a/kernel/module.c b/kernel/module.c index b14a4f31221f..0a56098d3738 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
@@ -2404,7 +2404,7 @@ static char elf_type(const Elf_Sym *sym, const struct load_info *info) | |||
2404 | } | 2404 | } |
2405 | if (sym->st_shndx == SHN_UNDEF) | 2405 | if (sym->st_shndx == SHN_UNDEF) |
2406 | return 'U'; | 2406 | return 'U'; |
2407 | if (sym->st_shndx == SHN_ABS) | 2407 | if (sym->st_shndx == SHN_ABS || sym->st_shndx == info->index.pcpu) |
2408 | return 'a'; | 2408 | return 'a'; |
2409 | if (sym->st_shndx >= SHN_LORESERVE) | 2409 | if (sym->st_shndx >= SHN_LORESERVE) |
2410 | return '?'; | 2410 | return '?'; |
@@ -2433,7 +2433,7 @@ static char elf_type(const Elf_Sym *sym, const struct load_info *info) | |||
2433 | } | 2433 | } |
2434 | 2434 | ||
2435 | static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs, | 2435 | static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs, |
2436 | unsigned int shnum) | 2436 | unsigned int shnum, unsigned int pcpundx) |
2437 | { | 2437 | { |
2438 | const Elf_Shdr *sec; | 2438 | const Elf_Shdr *sec; |
2439 | 2439 | ||
@@ -2442,6 +2442,11 @@ static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs, | |||
2442 | || !src->st_name) | 2442 | || !src->st_name) |
2443 | return false; | 2443 | return false; |
2444 | 2444 | ||
2445 | #ifdef CONFIG_KALLSYMS_ALL | ||
2446 | if (src->st_shndx == pcpundx) | ||
2447 | return true; | ||
2448 | #endif | ||
2449 | |||
2445 | sec = sechdrs + src->st_shndx; | 2450 | sec = sechdrs + src->st_shndx; |
2446 | if (!(sec->sh_flags & SHF_ALLOC) | 2451 | if (!(sec->sh_flags & SHF_ALLOC) |
2447 | #ifndef CONFIG_KALLSYMS_ALL | 2452 | #ifndef CONFIG_KALLSYMS_ALL |
@@ -2479,7 +2484,8 @@ static void layout_symtab(struct module *mod, struct load_info *info) | |||
2479 | /* Compute total space required for the core symbols' strtab. */ | 2484 | /* Compute total space required for the core symbols' strtab. */ |
2480 | for (ndst = i = 0; i < nsrc; i++) { | 2485 | for (ndst = i = 0; i < nsrc; i++) { |
2481 | if (i == 0 || | 2486 | if (i == 0 || |
2482 | is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) { | 2487 | is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum, |
2488 | info->index.pcpu)) { | ||
2483 | strtab_size += strlen(&info->strtab[src[i].st_name])+1; | 2489 | strtab_size += strlen(&info->strtab[src[i].st_name])+1; |
2484 | ndst++; | 2490 | ndst++; |
2485 | } | 2491 | } |
@@ -2537,7 +2543,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info) | |||
2537 | src = mod->kallsyms->symtab; | 2543 | src = mod->kallsyms->symtab; |
2538 | for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) { | 2544 | for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) { |
2539 | if (i == 0 || | 2545 | if (i == 0 || |
2540 | is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) { | 2546 | is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum, |
2547 | info->index.pcpu)) { | ||
2541 | dst[ndst] = src[i]; | 2548 | dst[ndst] = src[i]; |
2542 | dst[ndst++].st_name = s - mod->core_kallsyms.strtab; | 2549 | dst[ndst++].st_name = s - mod->core_kallsyms.strtab; |
2543 | s += strlcpy(s, &mod->kallsyms->strtab[src[i].st_name], | 2550 | s += strlcpy(s, &mod->kallsyms->strtab[src[i].st_name], |
@@ -2881,8 +2888,12 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags) | |||
2881 | return -ENOEXEC; | 2888 | return -ENOEXEC; |
2882 | } | 2889 | } |
2883 | 2890 | ||
2884 | if (!get_modinfo(info, "intree")) | 2891 | if (!get_modinfo(info, "intree")) { |
2892 | if (!test_taint(TAINT_OOT_MODULE)) | ||
2893 | pr_warn("%s: loading out-of-tree module taints kernel.\n", | ||
2894 | mod->name); | ||
2885 | add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK); | 2895 | add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK); |
2896 | } | ||
2886 | 2897 | ||
2887 | if (get_modinfo(info, "staging")) { | 2898 | if (get_modinfo(info, "staging")) { |
2888 | add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK); | 2899 | add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK); |
@@ -3047,6 +3058,8 @@ static int move_module(struct module *mod, struct load_info *info) | |||
3047 | 3058 | ||
3048 | static int check_module_license_and_versions(struct module *mod) | 3059 | static int check_module_license_and_versions(struct module *mod) |
3049 | { | 3060 | { |
3061 | int prev_taint = test_taint(TAINT_PROPRIETARY_MODULE); | ||
3062 | |||
3050 | /* | 3063 | /* |
3051 | * ndiswrapper is under GPL by itself, but loads proprietary modules. | 3064 | * ndiswrapper is under GPL by itself, but loads proprietary modules. |
3052 | * Don't use add_taint_module(), as it would prevent ndiswrapper from | 3065 | * Don't use add_taint_module(), as it would prevent ndiswrapper from |
@@ -3065,6 +3078,9 @@ static int check_module_license_and_versions(struct module *mod) | |||
3065 | add_taint_module(mod, TAINT_PROPRIETARY_MODULE, | 3078 | add_taint_module(mod, TAINT_PROPRIETARY_MODULE, |
3066 | LOCKDEP_NOW_UNRELIABLE); | 3079 | LOCKDEP_NOW_UNRELIABLE); |
3067 | 3080 | ||
3081 | if (!prev_taint && test_taint(TAINT_PROPRIETARY_MODULE)) | ||
3082 | pr_warn("%s: module license taints kernel.\n", mod->name); | ||
3083 | |||
3068 | #ifdef CONFIG_MODVERSIONS | 3084 | #ifdef CONFIG_MODVERSIONS |
3069 | if ((mod->num_syms && !mod->crcs) | 3085 | if ((mod->num_syms && !mod->crcs) |
3070 | || (mod->num_gpl_syms && !mod->gpl_crcs) | 3086 | || (mod->num_gpl_syms && !mod->gpl_crcs) |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 717bbfd2e80f..3d3be1d3536d 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -32,7 +32,7 @@ | |||
32 | #include <linux/init.h> | 32 | #include <linux/init.h> |
33 | #include <linux/uaccess.h> | 33 | #include <linux/uaccess.h> |
34 | #include <linux/highmem.h> | 34 | #include <linux/highmem.h> |
35 | #include <asm/mmu_context.h> | 35 | #include <linux/mmu_context.h> |
36 | #include <linux/interrupt.h> | 36 | #include <linux/interrupt.h> |
37 | #include <linux/capability.h> | 37 | #include <linux/capability.h> |
38 | #include <linux/completion.h> | 38 | #include <linux/completion.h> |
@@ -2145,6 +2145,7 @@ void __dl_clear_params(struct task_struct *p) | |||
2145 | dl_se->dl_period = 0; | 2145 | dl_se->dl_period = 0; |
2146 | dl_se->flags = 0; | 2146 | dl_se->flags = 0; |
2147 | dl_se->dl_bw = 0; | 2147 | dl_se->dl_bw = 0; |
2148 | dl_se->dl_density = 0; | ||
2148 | 2149 | ||
2149 | dl_se->dl_throttled = 0; | 2150 | dl_se->dl_throttled = 0; |
2150 | dl_se->dl_new = 1; | 2151 | dl_se->dl_new = 1; |
@@ -2750,7 +2751,7 @@ context_switch(struct rq *rq, struct task_struct *prev, | |||
2750 | atomic_inc(&oldmm->mm_count); | 2751 | atomic_inc(&oldmm->mm_count); |
2751 | enter_lazy_tlb(oldmm, next); | 2752 | enter_lazy_tlb(oldmm, next); |
2752 | } else | 2753 | } else |
2753 | switch_mm(oldmm, mm, next); | 2754 | switch_mm_irqs_off(oldmm, mm, next); |
2754 | 2755 | ||
2755 | if (!prev->mm) { | 2756 | if (!prev->mm) { |
2756 | prev->active_mm = NULL; | 2757 | prev->active_mm = NULL; |
@@ -3815,6 +3816,7 @@ __setparam_dl(struct task_struct *p, const struct sched_attr *attr) | |||
3815 | dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline; | 3816 | dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline; |
3816 | dl_se->flags = attr->sched_flags; | 3817 | dl_se->flags = attr->sched_flags; |
3817 | dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime); | 3818 | dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime); |
3819 | dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime); | ||
3818 | 3820 | ||
3819 | /* | 3821 | /* |
3820 | * Changing the parameters of a task is 'tricky' and we're not doing | 3822 | * Changing the parameters of a task is 'tricky' and we're not doing |
@@ -6139,6 +6141,19 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd) | |||
6139 | call_rcu_sched(&old_rd->rcu, free_rootdomain); | 6141 | call_rcu_sched(&old_rd->rcu, free_rootdomain); |
6140 | } | 6142 | } |
6141 | 6143 | ||
6144 | void sched_get_rd(struct root_domain *rd) | ||
6145 | { | ||
6146 | atomic_inc(&rd->refcount); | ||
6147 | } | ||
6148 | |||
6149 | void sched_put_rd(struct root_domain *rd) | ||
6150 | { | ||
6151 | if (!atomic_dec_and_test(&rd->refcount)) | ||
6152 | return; | ||
6153 | |||
6154 | call_rcu_sched(&rd->rcu, free_rootdomain); | ||
6155 | } | ||
6156 | |||
6142 | static int init_rootdomain(struct root_domain *rd) | 6157 | static int init_rootdomain(struct root_domain *rd) |
6143 | { | 6158 | { |
6144 | memset(rd, 0, sizeof(*rd)); | 6159 | memset(rd, 0, sizeof(*rd)); |
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 772169e4775f..0b7e60de85f3 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c | |||
@@ -498,13 +498,84 @@ static bool dl_entity_overflow(struct sched_dl_entity *dl_se, | |||
498 | } | 498 | } |
499 | 499 | ||
500 | /* | 500 | /* |
501 | * When a -deadline entity is queued back on the runqueue, its runtime and | 501 | * Revised wakeup rule [1]: For self-suspending tasks, rather then |
502 | * deadline might need updating. | 502 | * re-initializing task's runtime and deadline, the revised wakeup |
503 | * rule adjusts the task's runtime to avoid the task to overrun its | ||
504 | * density. | ||
503 | * | 505 | * |
504 | * The policy here is that we update the deadline of the entity only if: | 506 | * Reasoning: a task may overrun the density if: |
505 | * - the current deadline is in the past, | 507 | * runtime / (deadline - t) > dl_runtime / dl_deadline |
506 | * - using the remaining runtime with the current deadline would make | 508 | * |
507 | * the entity exceed its bandwidth. | 509 | * Therefore, runtime can be adjusted to: |
510 | * runtime = (dl_runtime / dl_deadline) * (deadline - t) | ||
511 | * | ||
512 | * In such way that runtime will be equal to the maximum density | ||
513 | * the task can use without breaking any rule. | ||
514 | * | ||
515 | * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant | ||
516 | * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24. | ||
517 | */ | ||
518 | static void | ||
519 | update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq) | ||
520 | { | ||
521 | u64 laxity = dl_se->deadline - rq_clock(rq); | ||
522 | |||
523 | /* | ||
524 | * If the task has deadline < period, and the deadline is in the past, | ||
525 | * it should already be throttled before this check. | ||
526 | * | ||
527 | * See update_dl_entity() comments for further details. | ||
528 | */ | ||
529 | WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq))); | ||
530 | |||
531 | dl_se->runtime = (dl_se->dl_density * laxity) >> 20; | ||
532 | } | ||
533 | |||
534 | /* | ||
535 | * Regarding the deadline, a task with implicit deadline has a relative | ||
536 | * deadline == relative period. A task with constrained deadline has a | ||
537 | * relative deadline <= relative period. | ||
538 | * | ||
539 | * We support constrained deadline tasks. However, there are some restrictions | ||
540 | * applied only for tasks which do not have an implicit deadline. See | ||
541 | * update_dl_entity() to know more about such restrictions. | ||
542 | * | ||
543 | * The dl_is_implicit() returns true if the task has an implicit deadline. | ||
544 | */ | ||
545 | static inline bool dl_is_implicit(struct sched_dl_entity *dl_se) | ||
546 | { | ||
547 | return dl_se->dl_deadline == dl_se->dl_period; | ||
548 | } | ||
549 | |||
550 | /* | ||
551 | * When a deadline entity is placed in the runqueue, its runtime and deadline | ||
552 | * might need to be updated. This is done by a CBS wake up rule. There are two | ||
553 | * different rules: 1) the original CBS; and 2) the Revisited CBS. | ||
554 | * | ||
555 | * When the task is starting a new period, the Original CBS is used. In this | ||
556 | * case, the runtime is replenished and a new absolute deadline is set. | ||
557 | * | ||
558 | * When a task is queued before the begin of the next period, using the | ||
559 | * remaining runtime and deadline could make the entity to overflow, see | ||
560 | * dl_entity_overflow() to find more about runtime overflow. When such case | ||
561 | * is detected, the runtime and deadline need to be updated. | ||
562 | * | ||
563 | * If the task has an implicit deadline, i.e., deadline == period, the Original | ||
564 | * CBS is applied. the runtime is replenished and a new absolute deadline is | ||
565 | * set, as in the previous cases. | ||
566 | * | ||
567 | * However, the Original CBS does not work properly for tasks with | ||
568 | * deadline < period, which are said to have a constrained deadline. By | ||
569 | * applying the Original CBS, a constrained deadline task would be able to run | ||
570 | * runtime/deadline in a period. With deadline < period, the task would | ||
571 | * overrun the runtime/period allowed bandwidth, breaking the admission test. | ||
572 | * | ||
573 | * In order to prevent this misbehave, the Revisited CBS is used for | ||
574 | * constrained deadline tasks when a runtime overflow is detected. In the | ||
575 | * Revisited CBS, rather than replenishing & setting a new absolute deadline, | ||
576 | * the remaining runtime of the task is reduced to avoid runtime overflow. | ||
577 | * Please refer to the comments update_dl_revised_wakeup() function to find | ||
578 | * more about the Revised CBS rule. | ||
508 | */ | 579 | */ |
509 | static void update_dl_entity(struct sched_dl_entity *dl_se, | 580 | static void update_dl_entity(struct sched_dl_entity *dl_se, |
510 | struct sched_dl_entity *pi_se) | 581 | struct sched_dl_entity *pi_se) |
@@ -526,6 +597,14 @@ static void update_dl_entity(struct sched_dl_entity *dl_se, | |||
526 | 597 | ||
527 | if (dl_time_before(dl_se->deadline, rq_clock(rq)) || | 598 | if (dl_time_before(dl_se->deadline, rq_clock(rq)) || |
528 | dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) { | 599 | dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) { |
600 | |||
601 | if (unlikely(!dl_is_implicit(dl_se) && | ||
602 | !dl_time_before(dl_se->deadline, rq_clock(rq)) && | ||
603 | !dl_se->dl_boosted)){ | ||
604 | update_dl_revised_wakeup(dl_se, rq); | ||
605 | return; | ||
606 | } | ||
607 | |||
529 | dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline; | 608 | dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline; |
530 | dl_se->runtime = pi_se->dl_runtime; | 609 | dl_se->runtime = pi_se->dl_runtime; |
531 | } | 610 | } |
@@ -753,6 +832,8 @@ static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se) | |||
753 | if (unlikely(dl_se->dl_boosted || !start_dl_timer(p))) | 832 | if (unlikely(dl_se->dl_boosted || !start_dl_timer(p))) |
754 | return; | 833 | return; |
755 | dl_se->dl_throttled = 1; | 834 | dl_se->dl_throttled = 1; |
835 | if (dl_se->runtime > 0) | ||
836 | dl_se->runtime = 0; | ||
756 | } | 837 | } |
757 | } | 838 | } |
758 | 839 | ||
@@ -1011,11 +1092,6 @@ static void dequeue_dl_entity(struct sched_dl_entity *dl_se) | |||
1011 | __dequeue_dl_entity(dl_se); | 1092 | __dequeue_dl_entity(dl_se); |
1012 | } | 1093 | } |
1013 | 1094 | ||
1014 | static inline bool dl_is_constrained(struct sched_dl_entity *dl_se) | ||
1015 | { | ||
1016 | return dl_se->dl_deadline < dl_se->dl_period; | ||
1017 | } | ||
1018 | |||
1019 | static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) | 1095 | static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) |
1020 | { | 1096 | { |
1021 | struct task_struct *pi_task = rt_mutex_get_top_task(p); | 1097 | struct task_struct *pi_task = rt_mutex_get_top_task(p); |
@@ -1047,7 +1123,7 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) | |||
1047 | * If that is the case, the task will be throttled and | 1123 | * If that is the case, the task will be throttled and |
1048 | * the replenishment timer will be set to the next period. | 1124 | * the replenishment timer will be set to the next period. |
1049 | */ | 1125 | */ |
1050 | if (!p->dl.dl_throttled && dl_is_constrained(&p->dl)) | 1126 | if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl)) |
1051 | dl_check_constrained_dl(&p->dl); | 1127 | dl_check_constrained_dl(&p->dl); |
1052 | 1128 | ||
1053 | /* | 1129 | /* |
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index c193e9b1c38f..b35e7ec0105b 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
@@ -5383,17 +5383,20 @@ long group_norm_util(struct energy_env *eenv, struct sched_group *sg) | |||
5383 | static int find_new_capacity(struct energy_env *eenv, | 5383 | static int find_new_capacity(struct energy_env *eenv, |
5384 | const struct sched_group_energy * const sge) | 5384 | const struct sched_group_energy * const sge) |
5385 | { | 5385 | { |
5386 | int idx; | 5386 | int idx, max_idx = sge->nr_cap_states - 1; |
5387 | unsigned long util = group_max_util(eenv); | 5387 | unsigned long util = group_max_util(eenv); |
5388 | 5388 | ||
5389 | /* default is max_cap if we don't find a match */ | ||
5390 | eenv->cap_idx = max_idx; | ||
5391 | |||
5389 | for (idx = 0; idx < sge->nr_cap_states; idx++) { | 5392 | for (idx = 0; idx < sge->nr_cap_states; idx++) { |
5390 | if (sge->cap_states[idx].cap >= util) | 5393 | if (sge->cap_states[idx].cap >= util) { |
5394 | eenv->cap_idx = idx; | ||
5391 | break; | 5395 | break; |
5396 | } | ||
5392 | } | 5397 | } |
5393 | 5398 | ||
5394 | eenv->cap_idx = idx; | 5399 | return eenv->cap_idx; |
5395 | |||
5396 | return idx; | ||
5397 | } | 5400 | } |
5398 | 5401 | ||
5399 | static int group_idle_state(struct energy_env *eenv, struct sched_group *sg) | 5402 | static int group_idle_state(struct energy_env *eenv, struct sched_group *sg) |
@@ -5476,10 +5479,22 @@ static int sched_group_energy(struct energy_env *eenv) | |||
5476 | int cpu, total_energy = 0; | 5479 | int cpu, total_energy = 0; |
5477 | struct cpumask visit_cpus; | 5480 | struct cpumask visit_cpus; |
5478 | struct sched_group *sg; | 5481 | struct sched_group *sg; |
5482 | int cpu_count; | ||
5479 | 5483 | ||
5480 | WARN_ON(!eenv->sg_top->sge); | 5484 | WARN_ON(!eenv->sg_top->sge); |
5481 | 5485 | ||
5482 | cpumask_copy(&visit_cpus, sched_group_cpus(eenv->sg_top)); | 5486 | cpumask_copy(&visit_cpus, sched_group_cpus(eenv->sg_top)); |
5487 | /* If a cpu is hotplugged in while we are in this function, | ||
5488 | * it does not appear in the existing visit_cpus mask | ||
5489 | * which came from the sched_group pointer of the | ||
5490 | * sched_domain pointed at by sd_ea for either the prev | ||
5491 | * or next cpu and was dereferenced in __energy_diff. | ||
5492 | * Since we will dereference sd_scs later as we iterate | ||
5493 | * through the CPUs we expect to visit, new CPUs can | ||
5494 | * be present which are not in the visit_cpus mask. | ||
5495 | * Guard this with cpu_count. | ||
5496 | */ | ||
5497 | cpu_count = cpumask_weight(&visit_cpus); | ||
5483 | 5498 | ||
5484 | while (!cpumask_empty(&visit_cpus)) { | 5499 | while (!cpumask_empty(&visit_cpus)) { |
5485 | struct sched_group *sg_shared_cap = NULL; | 5500 | struct sched_group *sg_shared_cap = NULL; |
@@ -5489,6 +5504,8 @@ static int sched_group_energy(struct energy_env *eenv) | |||
5489 | /* | 5504 | /* |
5490 | * Is the group utilization affected by cpus outside this | 5505 | * Is the group utilization affected by cpus outside this |
5491 | * sched_group? | 5506 | * sched_group? |
5507 | * This sd may have groups with cpus which were not present | ||
5508 | * when we took visit_cpus. | ||
5492 | */ | 5509 | */ |
5493 | sd = rcu_dereference(per_cpu(sd_scs, cpu)); | 5510 | sd = rcu_dereference(per_cpu(sd_scs, cpu)); |
5494 | 5511 | ||
@@ -5540,8 +5557,24 @@ static int sched_group_energy(struct energy_env *eenv) | |||
5540 | 5557 | ||
5541 | total_energy += sg_busy_energy + sg_idle_energy; | 5558 | total_energy += sg_busy_energy + sg_idle_energy; |
5542 | 5559 | ||
5543 | if (!sd->child) | 5560 | if (!sd->child) { |
5561 | /* | ||
5562 | * cpu_count here is the number of | ||
5563 | * cpus we expect to visit in this | ||
5564 | * calculation. If we race against | ||
5565 | * hotplug, we can have extra cpus | ||
5566 | * added to the groups we are | ||
5567 | * iterating which do not appear in | ||
5568 | * the visit_cpus mask. In that case | ||
5569 | * we are not able to calculate energy | ||
5570 | * without restarting so we will bail | ||
5571 | * out and use prev_cpu this time. | ||
5572 | */ | ||
5573 | if (!cpu_count) | ||
5574 | return -EINVAL; | ||
5544 | cpumask_xor(&visit_cpus, &visit_cpus, sched_group_cpus(sg)); | 5575 | cpumask_xor(&visit_cpus, &visit_cpus, sched_group_cpus(sg)); |
5576 | cpu_count--; | ||
5577 | } | ||
5545 | 5578 | ||
5546 | if (cpumask_equal(sched_group_cpus(sg), sched_group_cpus(eenv->sg_top))) | 5579 | if (cpumask_equal(sched_group_cpus(sg), sched_group_cpus(eenv->sg_top))) |
5547 | goto next_cpu; | 5580 | goto next_cpu; |
@@ -5553,6 +5586,9 @@ static int sched_group_energy(struct energy_env *eenv) | |||
5553 | * If we raced with hotplug and got an sd NULL-pointer; | 5586 | * If we raced with hotplug and got an sd NULL-pointer; |
5554 | * returning a wrong energy estimation is better than | 5587 | * returning a wrong energy estimation is better than |
5555 | * entering an infinite loop. | 5588 | * entering an infinite loop. |
5589 | * Specifically: If a cpu is unplugged after we took | ||
5590 | * the visit_cpus mask, it no longer has an sd_scs | ||
5591 | * pointer, so when we dereference it, we get NULL. | ||
5556 | */ | 5592 | */ |
5557 | if (cpumask_test_cpu(cpu, &visit_cpus)) | 5593 | if (cpumask_test_cpu(cpu, &visit_cpus)) |
5558 | return -EINVAL; | 5594 | return -EINVAL; |
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 0cafe279ea06..9066d80f36fe 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c | |||
@@ -1935,9 +1935,8 @@ static void push_rt_tasks(struct rq *rq) | |||
1935 | * the rt_loop_next will cause the iterator to perform another scan. | 1935 | * the rt_loop_next will cause the iterator to perform another scan. |
1936 | * | 1936 | * |
1937 | */ | 1937 | */ |
1938 | static int rto_next_cpu(struct rq *rq) | 1938 | static int rto_next_cpu(struct root_domain *rd) |
1939 | { | 1939 | { |
1940 | struct root_domain *rd = rq->rd; | ||
1941 | int next; | 1940 | int next; |
1942 | int cpu; | 1941 | int cpu; |
1943 | 1942 | ||
@@ -2013,19 +2012,24 @@ static void tell_cpu_to_push(struct rq *rq) | |||
2013 | * Otherwise it is finishing up and an ipi needs to be sent. | 2012 | * Otherwise it is finishing up and an ipi needs to be sent. |
2014 | */ | 2013 | */ |
2015 | if (rq->rd->rto_cpu < 0) | 2014 | if (rq->rd->rto_cpu < 0) |
2016 | cpu = rto_next_cpu(rq); | 2015 | cpu = rto_next_cpu(rq->rd); |
2017 | 2016 | ||
2018 | raw_spin_unlock(&rq->rd->rto_lock); | 2017 | raw_spin_unlock(&rq->rd->rto_lock); |
2019 | 2018 | ||
2020 | rto_start_unlock(&rq->rd->rto_loop_start); | 2019 | rto_start_unlock(&rq->rd->rto_loop_start); |
2021 | 2020 | ||
2022 | if (cpu >= 0) | 2021 | if (cpu >= 0) { |
2022 | /* Make sure the rd does not get freed while pushing */ | ||
2023 | sched_get_rd(rq->rd); | ||
2023 | irq_work_queue_on(&rq->rd->rto_push_work, cpu); | 2024 | irq_work_queue_on(&rq->rd->rto_push_work, cpu); |
2025 | } | ||
2024 | } | 2026 | } |
2025 | 2027 | ||
2026 | /* Called from hardirq context */ | 2028 | /* Called from hardirq context */ |
2027 | void rto_push_irq_work_func(struct irq_work *work) | 2029 | void rto_push_irq_work_func(struct irq_work *work) |
2028 | { | 2030 | { |
2031 | struct root_domain *rd = | ||
2032 | container_of(work, struct root_domain, rto_push_work); | ||
2029 | struct rq *rq; | 2033 | struct rq *rq; |
2030 | int cpu; | 2034 | int cpu; |
2031 | 2035 | ||
@@ -2041,18 +2045,20 @@ void rto_push_irq_work_func(struct irq_work *work) | |||
2041 | raw_spin_unlock(&rq->lock); | 2045 | raw_spin_unlock(&rq->lock); |
2042 | } | 2046 | } |
2043 | 2047 | ||
2044 | raw_spin_lock(&rq->rd->rto_lock); | 2048 | raw_spin_lock(&rd->rto_lock); |
2045 | 2049 | ||
2046 | /* Pass the IPI to the next rt overloaded queue */ | 2050 | /* Pass the IPI to the next rt overloaded queue */ |
2047 | cpu = rto_next_cpu(rq); | 2051 | cpu = rto_next_cpu(rd); |
2048 | 2052 | ||
2049 | raw_spin_unlock(&rq->rd->rto_lock); | 2053 | raw_spin_unlock(&rd->rto_lock); |
2050 | 2054 | ||
2051 | if (cpu < 0) | 2055 | if (cpu < 0) { |
2056 | sched_put_rd(rd); | ||
2052 | return; | 2057 | return; |
2058 | } | ||
2053 | 2059 | ||
2054 | /* Try the next RT overloaded CPU */ | 2060 | /* Try the next RT overloaded CPU */ |
2055 | irq_work_queue_on(&rq->rd->rto_push_work, cpu); | 2061 | irq_work_queue_on(&rd->rto_push_work, cpu); |
2056 | } | 2062 | } |
2057 | #endif /* HAVE_RT_PUSH_IPI */ | 2063 | #endif /* HAVE_RT_PUSH_IPI */ |
2058 | 2064 | ||
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index ea897bea4fdd..9fa90cf12b82 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h | |||
@@ -575,6 +575,8 @@ struct root_domain { | |||
575 | }; | 575 | }; |
576 | 576 | ||
577 | extern struct root_domain def_root_domain; | 577 | extern struct root_domain def_root_domain; |
578 | extern void sched_get_rd(struct root_domain *rd); | ||
579 | extern void sched_put_rd(struct root_domain *rd); | ||
578 | 580 | ||
579 | #ifdef HAVE_RT_PUSH_IPI | 581 | #ifdef HAVE_RT_PUSH_IPI |
580 | extern void rto_push_irq_work_func(struct irq_work *work); | 582 | extern void rto_push_irq_work_func(struct irq_work *work); |
diff --git a/kernel/signal.c b/kernel/signal.c index 5d50ea899b6d..4a548c6a4118 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
@@ -72,7 +72,7 @@ static int sig_task_ignored(struct task_struct *t, int sig, bool force) | |||
72 | handler = sig_handler(t, sig); | 72 | handler = sig_handler(t, sig); |
73 | 73 | ||
74 | if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) && | 74 | if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) && |
75 | handler == SIG_DFL && !force) | 75 | handler == SIG_DFL && !(force && sig_kernel_only(sig))) |
76 | return 1; | 76 | return 1; |
77 | 77 | ||
78 | return sig_handler_ignored(handler, sig); | 78 | return sig_handler_ignored(handler, sig); |
@@ -88,13 +88,15 @@ static int sig_ignored(struct task_struct *t, int sig, bool force) | |||
88 | if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig)) | 88 | if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig)) |
89 | return 0; | 89 | return 0; |
90 | 90 | ||
91 | if (!sig_task_ignored(t, sig, force)) | ||
92 | return 0; | ||
93 | |||
94 | /* | 91 | /* |
95 | * Tracers may want to know about even ignored signals. | 92 | * Tracers may want to know about even ignored signal unless it |
93 | * is SIGKILL which can't be reported anyway but can be ignored | ||
94 | * by SIGNAL_UNKILLABLE task. | ||
96 | */ | 95 | */ |
97 | return !t->ptrace; | 96 | if (t->ptrace && sig != SIGKILL) |
97 | return 0; | ||
98 | |||
99 | return sig_task_ignored(t, sig, force); | ||
98 | } | 100 | } |
99 | 101 | ||
100 | /* | 102 | /* |
@@ -917,9 +919,9 @@ static void complete_signal(int sig, struct task_struct *p, int group) | |||
917 | * then start taking the whole group down immediately. | 919 | * then start taking the whole group down immediately. |
918 | */ | 920 | */ |
919 | if (sig_fatal(p, sig) && | 921 | if (sig_fatal(p, sig) && |
920 | !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) && | 922 | !(signal->flags & SIGNAL_GROUP_EXIT) && |
921 | !sigismember(&t->real_blocked, sig) && | 923 | !sigismember(&t->real_blocked, sig) && |
922 | (sig == SIGKILL || !t->ptrace)) { | 924 | (sig == SIGKILL || !p->ptrace)) { |
923 | /* | 925 | /* |
924 | * This signal will be fatal to the whole group. | 926 | * This signal will be fatal to the whole group. |
925 | */ | 927 | */ |
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index 405536b22c0c..227ea8166a83 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c | |||
@@ -312,7 +312,7 @@ EXPORT_SYMBOL_GPL(__ktime_divns); | |||
312 | */ | 312 | */ |
313 | ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs) | 313 | ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs) |
314 | { | 314 | { |
315 | ktime_t res = ktime_add(lhs, rhs); | 315 | ktime_t res = ktime_add_unsafe(lhs, rhs); |
316 | 316 | ||
317 | /* | 317 | /* |
318 | * We use KTIME_SEC_MAX here, the maximum timeout which we can | 318 | * We use KTIME_SEC_MAX here, the maximum timeout which we can |
@@ -669,7 +669,9 @@ static void hrtimer_reprogram(struct hrtimer *timer, | |||
669 | static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) | 669 | static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) |
670 | { | 670 | { |
671 | base->expires_next.tv64 = KTIME_MAX; | 671 | base->expires_next.tv64 = KTIME_MAX; |
672 | base->hang_detected = 0; | ||
672 | base->hres_active = 0; | 673 | base->hres_active = 0; |
674 | base->next_timer = NULL; | ||
673 | } | 675 | } |
674 | 676 | ||
675 | /* | 677 | /* |
@@ -1615,6 +1617,7 @@ static void init_hrtimers_cpu(int cpu) | |||
1615 | timerqueue_init_head(&cpu_base->clock_base[i].active); | 1617 | timerqueue_init_head(&cpu_base->clock_base[i].active); |
1616 | } | 1618 | } |
1617 | 1619 | ||
1620 | cpu_base->active_bases = 0; | ||
1618 | cpu_base->cpu = cpu; | 1621 | cpu_base->cpu = cpu; |
1619 | hrtimer_init_hres(cpu_base); | 1622 | hrtimer_init_hres(cpu_base); |
1620 | } | 1623 | } |
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c index f2826c35e918..fc7c37ad90a0 100644 --- a/kernel/time/posix-timers.c +++ b/kernel/time/posix-timers.c | |||
@@ -507,17 +507,22 @@ static struct pid *good_sigevent(sigevent_t * event) | |||
507 | { | 507 | { |
508 | struct task_struct *rtn = current->group_leader; | 508 | struct task_struct *rtn = current->group_leader; |
509 | 509 | ||
510 | if ((event->sigev_notify & SIGEV_THREAD_ID ) && | 510 | switch (event->sigev_notify) { |
511 | (!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) || | 511 | case SIGEV_SIGNAL | SIGEV_THREAD_ID: |
512 | !same_thread_group(rtn, current) || | 512 | rtn = find_task_by_vpid(event->sigev_notify_thread_id); |
513 | (event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_SIGNAL)) | 513 | if (!rtn || !same_thread_group(rtn, current)) |
514 | return NULL; | ||
515 | /* FALLTHRU */ | ||
516 | case SIGEV_SIGNAL: | ||
517 | case SIGEV_THREAD: | ||
518 | if (event->sigev_signo <= 0 || event->sigev_signo > SIGRTMAX) | ||
519 | return NULL; | ||
520 | /* FALLTHRU */ | ||
521 | case SIGEV_NONE: | ||
522 | return task_pid(rtn); | ||
523 | default: | ||
514 | return NULL; | 524 | return NULL; |
515 | 525 | } | |
516 | if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) && | ||
517 | ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX))) | ||
518 | return NULL; | ||
519 | |||
520 | return task_pid(rtn); | ||
521 | } | 526 | } |
522 | 527 | ||
523 | void posix_timers_register_clock(const clockid_t clock_id, | 528 | void posix_timers_register_clock(const clockid_t clock_id, |
@@ -745,8 +750,7 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting) | |||
745 | /* interval timer ? */ | 750 | /* interval timer ? */ |
746 | if (iv.tv64) | 751 | if (iv.tv64) |
747 | cur_setting->it_interval = ktime_to_timespec(iv); | 752 | cur_setting->it_interval = ktime_to_timespec(iv); |
748 | else if (!hrtimer_active(timer) && | 753 | else if (!hrtimer_active(timer) && timr->it_sigev_notify != SIGEV_NONE) |
749 | (timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) | ||
750 | return; | 754 | return; |
751 | 755 | ||
752 | now = timer->base->get_time(); | 756 | now = timer->base->get_time(); |
@@ -757,7 +761,7 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting) | |||
757 | * expiry is > now. | 761 | * expiry is > now. |
758 | */ | 762 | */ |
759 | if (iv.tv64 && (timr->it_requeue_pending & REQUEUE_PENDING || | 763 | if (iv.tv64 && (timr->it_requeue_pending & REQUEUE_PENDING || |
760 | (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) | 764 | timr->it_sigev_notify == SIGEV_NONE)) |
761 | timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv); | 765 | timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv); |
762 | 766 | ||
763 | remaining = __hrtimer_expires_remaining_adjusted(timer, now); | 767 | remaining = __hrtimer_expires_remaining_adjusted(timer, now); |
@@ -767,7 +771,7 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting) | |||
767 | * A single shot SIGEV_NONE timer must return 0, when | 771 | * A single shot SIGEV_NONE timer must return 0, when |
768 | * it is expired ! | 772 | * it is expired ! |
769 | */ | 773 | */ |
770 | if ((timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) | 774 | if (timr->it_sigev_notify != SIGEV_NONE) |
771 | cur_setting->it_value.tv_nsec = 1; | 775 | cur_setting->it_value.tv_nsec = 1; |
772 | } else | 776 | } else |
773 | cur_setting->it_value = ktime_to_timespec(remaining); | 777 | cur_setting->it_value = ktime_to_timespec(remaining); |
@@ -865,7 +869,7 @@ common_timer_set(struct k_itimer *timr, int flags, | |||
865 | timr->it.real.interval = timespec_to_ktime(new_setting->it_interval); | 869 | timr->it.real.interval = timespec_to_ktime(new_setting->it_interval); |
866 | 870 | ||
867 | /* SIGEV_NONE timers are not queued ! See common_timer_get */ | 871 | /* SIGEV_NONE timers are not queued ! See common_timer_get */ |
868 | if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) { | 872 | if (timr->it_sigev_notify == SIGEV_NONE) { |
869 | /* Setup correct expiry time for relative timers */ | 873 | /* Setup correct expiry time for relative timers */ |
870 | if (mode == HRTIMER_MODE_REL) { | 874 | if (mode == HRTIMER_MODE_REL) { |
871 | hrtimer_add_expires(timer, timer->base->get_time()); | 875 | hrtimer_add_expires(timer, timer->base->get_time()); |
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 34f9a9c417d9..a935cbdc55a4 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
@@ -568,6 +568,11 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now) | |||
568 | tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1); | 568 | tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1); |
569 | } | 569 | } |
570 | 570 | ||
571 | static inline bool local_timer_softirq_pending(void) | ||
572 | { | ||
573 | return local_softirq_pending() & TIMER_SOFTIRQ; | ||
574 | } | ||
575 | |||
571 | static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts, | 576 | static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts, |
572 | ktime_t now, int cpu) | 577 | ktime_t now, int cpu) |
573 | { | 578 | { |
@@ -584,8 +589,18 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts, | |||
584 | } while (read_seqretry(&jiffies_lock, seq)); | 589 | } while (read_seqretry(&jiffies_lock, seq)); |
585 | ts->last_jiffies = basejiff; | 590 | ts->last_jiffies = basejiff; |
586 | 591 | ||
587 | if (rcu_needs_cpu(basemono, &next_rcu) || | 592 | /* |
588 | arch_needs_cpu() || irq_work_needs_cpu()) { | 593 | * Keep the periodic tick, when RCU, architecture or irq_work |
594 | * requests it. | ||
595 | * Aside of that check whether the local timer softirq is | ||
596 | * pending. If so its a bad idea to call get_next_timer_interrupt() | ||
597 | * because there is an already expired timer, so it will request | ||
598 | * immeditate expiry, which rearms the hardware timer with a | ||
599 | * minimal delta which brings us back to this place | ||
600 | * immediately. Lather, rinse and repeat... | ||
601 | */ | ||
602 | if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() || | ||
603 | irq_work_needs_cpu() || local_timer_softirq_pending()) { | ||
589 | next_tick = basemono + TICK_NSEC; | 604 | next_tick = basemono + TICK_NSEC; |
590 | } else { | 605 | } else { |
591 | /* | 606 | /* |
diff --git a/kernel/time/timer.c b/kernel/time/timer.c index f575785c7cd8..523fe1669d4c 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c | |||
@@ -764,8 +764,15 @@ static struct tvec_base *lock_timer_base(struct timer_list *timer, | |||
764 | __acquires(timer->base->lock) | 764 | __acquires(timer->base->lock) |
765 | { | 765 | { |
766 | for (;;) { | 766 | for (;;) { |
767 | u32 tf = timer->flags; | ||
768 | struct tvec_base *base; | 767 | struct tvec_base *base; |
768 | u32 tf; | ||
769 | |||
770 | /* | ||
771 | * We need to use READ_ONCE() here, otherwise the compiler | ||
772 | * might re-read @tf between the check for TIMER_MIGRATING | ||
773 | * and spin_lock(). | ||
774 | */ | ||
775 | tf = READ_ONCE(timer->flags); | ||
769 | 776 | ||
770 | if (!(tf & TIMER_MIGRATING)) { | 777 | if (!(tf & TIMER_MIGRATING)) { |
771 | base = per_cpu_ptr(&tvec_bases, tf & TIMER_CPUMASK); | 778 | base = per_cpu_ptr(&tvec_bases, tf & TIMER_CPUMASK); |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index fc0051fd672d..ac758a53fcea 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -3845,7 +3845,6 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | |||
3845 | func_g.type = filter_parse_regex(glob, strlen(glob), | 3845 | func_g.type = filter_parse_regex(glob, strlen(glob), |
3846 | &func_g.search, ¬); | 3846 | &func_g.search, ¬); |
3847 | func_g.len = strlen(func_g.search); | 3847 | func_g.len = strlen(func_g.search); |
3848 | func_g.search = glob; | ||
3849 | 3848 | ||
3850 | /* we do not support '!' for function probes */ | 3849 | /* we do not support '!' for function probes */ |
3851 | if (WARN_ON(not)) | 3850 | if (WARN_ON(not)) |
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 1275175b0946..d9cd6191760b 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -280,6 +280,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_data); | |||
280 | /* Missed count stored at end */ | 280 | /* Missed count stored at end */ |
281 | #define RB_MISSED_STORED (1 << 30) | 281 | #define RB_MISSED_STORED (1 << 30) |
282 | 282 | ||
283 | #define RB_MISSED_FLAGS (RB_MISSED_EVENTS|RB_MISSED_STORED) | ||
284 | |||
283 | struct buffer_data_page { | 285 | struct buffer_data_page { |
284 | u64 time_stamp; /* page time stamp */ | 286 | u64 time_stamp; /* page time stamp */ |
285 | local_t commit; /* write committed index */ | 287 | local_t commit; /* write committed index */ |
@@ -331,7 +333,9 @@ static void rb_init_page(struct buffer_data_page *bpage) | |||
331 | */ | 333 | */ |
332 | size_t ring_buffer_page_len(void *page) | 334 | size_t ring_buffer_page_len(void *page) |
333 | { | 335 | { |
334 | return local_read(&((struct buffer_data_page *)page)->commit) | 336 | struct buffer_data_page *bpage = page; |
337 | |||
338 | return (local_read(&bpage->commit) & ~RB_MISSED_FLAGS) | ||
335 | + BUF_PAGE_HDR_SIZE; | 339 | + BUF_PAGE_HDR_SIZE; |
336 | } | 340 | } |
337 | 341 | ||
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 6d4ff15b8c0d..c180e4dc0fb9 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -5844,7 +5844,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, | |||
5844 | .spd_release = buffer_spd_release, | 5844 | .spd_release = buffer_spd_release, |
5845 | }; | 5845 | }; |
5846 | struct buffer_ref *ref; | 5846 | struct buffer_ref *ref; |
5847 | int entries, size, i; | 5847 | int entries, i; |
5848 | ssize_t ret = 0; | 5848 | ssize_t ret = 0; |
5849 | 5849 | ||
5850 | #ifdef CONFIG_TRACER_MAX_TRACE | 5850 | #ifdef CONFIG_TRACER_MAX_TRACE |
@@ -5895,14 +5895,6 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, | |||
5895 | break; | 5895 | break; |
5896 | } | 5896 | } |
5897 | 5897 | ||
5898 | /* | ||
5899 | * zero out any left over data, this is going to | ||
5900 | * user land. | ||
5901 | */ | ||
5902 | size = ring_buffer_page_len(ref->page); | ||
5903 | if (size < PAGE_SIZE) | ||
5904 | memset(ref->page + size, 0, PAGE_SIZE - size); | ||
5905 | |||
5906 | page = virt_to_page(ref->page); | 5898 | page = virt_to_page(ref->page); |
5907 | 5899 | ||
5908 | spd.pages[i] = page; | 5900 | spd.pages[i] = page; |
@@ -6629,6 +6621,7 @@ allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size | |||
6629 | buf->data = alloc_percpu(struct trace_array_cpu); | 6621 | buf->data = alloc_percpu(struct trace_array_cpu); |
6630 | if (!buf->data) { | 6622 | if (!buf->data) { |
6631 | ring_buffer_free(buf->buffer); | 6623 | ring_buffer_free(buf->buffer); |
6624 | buf->buffer = NULL; | ||
6632 | return -ENOMEM; | 6625 | return -ENOMEM; |
6633 | } | 6626 | } |
6634 | 6627 | ||
@@ -6652,7 +6645,9 @@ static int allocate_trace_buffers(struct trace_array *tr, int size) | |||
6652 | allocate_snapshot ? size : 1); | 6645 | allocate_snapshot ? size : 1); |
6653 | if (WARN_ON(ret)) { | 6646 | if (WARN_ON(ret)) { |
6654 | ring_buffer_free(tr->trace_buffer.buffer); | 6647 | ring_buffer_free(tr->trace_buffer.buffer); |
6648 | tr->trace_buffer.buffer = NULL; | ||
6655 | free_percpu(tr->trace_buffer.data); | 6649 | free_percpu(tr->trace_buffer.data); |
6650 | tr->trace_buffer.data = NULL; | ||
6656 | return -ENOMEM; | 6651 | return -ENOMEM; |
6657 | } | 6652 | } |
6658 | tr->allocated_snapshot = allocate_snapshot; | 6653 | tr->allocated_snapshot = allocate_snapshot; |
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 996f0fd34312..ba5392807912 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
@@ -2300,6 +2300,7 @@ void trace_event_enum_update(struct trace_enum_map **map, int len) | |||
2300 | { | 2300 | { |
2301 | struct trace_event_call *call, *p; | 2301 | struct trace_event_call *call, *p; |
2302 | const char *last_system = NULL; | 2302 | const char *last_system = NULL; |
2303 | bool first = false; | ||
2303 | int last_i; | 2304 | int last_i; |
2304 | int i; | 2305 | int i; |
2305 | 2306 | ||
@@ -2307,15 +2308,28 @@ void trace_event_enum_update(struct trace_enum_map **map, int len) | |||
2307 | list_for_each_entry_safe(call, p, &ftrace_events, list) { | 2308 | list_for_each_entry_safe(call, p, &ftrace_events, list) { |
2308 | /* events are usually grouped together with systems */ | 2309 | /* events are usually grouped together with systems */ |
2309 | if (!last_system || call->class->system != last_system) { | 2310 | if (!last_system || call->class->system != last_system) { |
2311 | first = true; | ||
2310 | last_i = 0; | 2312 | last_i = 0; |
2311 | last_system = call->class->system; | 2313 | last_system = call->class->system; |
2312 | } | 2314 | } |
2313 | 2315 | ||
2316 | /* | ||
2317 | * Since calls are grouped by systems, the likelyhood that the | ||
2318 | * next call in the iteration belongs to the same system as the | ||
2319 | * previous call is high. As an optimization, we skip seaching | ||
2320 | * for a map[] that matches the call's system if the last call | ||
2321 | * was from the same system. That's what last_i is for. If the | ||
2322 | * call has the same system as the previous call, then last_i | ||
2323 | * will be the index of the first map[] that has a matching | ||
2324 | * system. | ||
2325 | */ | ||
2314 | for (i = last_i; i < len; i++) { | 2326 | for (i = last_i; i < len; i++) { |
2315 | if (call->class->system == map[i]->system) { | 2327 | if (call->class->system == map[i]->system) { |
2316 | /* Save the first system if need be */ | 2328 | /* Save the first system if need be */ |
2317 | if (!last_i) | 2329 | if (first) { |
2318 | last_i = i; | 2330 | last_i = i; |
2331 | first = false; | ||
2332 | } | ||
2319 | update_event_printk(call, map[i]); | 2333 | update_event_printk(call, map[i]); |
2320 | } | 2334 | } |
2321 | } | 2335 | } |
diff --git a/kernel/uid16.c b/kernel/uid16.c index d58cc4d8f0d1..651aaa5221ec 100644 --- a/kernel/uid16.c +++ b/kernel/uid16.c | |||
@@ -190,6 +190,7 @@ SYSCALL_DEFINE2(setgroups16, int, gidsetsize, old_gid_t __user *, grouplist) | |||
190 | return retval; | 190 | return retval; |
191 | } | 191 | } |
192 | 192 | ||
193 | groups_sort(group_info); | ||
193 | retval = set_current_groups(group_info); | 194 | retval = set_current_groups(group_info); |
194 | put_group_info(group_info); | 195 | put_group_info(group_info); |
195 | 196 | ||
diff --git a/lib/test_bpf.c b/lib/test_bpf.c index 7e26aea3e404..b7908d949a5f 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c | |||
@@ -5304,9 +5304,8 @@ static struct bpf_prog *generate_filter(int which, int *err) | |||
5304 | return NULL; | 5304 | return NULL; |
5305 | } | 5305 | } |
5306 | } | 5306 | } |
5307 | /* We don't expect to fail. */ | ||
5308 | if (*err) { | 5307 | if (*err) { |
5309 | pr_cont("FAIL to attach err=%d len=%d\n", | 5308 | pr_cont("FAIL to prog_create err=%d len=%d\n", |
5310 | *err, fprog.len); | 5309 | *err, fprog.len); |
5311 | return NULL; | 5310 | return NULL; |
5312 | } | 5311 | } |
@@ -5325,7 +5324,11 @@ static struct bpf_prog *generate_filter(int which, int *err) | |||
5325 | fp->type = BPF_PROG_TYPE_SOCKET_FILTER; | 5324 | fp->type = BPF_PROG_TYPE_SOCKET_FILTER; |
5326 | memcpy(fp->insnsi, fptr, fp->len * sizeof(struct bpf_insn)); | 5325 | memcpy(fp->insnsi, fptr, fp->len * sizeof(struct bpf_insn)); |
5327 | 5326 | ||
5328 | bpf_prog_select_runtime(fp); | 5327 | *err = bpf_prog_select_runtime(fp); |
5328 | if (*err) { | ||
5329 | pr_cont("FAIL to select_runtime err=%d\n", *err); | ||
5330 | return NULL; | ||
5331 | } | ||
5329 | break; | 5332 | break; |
5330 | } | 5333 | } |
5331 | 5334 | ||
@@ -5511,8 +5514,8 @@ static __init int test_bpf(void) | |||
5511 | pass_cnt++; | 5514 | pass_cnt++; |
5512 | continue; | 5515 | continue; |
5513 | } | 5516 | } |
5514 | 5517 | err_cnt++; | |
5515 | return err; | 5518 | continue; |
5516 | } | 5519 | } |
5517 | 5520 | ||
5518 | pr_cont("jited:%u ", fp->jited); | 5521 | pr_cont("jited:%u ", fp->jited); |
@@ -54,7 +54,7 @@ unsigned long cma_get_size(const struct cma *cma) | |||
54 | } | 54 | } |
55 | 55 | ||
56 | static unsigned long cma_bitmap_aligned_mask(const struct cma *cma, | 56 | static unsigned long cma_bitmap_aligned_mask(const struct cma *cma, |
57 | int align_order) | 57 | unsigned int align_order) |
58 | { | 58 | { |
59 | if (align_order <= cma->order_per_bit) | 59 | if (align_order <= cma->order_per_bit) |
60 | return 0; | 60 | return 0; |
@@ -62,17 +62,14 @@ static unsigned long cma_bitmap_aligned_mask(const struct cma *cma, | |||
62 | } | 62 | } |
63 | 63 | ||
64 | /* | 64 | /* |
65 | * Find a PFN aligned to the specified order and return an offset represented in | 65 | * Find the offset of the base PFN from the specified align_order. |
66 | * order_per_bits. | 66 | * The value returned is represented in order_per_bits. |
67 | */ | 67 | */ |
68 | static unsigned long cma_bitmap_aligned_offset(const struct cma *cma, | 68 | static unsigned long cma_bitmap_aligned_offset(const struct cma *cma, |
69 | int align_order) | 69 | unsigned int align_order) |
70 | { | 70 | { |
71 | if (align_order <= cma->order_per_bit) | 71 | return (cma->base_pfn & ((1UL << align_order) - 1)) |
72 | return 0; | 72 | >> cma->order_per_bit; |
73 | |||
74 | return (ALIGN(cma->base_pfn, (1UL << align_order)) | ||
75 | - cma->base_pfn) >> cma->order_per_bit; | ||
76 | } | 73 | } |
77 | 74 | ||
78 | static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma, | 75 | static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma, |
diff --git a/mm/compaction.c b/mm/compaction.c index dba02dec7195..b6f145ed7ae1 100644 --- a/mm/compaction.c +++ b/mm/compaction.c | |||
@@ -200,7 +200,8 @@ static void reset_cached_positions(struct zone *zone) | |||
200 | { | 200 | { |
201 | zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn; | 201 | zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn; |
202 | zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn; | 202 | zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn; |
203 | zone->compact_cached_free_pfn = zone_end_pfn(zone); | 203 | zone->compact_cached_free_pfn = |
204 | round_down(zone_end_pfn(zone) - 1, pageblock_nr_pages); | ||
204 | } | 205 | } |
205 | 206 | ||
206 | /* | 207 | /* |
@@ -552,13 +553,17 @@ unsigned long | |||
552 | isolate_freepages_range(struct compact_control *cc, | 553 | isolate_freepages_range(struct compact_control *cc, |
553 | unsigned long start_pfn, unsigned long end_pfn) | 554 | unsigned long start_pfn, unsigned long end_pfn) |
554 | { | 555 | { |
555 | unsigned long isolated, pfn, block_end_pfn; | 556 | unsigned long isolated, pfn, block_start_pfn, block_end_pfn; |
556 | LIST_HEAD(freelist); | 557 | LIST_HEAD(freelist); |
557 | 558 | ||
558 | pfn = start_pfn; | 559 | pfn = start_pfn; |
560 | block_start_pfn = pfn & ~(pageblock_nr_pages - 1); | ||
561 | if (block_start_pfn < cc->zone->zone_start_pfn) | ||
562 | block_start_pfn = cc->zone->zone_start_pfn; | ||
559 | block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages); | 563 | block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages); |
560 | 564 | ||
561 | for (; pfn < end_pfn; pfn += isolated, | 565 | for (; pfn < end_pfn; pfn += isolated, |
566 | block_start_pfn = block_end_pfn, | ||
562 | block_end_pfn += pageblock_nr_pages) { | 567 | block_end_pfn += pageblock_nr_pages) { |
563 | /* Protect pfn from changing by isolate_freepages_block */ | 568 | /* Protect pfn from changing by isolate_freepages_block */ |
564 | unsigned long isolate_start_pfn = pfn; | 569 | unsigned long isolate_start_pfn = pfn; |
@@ -571,11 +576,13 @@ isolate_freepages_range(struct compact_control *cc, | |||
571 | * scanning range to right one. | 576 | * scanning range to right one. |
572 | */ | 577 | */ |
573 | if (pfn >= block_end_pfn) { | 578 | if (pfn >= block_end_pfn) { |
579 | block_start_pfn = pfn & ~(pageblock_nr_pages - 1); | ||
574 | block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages); | 580 | block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages); |
575 | block_end_pfn = min(block_end_pfn, end_pfn); | 581 | block_end_pfn = min(block_end_pfn, end_pfn); |
576 | } | 582 | } |
577 | 583 | ||
578 | if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone)) | 584 | if (!pageblock_pfn_to_page(block_start_pfn, |
585 | block_end_pfn, cc->zone)) | ||
579 | break; | 586 | break; |
580 | 587 | ||
581 | isolated = isolate_freepages_block(cc, &isolate_start_pfn, | 588 | isolated = isolate_freepages_block(cc, &isolate_start_pfn, |
@@ -861,18 +868,23 @@ unsigned long | |||
861 | isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn, | 868 | isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn, |
862 | unsigned long end_pfn) | 869 | unsigned long end_pfn) |
863 | { | 870 | { |
864 | unsigned long pfn, block_end_pfn; | 871 | unsigned long pfn, block_start_pfn, block_end_pfn; |
865 | 872 | ||
866 | /* Scan block by block. First and last block may be incomplete */ | 873 | /* Scan block by block. First and last block may be incomplete */ |
867 | pfn = start_pfn; | 874 | pfn = start_pfn; |
875 | block_start_pfn = pfn & ~(pageblock_nr_pages - 1); | ||
876 | if (block_start_pfn < cc->zone->zone_start_pfn) | ||
877 | block_start_pfn = cc->zone->zone_start_pfn; | ||
868 | block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages); | 878 | block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages); |
869 | 879 | ||
870 | for (; pfn < end_pfn; pfn = block_end_pfn, | 880 | for (; pfn < end_pfn; pfn = block_end_pfn, |
881 | block_start_pfn = block_end_pfn, | ||
871 | block_end_pfn += pageblock_nr_pages) { | 882 | block_end_pfn += pageblock_nr_pages) { |
872 | 883 | ||
873 | block_end_pfn = min(block_end_pfn, end_pfn); | 884 | block_end_pfn = min(block_end_pfn, end_pfn); |
874 | 885 | ||
875 | if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone)) | 886 | if (!pageblock_pfn_to_page(block_start_pfn, |
887 | block_end_pfn, cc->zone)) | ||
876 | continue; | 888 | continue; |
877 | 889 | ||
878 | pfn = isolate_migratepages_block(cc, pfn, block_end_pfn, | 890 | pfn = isolate_migratepages_block(cc, pfn, block_end_pfn, |
@@ -1090,7 +1102,9 @@ int sysctl_compact_unevictable_allowed __read_mostly = 1; | |||
1090 | static isolate_migrate_t isolate_migratepages(struct zone *zone, | 1102 | static isolate_migrate_t isolate_migratepages(struct zone *zone, |
1091 | struct compact_control *cc) | 1103 | struct compact_control *cc) |
1092 | { | 1104 | { |
1093 | unsigned long low_pfn, end_pfn; | 1105 | unsigned long block_start_pfn; |
1106 | unsigned long block_end_pfn; | ||
1107 | unsigned long low_pfn; | ||
1094 | unsigned long isolate_start_pfn; | 1108 | unsigned long isolate_start_pfn; |
1095 | struct page *page; | 1109 | struct page *page; |
1096 | const isolate_mode_t isolate_mode = | 1110 | const isolate_mode_t isolate_mode = |
@@ -1102,16 +1116,21 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone, | |||
1102 | * initialized by compact_zone() | 1116 | * initialized by compact_zone() |
1103 | */ | 1117 | */ |
1104 | low_pfn = cc->migrate_pfn; | 1118 | low_pfn = cc->migrate_pfn; |
1119 | block_start_pfn = cc->migrate_pfn & ~(pageblock_nr_pages - 1); | ||
1120 | if (block_start_pfn < zone->zone_start_pfn) | ||
1121 | block_start_pfn = zone->zone_start_pfn; | ||
1105 | 1122 | ||
1106 | /* Only scan within a pageblock boundary */ | 1123 | /* Only scan within a pageblock boundary */ |
1107 | end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages); | 1124 | block_end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages); |
1108 | 1125 | ||
1109 | /* | 1126 | /* |
1110 | * Iterate over whole pageblocks until we find the first suitable. | 1127 | * Iterate over whole pageblocks until we find the first suitable. |
1111 | * Do not cross the free scanner. | 1128 | * Do not cross the free scanner. |
1112 | */ | 1129 | */ |
1113 | for (; end_pfn <= cc->free_pfn; | 1130 | for (; block_end_pfn <= cc->free_pfn; |
1114 | low_pfn = end_pfn, end_pfn += pageblock_nr_pages) { | 1131 | low_pfn = block_end_pfn, |
1132 | block_start_pfn = block_end_pfn, | ||
1133 | block_end_pfn += pageblock_nr_pages) { | ||
1115 | 1134 | ||
1116 | /* | 1135 | /* |
1117 | * This can potentially iterate a massively long zone with | 1136 | * This can potentially iterate a massively long zone with |
@@ -1122,7 +1141,8 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone, | |||
1122 | && compact_should_abort(cc)) | 1141 | && compact_should_abort(cc)) |
1123 | break; | 1142 | break; |
1124 | 1143 | ||
1125 | page = pageblock_pfn_to_page(low_pfn, end_pfn, zone); | 1144 | page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn, |
1145 | zone); | ||
1126 | if (!page) | 1146 | if (!page) |
1127 | continue; | 1147 | continue; |
1128 | 1148 | ||
@@ -1141,8 +1161,8 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone, | |||
1141 | 1161 | ||
1142 | /* Perform the isolation */ | 1162 | /* Perform the isolation */ |
1143 | isolate_start_pfn = low_pfn; | 1163 | isolate_start_pfn = low_pfn; |
1144 | low_pfn = isolate_migratepages_block(cc, low_pfn, end_pfn, | 1164 | low_pfn = isolate_migratepages_block(cc, low_pfn, |
1145 | isolate_mode); | 1165 | block_end_pfn, isolate_mode); |
1146 | 1166 | ||
1147 | if (!low_pfn || cc->contended) { | 1167 | if (!low_pfn || cc->contended) { |
1148 | acct_isolated(zone, cc); | 1168 | acct_isolated(zone, cc); |
@@ -1358,11 +1378,11 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) | |||
1358 | */ | 1378 | */ |
1359 | cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync]; | 1379 | cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync]; |
1360 | cc->free_pfn = zone->compact_cached_free_pfn; | 1380 | cc->free_pfn = zone->compact_cached_free_pfn; |
1361 | if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) { | 1381 | if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) { |
1362 | cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1); | 1382 | cc->free_pfn = round_down(end_pfn - 1, pageblock_nr_pages); |
1363 | zone->compact_cached_free_pfn = cc->free_pfn; | 1383 | zone->compact_cached_free_pfn = cc->free_pfn; |
1364 | } | 1384 | } |
1365 | if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) { | 1385 | if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) { |
1366 | cc->migrate_pfn = start_pfn; | 1386 | cc->migrate_pfn = start_pfn; |
1367 | zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn; | 1387 | zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn; |
1368 | zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn; | 1388 | zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn; |
diff --git a/mm/kmemleak.c b/mm/kmemleak.c index ae113cf8f3b9..c98cf85c1086 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c | |||
@@ -1441,6 +1441,8 @@ static void kmemleak_scan(void) | |||
1441 | if (page_count(page) == 0) | 1441 | if (page_count(page) == 0) |
1442 | continue; | 1442 | continue; |
1443 | scan_block(page, page + 1, NULL); | 1443 | scan_block(page, page + 1, NULL); |
1444 | if (!(pfn % (MAX_SCAN_SIZE / sizeof(*page)))) | ||
1445 | cond_resched(); | ||
1444 | } | 1446 | } |
1445 | } | 1447 | } |
1446 | put_online_mems(); | 1448 | put_online_mems(); |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index e25b93a4267d..55a9facb8e8d 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -5576,7 +5576,7 @@ static void uncharge_list(struct list_head *page_list) | |||
5576 | next = page->lru.next; | 5576 | next = page->lru.next; |
5577 | 5577 | ||
5578 | VM_BUG_ON_PAGE(PageLRU(page), page); | 5578 | VM_BUG_ON_PAGE(PageLRU(page), page); |
5579 | VM_BUG_ON_PAGE(page_count(page), page); | 5579 | VM_BUG_ON_PAGE(!PageHWPoison(page) && page_count(page), page); |
5580 | 5580 | ||
5581 | if (!page->mem_cgroup) | 5581 | if (!page->mem_cgroup) |
5582 | continue; | 5582 | continue; |
diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 091fe9b06663..92a647957f91 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c | |||
@@ -539,6 +539,13 @@ static int delete_from_lru_cache(struct page *p) | |||
539 | */ | 539 | */ |
540 | ClearPageActive(p); | 540 | ClearPageActive(p); |
541 | ClearPageUnevictable(p); | 541 | ClearPageUnevictable(p); |
542 | |||
543 | /* | ||
544 | * Poisoned page might never drop its ref count to 0 so we have | ||
545 | * to uncharge it manually from its memcg. | ||
546 | */ | ||
547 | mem_cgroup_uncharge(p); | ||
548 | |||
542 | /* | 549 | /* |
543 | * drop the page count elevated by isolate_lru_page() | 550 | * drop the page count elevated by isolate_lru_page() |
544 | */ | 551 | */ |
diff --git a/mm/memory.c b/mm/memory.c index 9ac55172aa7b..31ca97f7ebbc 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -72,7 +72,7 @@ | |||
72 | 72 | ||
73 | #include "internal.h" | 73 | #include "internal.h" |
74 | 74 | ||
75 | #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS | 75 | #if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST) |
76 | #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid. | 76 | #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid. |
77 | #endif | 77 | #endif |
78 | 78 | ||
@@ -2209,7 +2209,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) | |||
2209 | gap_addr = TASK_SIZE; | 2209 | gap_addr = TASK_SIZE; |
2210 | 2210 | ||
2211 | next = vma->vm_next; | 2211 | next = vma->vm_next; |
2212 | if (next && next->vm_start < gap_addr) { | 2212 | if (next && next->vm_start < gap_addr && |
2213 | (next->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) { | ||
2213 | if (!(next->vm_flags & VM_GROWSUP)) | 2214 | if (!(next->vm_flags & VM_GROWSUP)) |
2214 | return -ENOMEM; | 2215 | return -ENOMEM; |
2215 | /* Check that both stack segments have the same anon_vma? */ | 2216 | /* Check that both stack segments have the same anon_vma? */ |
@@ -2294,7 +2295,8 @@ int expand_downwards(struct vm_area_struct *vma, | |||
2294 | if (gap_addr > address) | 2295 | if (gap_addr > address) |
2295 | return -ENOMEM; | 2296 | return -ENOMEM; |
2296 | prev = vma->vm_prev; | 2297 | prev = vma->vm_prev; |
2297 | if (prev && prev->vm_end > gap_addr) { | 2298 | if (prev && prev->vm_end > gap_addr && |
2299 | (prev->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) { | ||
2298 | if (!(prev->vm_flags & VM_GROWSDOWN)) | 2300 | if (!(prev->vm_flags & VM_GROWSDOWN)) |
2299 | return -ENOMEM; | 2301 | return -ENOMEM; |
2300 | /* Check that both stack segments have the same anon_vma? */ | 2302 | /* Check that both stack segments have the same anon_vma? */ |
diff --git a/mm/mmu_context.c b/mm/mmu_context.c index f802c2d216a7..6f4d27c5bb32 100644 --- a/mm/mmu_context.c +++ b/mm/mmu_context.c | |||
@@ -4,9 +4,9 @@ | |||
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include <linux/mm.h> | 6 | #include <linux/mm.h> |
7 | #include <linux/sched.h> | ||
7 | #include <linux/mmu_context.h> | 8 | #include <linux/mmu_context.h> |
8 | #include <linux/export.h> | 9 | #include <linux/export.h> |
9 | #include <linux/sched.h> | ||
10 | 10 | ||
11 | #include <asm/mmu_context.h> | 11 | #include <asm/mmu_context.h> |
12 | 12 | ||
diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 1e6769449ac2..71b0f525180a 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c | |||
@@ -1172,6 +1172,7 @@ static void wb_update_dirty_ratelimit(struct dirty_throttle_control *dtc, | |||
1172 | unsigned long balanced_dirty_ratelimit; | 1172 | unsigned long balanced_dirty_ratelimit; |
1173 | unsigned long step; | 1173 | unsigned long step; |
1174 | unsigned long x; | 1174 | unsigned long x; |
1175 | unsigned long shift; | ||
1175 | 1176 | ||
1176 | /* | 1177 | /* |
1177 | * The dirty rate will match the writeout rate in long term, except | 1178 | * The dirty rate will match the writeout rate in long term, except |
@@ -1296,11 +1297,11 @@ static void wb_update_dirty_ratelimit(struct dirty_throttle_control *dtc, | |||
1296 | * rate itself is constantly fluctuating. So decrease the track speed | 1297 | * rate itself is constantly fluctuating. So decrease the track speed |
1297 | * when it gets close to the target. Helps eliminate pointless tremors. | 1298 | * when it gets close to the target. Helps eliminate pointless tremors. |
1298 | */ | 1299 | */ |
1299 | step >>= dirty_ratelimit / (2 * step + 1); | 1300 | shift = dirty_ratelimit / (2 * step + 1); |
1300 | /* | 1301 | if (shift < BITS_PER_LONG) |
1301 | * Limit the tracking speed to avoid overshooting. | 1302 | step = DIV_ROUND_UP(step >> shift, 8); |
1302 | */ | 1303 | else |
1303 | step = (step + 7) / 8; | 1304 | step = 0; |
1304 | 1305 | ||
1305 | if (dirty_ratelimit < balanced_dirty_ratelimit) | 1306 | if (dirty_ratelimit < balanced_dirty_ratelimit) |
1306 | dirty_ratelimit += step; | 1307 | dirty_ratelimit += step; |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 02c2f11c0e63..36a0940469b6 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -2473,9 +2473,6 @@ static bool __zone_watermark_ok(struct zone *z, unsigned int order, | |||
2473 | if (!area->nr_free) | 2473 | if (!area->nr_free) |
2474 | continue; | 2474 | continue; |
2475 | 2475 | ||
2476 | if (alloc_harder) | ||
2477 | return true; | ||
2478 | |||
2479 | for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) { | 2476 | for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) { |
2480 | if (!list_empty(&area->free_list[mt])) | 2477 | if (!list_empty(&area->free_list[mt])) |
2481 | return true; | 2478 | return true; |
@@ -2487,6 +2484,9 @@ static bool __zone_watermark_ok(struct zone *z, unsigned int order, | |||
2487 | return true; | 2484 | return true; |
2488 | } | 2485 | } |
2489 | #endif | 2486 | #endif |
2487 | if (alloc_harder && | ||
2488 | !list_empty(&area->free_list[MIGRATE_HIGHATOMIC])) | ||
2489 | return true; | ||
2490 | } | 2490 | } |
2491 | return false; | 2491 | return false; |
2492 | } | 2492 | } |
@@ -587,19 +587,6 @@ vma_address(struct page *page, struct vm_area_struct *vma) | |||
587 | } | 587 | } |
588 | 588 | ||
589 | #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH | 589 | #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH |
590 | static void percpu_flush_tlb_batch_pages(void *data) | ||
591 | { | ||
592 | /* | ||
593 | * All TLB entries are flushed on the assumption that it is | ||
594 | * cheaper to flush all TLBs and let them be refilled than | ||
595 | * flushing individual PFNs. Note that we do not track mm's | ||
596 | * to flush as that might simply be multiple full TLB flushes | ||
597 | * for no gain. | ||
598 | */ | ||
599 | count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); | ||
600 | flush_tlb_local(); | ||
601 | } | ||
602 | |||
603 | /* | 590 | /* |
604 | * Flush TLB entries for recently unmapped pages from remote CPUs. It is | 591 | * Flush TLB entries for recently unmapped pages from remote CPUs. It is |
605 | * important if a PTE was dirty when it was unmapped that it's flushed | 592 | * important if a PTE was dirty when it was unmapped that it's flushed |
@@ -616,15 +603,14 @@ void try_to_unmap_flush(void) | |||
616 | 603 | ||
617 | cpu = get_cpu(); | 604 | cpu = get_cpu(); |
618 | 605 | ||
619 | trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, -1UL); | 606 | if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask)) { |
620 | 607 | count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); | |
621 | if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask)) | 608 | local_flush_tlb(); |
622 | percpu_flush_tlb_batch_pages(&tlb_ubc->cpumask); | 609 | trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL); |
623 | |||
624 | if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids) { | ||
625 | smp_call_function_many(&tlb_ubc->cpumask, | ||
626 | percpu_flush_tlb_batch_pages, (void *)tlb_ubc, true); | ||
627 | } | 610 | } |
611 | |||
612 | if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids) | ||
613 | flush_tlb_others(&tlb_ubc->cpumask, NULL, 0, TLB_FLUSH_ALL); | ||
628 | cpumask_clear(&tlb_ubc->cpumask); | 614 | cpumask_clear(&tlb_ubc->cpumask); |
629 | tlb_ubc->flush_required = false; | 615 | tlb_ubc->flush_required = false; |
630 | tlb_ubc->writable = false; | 616 | tlb_ubc->writable = false; |
diff --git a/mm/vmstat.c b/mm/vmstat.c index 44618235722c..8640a185dfc6 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c | |||
@@ -738,6 +738,7 @@ const char * const vmstat_text[] = { | |||
738 | "nr_slab_unreclaimable", | 738 | "nr_slab_unreclaimable", |
739 | "nr_page_table_pages", | 739 | "nr_page_table_pages", |
740 | "nr_kernel_stack", | 740 | "nr_kernel_stack", |
741 | "nr_overhead", | ||
741 | "nr_unstable", | 742 | "nr_unstable", |
742 | "nr_bounce", | 743 | "nr_bounce", |
743 | "nr_vmscan_write", | 744 | "nr_vmscan_write", |
@@ -1352,7 +1353,9 @@ static int vmstat_show(struct seq_file *m, void *arg) | |||
1352 | unsigned long *l = arg; | 1353 | unsigned long *l = arg; |
1353 | unsigned long off = l - (unsigned long *)m->private; | 1354 | unsigned long off = l - (unsigned long *)m->private; |
1354 | 1355 | ||
1355 | seq_printf(m, "%s %lu\n", vmstat_text[off], *l); | 1356 | seq_puts(m, vmstat_text[off]); |
1357 | seq_put_decimal_ull(m, ' ', *l); | ||
1358 | seq_putc(m, '\n'); | ||
1356 | return 0; | 1359 | return 0; |
1357 | } | 1360 | } |
1358 | 1361 | ||
diff --git a/mm/zswap.c b/mm/zswap.c index 45476f429789..568015e2fe7a 100644 --- a/mm/zswap.c +++ b/mm/zswap.c | |||
@@ -123,7 +123,7 @@ struct zswap_pool { | |||
123 | struct crypto_comp * __percpu *tfm; | 123 | struct crypto_comp * __percpu *tfm; |
124 | struct kref kref; | 124 | struct kref kref; |
125 | struct list_head list; | 125 | struct list_head list; |
126 | struct rcu_head rcu_head; | 126 | struct work_struct work; |
127 | struct notifier_block notifier; | 127 | struct notifier_block notifier; |
128 | char tfm_name[CRYPTO_MAX_ALG_NAME]; | 128 | char tfm_name[CRYPTO_MAX_ALG_NAME]; |
129 | }; | 129 | }; |
@@ -667,9 +667,11 @@ static int __must_check zswap_pool_get(struct zswap_pool *pool) | |||
667 | return kref_get_unless_zero(&pool->kref); | 667 | return kref_get_unless_zero(&pool->kref); |
668 | } | 668 | } |
669 | 669 | ||
670 | static void __zswap_pool_release(struct rcu_head *head) | 670 | static void __zswap_pool_release(struct work_struct *work) |
671 | { | 671 | { |
672 | struct zswap_pool *pool = container_of(head, typeof(*pool), rcu_head); | 672 | struct zswap_pool *pool = container_of(work, typeof(*pool), work); |
673 | |||
674 | synchronize_rcu(); | ||
673 | 675 | ||
674 | /* nobody should have been able to get a kref... */ | 676 | /* nobody should have been able to get a kref... */ |
675 | WARN_ON(kref_get_unless_zero(&pool->kref)); | 677 | WARN_ON(kref_get_unless_zero(&pool->kref)); |
@@ -689,7 +691,9 @@ static void __zswap_pool_empty(struct kref *kref) | |||
689 | WARN_ON(pool == zswap_pool_current()); | 691 | WARN_ON(pool == zswap_pool_current()); |
690 | 692 | ||
691 | list_del_rcu(&pool->list); | 693 | list_del_rcu(&pool->list); |
692 | call_rcu(&pool->rcu_head, __zswap_pool_release); | 694 | |
695 | INIT_WORK(&pool->work, __zswap_pool_release); | ||
696 | schedule_work(&pool->work); | ||
693 | 697 | ||
694 | spin_unlock(&zswap_pools_lock); | 698 | spin_unlock(&zswap_pools_lock); |
695 | } | 699 | } |
@@ -748,18 +752,22 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp, | |||
748 | pool = zswap_pool_find_get(type, compressor); | 752 | pool = zswap_pool_find_get(type, compressor); |
749 | if (pool) { | 753 | if (pool) { |
750 | zswap_pool_debug("using existing", pool); | 754 | zswap_pool_debug("using existing", pool); |
755 | WARN_ON(pool == zswap_pool_current()); | ||
751 | list_del_rcu(&pool->list); | 756 | list_del_rcu(&pool->list); |
752 | } else { | ||
753 | spin_unlock(&zswap_pools_lock); | ||
754 | pool = zswap_pool_create(type, compressor); | ||
755 | spin_lock(&zswap_pools_lock); | ||
756 | } | 757 | } |
757 | 758 | ||
759 | spin_unlock(&zswap_pools_lock); | ||
760 | |||
761 | if (!pool) | ||
762 | pool = zswap_pool_create(type, compressor); | ||
763 | |||
758 | if (pool) | 764 | if (pool) |
759 | ret = param_set_charp(s, kp); | 765 | ret = param_set_charp(s, kp); |
760 | else | 766 | else |
761 | ret = -EINVAL; | 767 | ret = -EINVAL; |
762 | 768 | ||
769 | spin_lock(&zswap_pools_lock); | ||
770 | |||
763 | if (!ret) { | 771 | if (!ret) { |
764 | put_pool = zswap_pool_current(); | 772 | put_pool = zswap_pool_current(); |
765 | list_add_rcu(&pool->list, &zswap_pools); | 773 | list_add_rcu(&pool->list, &zswap_pools); |
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 01abb6431fd9..e2713b0794ae 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c | |||
@@ -111,12 +111,7 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head) | |||
111 | vlan_gvrp_uninit_applicant(real_dev); | 111 | vlan_gvrp_uninit_applicant(real_dev); |
112 | } | 112 | } |
113 | 113 | ||
114 | /* Take it out of our own structures, but be sure to interlock with | 114 | vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id); |
115 | * HW accelerating devices or SW vlan input packet processing if | ||
116 | * VLAN is not 0 (leave it there for 802.1p). | ||
117 | */ | ||
118 | if (vlan_id) | ||
119 | vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id); | ||
120 | 115 | ||
121 | /* Get rid of the vlan's reference to real_dev */ | 116 | /* Get rid of the vlan's reference to real_dev */ |
122 | dev_put(real_dev); | 117 | dev_put(real_dev); |
diff --git a/net/Kconfig b/net/Kconfig index ce9585cf343a..16a19fd143d1 100644 --- a/net/Kconfig +++ b/net/Kconfig | |||
@@ -394,3 +394,6 @@ endif # if NET | |||
394 | # Used by archs to tell that they support BPF_JIT | 394 | # Used by archs to tell that they support BPF_JIT |
395 | config HAVE_BPF_JIT | 395 | config HAVE_BPF_JIT |
396 | bool | 396 | bool |
397 | |||
398 | config HAVE_EBPF_JIT | ||
399 | bool | ||
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 357bcd34cf1f..af68674690af 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c | |||
@@ -3342,9 +3342,10 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data | |||
3342 | break; | 3342 | break; |
3343 | 3343 | ||
3344 | case L2CAP_CONF_EFS: | 3344 | case L2CAP_CONF_EFS: |
3345 | remote_efs = 1; | 3345 | if (olen == sizeof(efs)) { |
3346 | if (olen == sizeof(efs)) | 3346 | remote_efs = 1; |
3347 | memcpy(&efs, (void *) val, olen); | 3347 | memcpy(&efs, (void *) val, olen); |
3348 | } | ||
3348 | break; | 3349 | break; |
3349 | 3350 | ||
3350 | case L2CAP_CONF_EWS: | 3351 | case L2CAP_CONF_EWS: |
@@ -3563,16 +3564,17 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, | |||
3563 | break; | 3564 | break; |
3564 | 3565 | ||
3565 | case L2CAP_CONF_EFS: | 3566 | case L2CAP_CONF_EFS: |
3566 | if (olen == sizeof(efs)) | 3567 | if (olen == sizeof(efs)) { |
3567 | memcpy(&efs, (void *)val, olen); | 3568 | memcpy(&efs, (void *)val, olen); |
3568 | 3569 | ||
3569 | if (chan->local_stype != L2CAP_SERV_NOTRAFIC && | 3570 | if (chan->local_stype != L2CAP_SERV_NOTRAFIC && |
3570 | efs.stype != L2CAP_SERV_NOTRAFIC && | 3571 | efs.stype != L2CAP_SERV_NOTRAFIC && |
3571 | efs.stype != chan->local_stype) | 3572 | efs.stype != chan->local_stype) |
3572 | return -ECONNREFUSED; | 3573 | return -ECONNREFUSED; |
3573 | 3574 | ||
3574 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs), | 3575 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs), |
3575 | (unsigned long) &efs, endptr - ptr); | 3576 | (unsigned long) &efs, endptr - ptr); |
3577 | } | ||
3576 | break; | 3578 | break; |
3577 | 3579 | ||
3578 | case L2CAP_CONF_FCS: | 3580 | case L2CAP_CONF_FCS: |
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index a1f697ec4fc2..0ce26a0f7913 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c | |||
@@ -1067,19 +1067,20 @@ static int br_dev_newlink(struct net *src_net, struct net_device *dev, | |||
1067 | struct net_bridge *br = netdev_priv(dev); | 1067 | struct net_bridge *br = netdev_priv(dev); |
1068 | int err; | 1068 | int err; |
1069 | 1069 | ||
1070 | err = register_netdevice(dev); | ||
1071 | if (err) | ||
1072 | return err; | ||
1073 | |||
1070 | if (tb[IFLA_ADDRESS]) { | 1074 | if (tb[IFLA_ADDRESS]) { |
1071 | spin_lock_bh(&br->lock); | 1075 | spin_lock_bh(&br->lock); |
1072 | br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS])); | 1076 | br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS])); |
1073 | spin_unlock_bh(&br->lock); | 1077 | spin_unlock_bh(&br->lock); |
1074 | } | 1078 | } |
1075 | 1079 | ||
1076 | err = register_netdevice(dev); | ||
1077 | if (err) | ||
1078 | return err; | ||
1079 | |||
1080 | err = br_changelink(dev, tb, data); | 1080 | err = br_changelink(dev, tb, data); |
1081 | if (err) | 1081 | if (err) |
1082 | unregister_netdevice(dev); | 1082 | br_dev_delete(dev, NULL); |
1083 | |||
1083 | return err; | 1084 | return err; |
1084 | } | 1085 | } |
1085 | 1086 | ||
diff --git a/net/can/af_can.c b/net/can/af_can.c index 928f58064098..c866e761651a 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c | |||
@@ -722,13 +722,12 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev, | |||
722 | if (unlikely(!net_eq(dev_net(dev), &init_net))) | 722 | if (unlikely(!net_eq(dev_net(dev), &init_net))) |
723 | goto drop; | 723 | goto drop; |
724 | 724 | ||
725 | if (WARN_ONCE(dev->type != ARPHRD_CAN || | 725 | if (unlikely(dev->type != ARPHRD_CAN || skb->len != CAN_MTU || |
726 | skb->len != CAN_MTU || | 726 | cfd->len > CAN_MAX_DLEN)) { |
727 | cfd->len > CAN_MAX_DLEN, | 727 | pr_warn_once("PF_CAN: dropped non conform CAN skbuf: dev type %d, len %d, datalen %d\n", |
728 | "PF_CAN: dropped non conform CAN skbuf: " | 728 | dev->type, skb->len, cfd->len); |
729 | "dev type %d, len %d, datalen %d\n", | ||
730 | dev->type, skb->len, cfd->len)) | ||
731 | goto drop; | 729 | goto drop; |
730 | } | ||
732 | 731 | ||
733 | can_receive(skb, dev); | 732 | can_receive(skb, dev); |
734 | return NET_RX_SUCCESS; | 733 | return NET_RX_SUCCESS; |
@@ -746,13 +745,12 @@ static int canfd_rcv(struct sk_buff *skb, struct net_device *dev, | |||
746 | if (unlikely(!net_eq(dev_net(dev), &init_net))) | 745 | if (unlikely(!net_eq(dev_net(dev), &init_net))) |
747 | goto drop; | 746 | goto drop; |
748 | 747 | ||
749 | if (WARN_ONCE(dev->type != ARPHRD_CAN || | 748 | if (unlikely(dev->type != ARPHRD_CAN || skb->len != CANFD_MTU || |
750 | skb->len != CANFD_MTU || | 749 | cfd->len > CANFD_MAX_DLEN)) { |
751 | cfd->len > CANFD_MAX_DLEN, | 750 | pr_warn_once("PF_CAN: dropped non conform CAN FD skbuf: dev type %d, len %d, datalen %d\n", |
752 | "PF_CAN: dropped non conform CAN FD skbuf: " | 751 | dev->type, skb->len, cfd->len); |
753 | "dev type %d, len %d, datalen %d\n", | ||
754 | dev->type, skb->len, cfd->len)) | ||
755 | goto drop; | 752 | goto drop; |
753 | } | ||
756 | 754 | ||
757 | can_receive(skb, dev); | 755 | can_receive(skb, dev); |
758 | return NET_RX_SUCCESS; | 756 | return NET_RX_SUCCESS; |
diff --git a/net/core/dev.c b/net/core/dev.c index 3b67c1e5756f..cb58ba15d51e 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -2889,10 +2889,21 @@ static void qdisc_pkt_len_init(struct sk_buff *skb) | |||
2889 | hdr_len = skb_transport_header(skb) - skb_mac_header(skb); | 2889 | hdr_len = skb_transport_header(skb) - skb_mac_header(skb); |
2890 | 2890 | ||
2891 | /* + transport layer */ | 2891 | /* + transport layer */ |
2892 | if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) | 2892 | if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) { |
2893 | hdr_len += tcp_hdrlen(skb); | 2893 | const struct tcphdr *th; |
2894 | else | 2894 | struct tcphdr _tcphdr; |
2895 | hdr_len += sizeof(struct udphdr); | 2895 | |
2896 | th = skb_header_pointer(skb, skb_transport_offset(skb), | ||
2897 | sizeof(_tcphdr), &_tcphdr); | ||
2898 | if (likely(th)) | ||
2899 | hdr_len += __tcp_hdrlen(th); | ||
2900 | } else { | ||
2901 | struct udphdr _udphdr; | ||
2902 | |||
2903 | if (skb_header_pointer(skb, skb_transport_offset(skb), | ||
2904 | sizeof(_udphdr), &_udphdr)) | ||
2905 | hdr_len += sizeof(struct udphdr); | ||
2906 | } | ||
2896 | 2907 | ||
2897 | if (shinfo->gso_type & SKB_GSO_DODGY) | 2908 | if (shinfo->gso_type & SKB_GSO_DODGY) |
2898 | gso_segs = DIV_ROUND_UP(skb->len - hdr_len, | 2909 | gso_segs = DIV_ROUND_UP(skb->len - hdr_len, |
diff --git a/net/core/filter.c b/net/core/filter.c index e94355452166..1a9ded6af138 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
@@ -430,6 +430,10 @@ do_pass: | |||
430 | convert_bpf_extensions(fp, &insn)) | 430 | convert_bpf_extensions(fp, &insn)) |
431 | break; | 431 | break; |
432 | 432 | ||
433 | if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) || | ||
434 | fp->code == (BPF_ALU | BPF_MOD | BPF_X)) | ||
435 | *insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X); | ||
436 | |||
433 | *insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k); | 437 | *insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k); |
434 | break; | 438 | break; |
435 | 439 | ||
@@ -984,7 +988,9 @@ static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp) | |||
984 | */ | 988 | */ |
985 | goto out_err_free; | 989 | goto out_err_free; |
986 | 990 | ||
987 | bpf_prog_select_runtime(fp); | 991 | err = bpf_prog_select_runtime(fp); |
992 | if (err) | ||
993 | goto out_err_free; | ||
988 | 994 | ||
989 | kfree(old_prog); | 995 | kfree(old_prog); |
990 | return fp; | 996 | return fp; |
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index ee9082792530..4d14908afaec 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c | |||
@@ -492,8 +492,8 @@ ip_proto_again: | |||
492 | out_good: | 492 | out_good: |
493 | ret = true; | 493 | ret = true; |
494 | 494 | ||
495 | key_control->thoff = (u16)nhoff; | ||
496 | out: | 495 | out: |
496 | key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen); | ||
497 | key_basic->n_proto = proto; | 497 | key_basic->n_proto = proto; |
498 | key_basic->ip_proto = ip_proto; | 498 | key_basic->ip_proto = ip_proto; |
499 | 499 | ||
@@ -501,7 +501,6 @@ out: | |||
501 | 501 | ||
502 | out_bad: | 502 | out_bad: |
503 | ret = false; | 503 | ret = false; |
504 | key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen); | ||
505 | goto out; | 504 | goto out; |
506 | } | 505 | } |
507 | EXPORT_SYMBOL(__skb_flow_dissect); | 506 | EXPORT_SYMBOL(__skb_flow_dissect); |
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index ae92131c4f89..253c86b78ff0 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c | |||
@@ -496,7 +496,7 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey, | |||
496 | if (atomic_read(&tbl->entries) > (1 << nht->hash_shift)) | 496 | if (atomic_read(&tbl->entries) > (1 << nht->hash_shift)) |
497 | nht = neigh_hash_grow(tbl, nht->hash_shift + 1); | 497 | nht = neigh_hash_grow(tbl, nht->hash_shift + 1); |
498 | 498 | ||
499 | hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift); | 499 | hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift); |
500 | 500 | ||
501 | if (n->parms->dead) { | 501 | if (n->parms->dead) { |
502 | rc = ERR_PTR(-EINVAL); | 502 | rc = ERR_PTR(-EINVAL); |
@@ -508,7 +508,7 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey, | |||
508 | n1 != NULL; | 508 | n1 != NULL; |
509 | n1 = rcu_dereference_protected(n1->next, | 509 | n1 = rcu_dereference_protected(n1->next, |
510 | lockdep_is_held(&tbl->lock))) { | 510 | lockdep_is_held(&tbl->lock))) { |
511 | if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) { | 511 | if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) { |
512 | if (want_ref) | 512 | if (want_ref) |
513 | neigh_hold(n1); | 513 | neigh_hold(n1); |
514 | rc = n1; | 514 | rc = n1; |
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 2e9a1c2818c7..b5c351d2830b 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c | |||
@@ -261,7 +261,7 @@ struct net *get_net_ns_by_id(struct net *net, int id) | |||
261 | spin_lock_irqsave(&net->nsid_lock, flags); | 261 | spin_lock_irqsave(&net->nsid_lock, flags); |
262 | peer = idr_find(&net->netns_ids, id); | 262 | peer = idr_find(&net->netns_ids, id); |
263 | if (peer) | 263 | if (peer) |
264 | get_net(peer); | 264 | peer = maybe_get_net(peer); |
265 | spin_unlock_irqrestore(&net->nsid_lock, flags); | 265 | spin_unlock_irqrestore(&net->nsid_lock, flags); |
266 | rcu_read_unlock(); | 266 | rcu_read_unlock(); |
267 | 267 | ||
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index d33609c2f276..86b619501350 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -3676,7 +3676,7 @@ void skb_complete_tx_timestamp(struct sk_buff *skb, | |||
3676 | struct sock *sk = skb->sk; | 3676 | struct sock *sk = skb->sk; |
3677 | 3677 | ||
3678 | if (!skb_may_tx_timestamp(sk, false)) | 3678 | if (!skb_may_tx_timestamp(sk, false)) |
3679 | return; | 3679 | goto err; |
3680 | 3680 | ||
3681 | /* Take a reference to prevent skb_orphan() from freeing the socket, | 3681 | /* Take a reference to prevent skb_orphan() from freeing the socket, |
3682 | * but only if the socket refcount is not zero. | 3682 | * but only if the socket refcount is not zero. |
@@ -3685,7 +3685,11 @@ void skb_complete_tx_timestamp(struct sk_buff *skb, | |||
3685 | *skb_hwtstamps(skb) = *hwtstamps; | 3685 | *skb_hwtstamps(skb) = *hwtstamps; |
3686 | __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND); | 3686 | __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND); |
3687 | sock_put(sk); | 3687 | sock_put(sk); |
3688 | return; | ||
3688 | } | 3689 | } |
3690 | |||
3691 | err: | ||
3692 | kfree_skb(skb); | ||
3689 | } | 3693 | } |
3690 | EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); | 3694 | EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); |
3691 | 3695 | ||
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c index 3963c3872c69..9653798da293 100644 --- a/net/core/sock_diag.c +++ b/net/core/sock_diag.c | |||
@@ -294,7 +294,7 @@ static int sock_diag_bind(struct net *net, int group) | |||
294 | case SKNLGRP_INET6_UDP_DESTROY: | 294 | case SKNLGRP_INET6_UDP_DESTROY: |
295 | if (!sock_diag_handlers[AF_INET6]) | 295 | if (!sock_diag_handlers[AF_INET6]) |
296 | request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, | 296 | request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, |
297 | NETLINK_SOCK_DIAG, AF_INET); | 297 | NETLINK_SOCK_DIAG, AF_INET6); |
298 | break; | 298 | break; |
299 | } | 299 | } |
300 | return 0; | 300 | return 0; |
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index a6beb7b6ae55..6578a0a2f708 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c | |||
@@ -292,7 +292,13 @@ static struct ctl_table net_core_table[] = { | |||
292 | .data = &bpf_jit_enable, | 292 | .data = &bpf_jit_enable, |
293 | .maxlen = sizeof(int), | 293 | .maxlen = sizeof(int), |
294 | .mode = 0644, | 294 | .mode = 0644, |
295 | #ifndef CONFIG_BPF_JIT_ALWAYS_ON | ||
295 | .proc_handler = proc_dointvec | 296 | .proc_handler = proc_dointvec |
297 | #else | ||
298 | .proc_handler = proc_dointvec_minmax, | ||
299 | .extra1 = &one, | ||
300 | .extra2 = &one, | ||
301 | #endif | ||
296 | }, | 302 | }, |
297 | #endif | 303 | #endif |
298 | { | 304 | { |
@@ -360,14 +366,16 @@ static struct ctl_table net_core_table[] = { | |||
360 | .data = &sysctl_net_busy_poll, | 366 | .data = &sysctl_net_busy_poll, |
361 | .maxlen = sizeof(unsigned int), | 367 | .maxlen = sizeof(unsigned int), |
362 | .mode = 0644, | 368 | .mode = 0644, |
363 | .proc_handler = proc_dointvec | 369 | .proc_handler = proc_dointvec_minmax, |
370 | .extra1 = &zero, | ||
364 | }, | 371 | }, |
365 | { | 372 | { |
366 | .procname = "busy_read", | 373 | .procname = "busy_read", |
367 | .data = &sysctl_net_busy_read, | 374 | .data = &sysctl_net_busy_read, |
368 | .maxlen = sizeof(unsigned int), | 375 | .maxlen = sizeof(unsigned int), |
369 | .mode = 0644, | 376 | .mode = 0644, |
370 | .proc_handler = proc_dointvec | 377 | .proc_handler = proc_dointvec_minmax, |
378 | .extra1 = &zero, | ||
371 | }, | 379 | }, |
372 | #endif | 380 | #endif |
373 | #ifdef CONFIG_NET_SCHED | 381 | #ifdef CONFIG_NET_SCHED |
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c index 5e3a7302f774..7753681195c1 100644 --- a/net/dccp/ccids/ccid2.c +++ b/net/dccp/ccids/ccid2.c | |||
@@ -140,6 +140,9 @@ static void ccid2_hc_tx_rto_expire(unsigned long data) | |||
140 | 140 | ||
141 | ccid2_pr_debug("RTO_EXPIRE\n"); | 141 | ccid2_pr_debug("RTO_EXPIRE\n"); |
142 | 142 | ||
143 | if (sk->sk_state == DCCP_CLOSED) | ||
144 | goto out; | ||
145 | |||
143 | /* back-off timer */ | 146 | /* back-off timer */ |
144 | hc->tx_rto <<= 1; | 147 | hc->tx_rto <<= 1; |
145 | if (hc->tx_rto > DCCP_RTO_MAX) | 148 | if (hc->tx_rto > DCCP_RTO_MAX) |
diff --git a/net/dccp/proto.c b/net/dccp/proto.c index b68168fcc06a..9d43c1f40274 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c | |||
@@ -259,6 +259,7 @@ int dccp_disconnect(struct sock *sk, int flags) | |||
259 | { | 259 | { |
260 | struct inet_connection_sock *icsk = inet_csk(sk); | 260 | struct inet_connection_sock *icsk = inet_csk(sk); |
261 | struct inet_sock *inet = inet_sk(sk); | 261 | struct inet_sock *inet = inet_sk(sk); |
262 | struct dccp_sock *dp = dccp_sk(sk); | ||
262 | int err = 0; | 263 | int err = 0; |
263 | const int old_state = sk->sk_state; | 264 | const int old_state = sk->sk_state; |
264 | 265 | ||
@@ -278,6 +279,10 @@ int dccp_disconnect(struct sock *sk, int flags) | |||
278 | sk->sk_err = ECONNRESET; | 279 | sk->sk_err = ECONNRESET; |
279 | 280 | ||
280 | dccp_clear_xmit_timers(sk); | 281 | dccp_clear_xmit_timers(sk); |
282 | ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk); | ||
283 | ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk); | ||
284 | dp->dccps_hc_rx_ccid = NULL; | ||
285 | dp->dccps_hc_tx_ccid = NULL; | ||
281 | 286 | ||
282 | __skb_queue_purge(&sk->sk_receive_queue); | 287 | __skb_queue_purge(&sk->sk_receive_queue); |
283 | __skb_queue_purge(&sk->sk_write_queue); | 288 | __skb_queue_purge(&sk->sk_write_queue); |
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 711b4dfa17c3..cb5eb649ad5f 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c | |||
@@ -223,11 +223,16 @@ static bool arp_key_eq(const struct neighbour *neigh, const void *pkey) | |||
223 | 223 | ||
224 | static int arp_constructor(struct neighbour *neigh) | 224 | static int arp_constructor(struct neighbour *neigh) |
225 | { | 225 | { |
226 | __be32 addr = *(__be32 *)neigh->primary_key; | 226 | __be32 addr; |
227 | struct net_device *dev = neigh->dev; | 227 | struct net_device *dev = neigh->dev; |
228 | struct in_device *in_dev; | 228 | struct in_device *in_dev; |
229 | struct neigh_parms *parms; | 229 | struct neigh_parms *parms; |
230 | u32 inaddr_any = INADDR_ANY; | ||
230 | 231 | ||
232 | if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT)) | ||
233 | memcpy(neigh->primary_key, &inaddr_any, arp_tbl.key_len); | ||
234 | |||
235 | addr = *(__be32 *)neigh->primary_key; | ||
231 | rcu_read_lock(); | 236 | rcu_read_lock(); |
232 | in_dev = __in_dev_get_rcu(dev); | 237 | in_dev = __in_dev_get_rcu(dev); |
233 | if (!in_dev) { | 238 | if (!in_dev) { |
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 0212591b0077..63f99e9a821b 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c | |||
@@ -1358,7 +1358,7 @@ skip: | |||
1358 | 1358 | ||
1359 | static bool inetdev_valid_mtu(unsigned int mtu) | 1359 | static bool inetdev_valid_mtu(unsigned int mtu) |
1360 | { | 1360 | { |
1361 | return mtu >= 68; | 1361 | return mtu >= IPV4_MIN_MTU; |
1362 | } | 1362 | } |
1363 | 1363 | ||
1364 | static void inetdev_send_gratuitous_arp(struct net_device *dev, | 1364 | static void inetdev_send_gratuitous_arp(struct net_device *dev, |
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index ee94bd32d6dc..7dc9f0680bf6 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c | |||
@@ -1253,7 +1253,7 @@ fail: | |||
1253 | 1253 | ||
1254 | static void ip_fib_net_exit(struct net *net) | 1254 | static void ip_fib_net_exit(struct net *net) |
1255 | { | 1255 | { |
1256 | unsigned int i; | 1256 | int i; |
1257 | 1257 | ||
1258 | rtnl_lock(); | 1258 | rtnl_lock(); |
1259 | #ifdef CONFIG_IP_MULTIPLE_TABLES | 1259 | #ifdef CONFIG_IP_MULTIPLE_TABLES |
@@ -1261,7 +1261,12 @@ static void ip_fib_net_exit(struct net *net) | |||
1261 | RCU_INIT_POINTER(net->ipv4.fib_main, NULL); | 1261 | RCU_INIT_POINTER(net->ipv4.fib_main, NULL); |
1262 | RCU_INIT_POINTER(net->ipv4.fib_default, NULL); | 1262 | RCU_INIT_POINTER(net->ipv4.fib_default, NULL); |
1263 | #endif | 1263 | #endif |
1264 | for (i = 0; i < FIB_TABLE_HASHSZ; i++) { | 1264 | /* Destroy the tables in reverse order to guarantee that the |
1265 | * local table, ID 255, is destroyed before the main table, ID | ||
1266 | * 254. This is necessary as the local table may contain | ||
1267 | * references to data contained in the main table. | ||
1268 | */ | ||
1269 | for (i = FIB_TABLE_HASHSZ - 1; i >= 0; i--) { | ||
1265 | struct hlist_head *head = &net->ipv4.fib_table_hash[i]; | 1270 | struct hlist_head *head = &net->ipv4.fib_table_hash[i]; |
1266 | struct hlist_node *tmp; | 1271 | struct hlist_node *tmp; |
1267 | struct fib_table *tb; | 1272 | struct fib_table *tb; |
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 3809d523d012..c67efa3e79dd 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c | |||
@@ -89,6 +89,7 @@ | |||
89 | #include <linux/rtnetlink.h> | 89 | #include <linux/rtnetlink.h> |
90 | #include <linux/times.h> | 90 | #include <linux/times.h> |
91 | #include <linux/pkt_sched.h> | 91 | #include <linux/pkt_sched.h> |
92 | #include <linux/byteorder/generic.h> | ||
92 | 93 | ||
93 | #include <net/net_namespace.h> | 94 | #include <net/net_namespace.h> |
94 | #include <net/arp.h> | 95 | #include <net/arp.h> |
@@ -327,6 +328,23 @@ igmp_scount(struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted) | |||
327 | return scount; | 328 | return scount; |
328 | } | 329 | } |
329 | 330 | ||
331 | /* source address selection per RFC 3376 section 4.2.13 */ | ||
332 | static __be32 igmpv3_get_srcaddr(struct net_device *dev, | ||
333 | const struct flowi4 *fl4) | ||
334 | { | ||
335 | struct in_device *in_dev = __in_dev_get_rcu(dev); | ||
336 | |||
337 | if (!in_dev) | ||
338 | return htonl(INADDR_ANY); | ||
339 | |||
340 | for_ifa(in_dev) { | ||
341 | if (fl4->saddr == ifa->ifa_local) | ||
342 | return fl4->saddr; | ||
343 | } endfor_ifa(in_dev); | ||
344 | |||
345 | return htonl(INADDR_ANY); | ||
346 | } | ||
347 | |||
330 | static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu) | 348 | static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu) |
331 | { | 349 | { |
332 | struct sk_buff *skb; | 350 | struct sk_buff *skb; |
@@ -374,7 +392,11 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu) | |||
374 | pip->frag_off = htons(IP_DF); | 392 | pip->frag_off = htons(IP_DF); |
375 | pip->ttl = 1; | 393 | pip->ttl = 1; |
376 | pip->daddr = fl4.daddr; | 394 | pip->daddr = fl4.daddr; |
377 | pip->saddr = fl4.saddr; | 395 | |
396 | rcu_read_lock(); | ||
397 | pip->saddr = igmpv3_get_srcaddr(dev, &fl4); | ||
398 | rcu_read_unlock(); | ||
399 | |||
378 | pip->protocol = IPPROTO_IGMP; | 400 | pip->protocol = IPPROTO_IGMP; |
379 | pip->tot_len = 0; /* filled in later */ | 401 | pip->tot_len = 0; /* filled in later */ |
380 | ip_select_ident(net, skb, NULL); | 402 | ip_select_ident(net, skb, NULL); |
@@ -410,16 +432,17 @@ static int grec_size(struct ip_mc_list *pmc, int type, int gdel, int sdel) | |||
410 | } | 432 | } |
411 | 433 | ||
412 | static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc, | 434 | static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc, |
413 | int type, struct igmpv3_grec **ppgr) | 435 | int type, struct igmpv3_grec **ppgr, unsigned int mtu) |
414 | { | 436 | { |
415 | struct net_device *dev = pmc->interface->dev; | 437 | struct net_device *dev = pmc->interface->dev; |
416 | struct igmpv3_report *pih; | 438 | struct igmpv3_report *pih; |
417 | struct igmpv3_grec *pgr; | 439 | struct igmpv3_grec *pgr; |
418 | 440 | ||
419 | if (!skb) | 441 | if (!skb) { |
420 | skb = igmpv3_newpack(dev, dev->mtu); | 442 | skb = igmpv3_newpack(dev, mtu); |
421 | if (!skb) | 443 | if (!skb) |
422 | return NULL; | 444 | return NULL; |
445 | } | ||
423 | pgr = (struct igmpv3_grec *)skb_put(skb, sizeof(struct igmpv3_grec)); | 446 | pgr = (struct igmpv3_grec *)skb_put(skb, sizeof(struct igmpv3_grec)); |
424 | pgr->grec_type = type; | 447 | pgr->grec_type = type; |
425 | pgr->grec_auxwords = 0; | 448 | pgr->grec_auxwords = 0; |
@@ -441,12 +464,17 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc, | |||
441 | struct igmpv3_grec *pgr = NULL; | 464 | struct igmpv3_grec *pgr = NULL; |
442 | struct ip_sf_list *psf, *psf_next, *psf_prev, **psf_list; | 465 | struct ip_sf_list *psf, *psf_next, *psf_prev, **psf_list; |
443 | int scount, stotal, first, isquery, truncate; | 466 | int scount, stotal, first, isquery, truncate; |
467 | unsigned int mtu; | ||
444 | 468 | ||
445 | if (pmc->multiaddr == IGMP_ALL_HOSTS) | 469 | if (pmc->multiaddr == IGMP_ALL_HOSTS) |
446 | return skb; | 470 | return skb; |
447 | if (ipv4_is_local_multicast(pmc->multiaddr) && !sysctl_igmp_llm_reports) | 471 | if (ipv4_is_local_multicast(pmc->multiaddr) && !sysctl_igmp_llm_reports) |
448 | return skb; | 472 | return skb; |
449 | 473 | ||
474 | mtu = READ_ONCE(dev->mtu); | ||
475 | if (mtu < IPV4_MIN_MTU) | ||
476 | return skb; | ||
477 | |||
450 | isquery = type == IGMPV3_MODE_IS_INCLUDE || | 478 | isquery = type == IGMPV3_MODE_IS_INCLUDE || |
451 | type == IGMPV3_MODE_IS_EXCLUDE; | 479 | type == IGMPV3_MODE_IS_EXCLUDE; |
452 | truncate = type == IGMPV3_MODE_IS_EXCLUDE || | 480 | truncate = type == IGMPV3_MODE_IS_EXCLUDE || |
@@ -467,7 +495,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc, | |||
467 | AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) { | 495 | AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) { |
468 | if (skb) | 496 | if (skb) |
469 | igmpv3_sendpack(skb); | 497 | igmpv3_sendpack(skb); |
470 | skb = igmpv3_newpack(dev, dev->mtu); | 498 | skb = igmpv3_newpack(dev, mtu); |
471 | } | 499 | } |
472 | } | 500 | } |
473 | first = 1; | 501 | first = 1; |
@@ -494,12 +522,12 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc, | |||
494 | pgr->grec_nsrcs = htons(scount); | 522 | pgr->grec_nsrcs = htons(scount); |
495 | if (skb) | 523 | if (skb) |
496 | igmpv3_sendpack(skb); | 524 | igmpv3_sendpack(skb); |
497 | skb = igmpv3_newpack(dev, dev->mtu); | 525 | skb = igmpv3_newpack(dev, mtu); |
498 | first = 1; | 526 | first = 1; |
499 | scount = 0; | 527 | scount = 0; |
500 | } | 528 | } |
501 | if (first) { | 529 | if (first) { |
502 | skb = add_grhead(skb, pmc, type, &pgr); | 530 | skb = add_grhead(skb, pmc, type, &pgr, mtu); |
503 | first = 0; | 531 | first = 0; |
504 | } | 532 | } |
505 | if (!skb) | 533 | if (!skb) |
@@ -533,7 +561,7 @@ empty_source: | |||
533 | igmpv3_sendpack(skb); | 561 | igmpv3_sendpack(skb); |
534 | skb = NULL; /* add_grhead will get a new one */ | 562 | skb = NULL; /* add_grhead will get a new one */ |
535 | } | 563 | } |
536 | skb = add_grhead(skb, pmc, type, &pgr); | 564 | skb = add_grhead(skb, pmc, type, &pgr, mtu); |
537 | } | 565 | } |
538 | } | 566 | } |
539 | if (pgr) | 567 | if (pgr) |
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index e2e162432aa3..7057a1b09b5e 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c | |||
@@ -200,6 +200,7 @@ static void ip_expire(unsigned long arg) | |||
200 | qp = container_of((struct inet_frag_queue *) arg, struct ipq, q); | 200 | qp = container_of((struct inet_frag_queue *) arg, struct ipq, q); |
201 | net = container_of(qp->q.net, struct net, ipv4.frags); | 201 | net = container_of(qp->q.net, struct net, ipv4.frags); |
202 | 202 | ||
203 | rcu_read_lock(); | ||
203 | spin_lock(&qp->q.lock); | 204 | spin_lock(&qp->q.lock); |
204 | 205 | ||
205 | if (qp->q.flags & INET_FRAG_COMPLETE) | 206 | if (qp->q.flags & INET_FRAG_COMPLETE) |
@@ -209,7 +210,7 @@ static void ip_expire(unsigned long arg) | |||
209 | IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS); | 210 | IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS); |
210 | 211 | ||
211 | if (!inet_frag_evicting(&qp->q)) { | 212 | if (!inet_frag_evicting(&qp->q)) { |
212 | struct sk_buff *head = qp->q.fragments; | 213 | struct sk_buff *clone, *head = qp->q.fragments; |
213 | const struct iphdr *iph; | 214 | const struct iphdr *iph; |
214 | int err; | 215 | int err; |
215 | 216 | ||
@@ -218,32 +219,40 @@ static void ip_expire(unsigned long arg) | |||
218 | if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments) | 219 | if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments) |
219 | goto out; | 220 | goto out; |
220 | 221 | ||
221 | rcu_read_lock(); | ||
222 | head->dev = dev_get_by_index_rcu(net, qp->iif); | 222 | head->dev = dev_get_by_index_rcu(net, qp->iif); |
223 | if (!head->dev) | 223 | if (!head->dev) |
224 | goto out_rcu_unlock; | 224 | goto out; |
225 | |||
225 | 226 | ||
226 | /* skb has no dst, perform route lookup again */ | 227 | /* skb has no dst, perform route lookup again */ |
227 | iph = ip_hdr(head); | 228 | iph = ip_hdr(head); |
228 | err = ip_route_input_noref(head, iph->daddr, iph->saddr, | 229 | err = ip_route_input_noref(head, iph->daddr, iph->saddr, |
229 | iph->tos, head->dev); | 230 | iph->tos, head->dev); |
230 | if (err) | 231 | if (err) |
231 | goto out_rcu_unlock; | 232 | goto out; |
232 | 233 | ||
233 | /* Only an end host needs to send an ICMP | 234 | /* Only an end host needs to send an ICMP |
234 | * "Fragment Reassembly Timeout" message, per RFC792. | 235 | * "Fragment Reassembly Timeout" message, per RFC792. |
235 | */ | 236 | */ |
236 | if (frag_expire_skip_icmp(qp->user) && | 237 | if (frag_expire_skip_icmp(qp->user) && |
237 | (skb_rtable(head)->rt_type != RTN_LOCAL)) | 238 | (skb_rtable(head)->rt_type != RTN_LOCAL)) |
238 | goto out_rcu_unlock; | 239 | goto out; |
240 | |||
241 | clone = skb_clone(head, GFP_ATOMIC); | ||
239 | 242 | ||
240 | /* Send an ICMP "Fragment Reassembly Timeout" message. */ | 243 | /* Send an ICMP "Fragment Reassembly Timeout" message. */ |
241 | icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); | 244 | if (clone) { |
242 | out_rcu_unlock: | 245 | spin_unlock(&qp->q.lock); |
243 | rcu_read_unlock(); | 246 | icmp_send(clone, ICMP_TIME_EXCEEDED, |
247 | ICMP_EXC_FRAGTIME, 0); | ||
248 | consume_skb(clone); | ||
249 | goto out_rcu_unlock; | ||
250 | } | ||
244 | } | 251 | } |
245 | out: | 252 | out: |
246 | spin_unlock(&qp->q.lock); | 253 | spin_unlock(&qp->q.lock); |
254 | out_rcu_unlock: | ||
255 | rcu_read_unlock(); | ||
247 | ipq_put(qp); | 256 | ipq_put(qp); |
248 | } | 257 | } |
249 | 258 | ||
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 3310ac75e3f3..c18245e05d26 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c | |||
@@ -400,8 +400,8 @@ static int ip_tunnel_bind_dev(struct net_device *dev) | |||
400 | dev->needed_headroom = t_hlen + hlen; | 400 | dev->needed_headroom = t_hlen + hlen; |
401 | mtu -= (dev->hard_header_len + t_hlen); | 401 | mtu -= (dev->hard_header_len + t_hlen); |
402 | 402 | ||
403 | if (mtu < 68) | 403 | if (mtu < IPV4_MIN_MTU) |
404 | mtu = 68; | 404 | mtu = IPV4_MIN_MTU; |
405 | 405 | ||
406 | return mtu; | 406 | return mtu; |
407 | } | 407 | } |
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 6e3e0e8b1ce3..4cfcc22f7430 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c | |||
@@ -367,23 +367,12 @@ static inline bool unconditional(const struct arpt_entry *e) | |||
367 | memcmp(&e->arp, &uncond, sizeof(uncond)) == 0; | 367 | memcmp(&e->arp, &uncond, sizeof(uncond)) == 0; |
368 | } | 368 | } |
369 | 369 | ||
370 | static bool find_jump_target(const struct xt_table_info *t, | ||
371 | const struct arpt_entry *target) | ||
372 | { | ||
373 | struct arpt_entry *iter; | ||
374 | |||
375 | xt_entry_foreach(iter, t->entries, t->size) { | ||
376 | if (iter == target) | ||
377 | return true; | ||
378 | } | ||
379 | return false; | ||
380 | } | ||
381 | |||
382 | /* Figures out from what hook each rule can be called: returns 0 if | 370 | /* Figures out from what hook each rule can be called: returns 0 if |
383 | * there are loops. Puts hook bitmask in comefrom. | 371 | * there are loops. Puts hook bitmask in comefrom. |
384 | */ | 372 | */ |
385 | static int mark_source_chains(const struct xt_table_info *newinfo, | 373 | static int mark_source_chains(const struct xt_table_info *newinfo, |
386 | unsigned int valid_hooks, void *entry0) | 374 | unsigned int valid_hooks, void *entry0, |
375 | unsigned int *offsets) | ||
387 | { | 376 | { |
388 | unsigned int hook; | 377 | unsigned int hook; |
389 | 378 | ||
@@ -472,10 +461,11 @@ static int mark_source_chains(const struct xt_table_info *newinfo, | |||
472 | /* This a jump; chase it. */ | 461 | /* This a jump; chase it. */ |
473 | duprintf("Jump rule %u -> %u\n", | 462 | duprintf("Jump rule %u -> %u\n", |
474 | pos, newpos); | 463 | pos, newpos); |
464 | if (!xt_find_jump_offset(offsets, newpos, | ||
465 | newinfo->number)) | ||
466 | return 0; | ||
475 | e = (struct arpt_entry *) | 467 | e = (struct arpt_entry *) |
476 | (entry0 + newpos); | 468 | (entry0 + newpos); |
477 | if (!find_jump_target(newinfo, e)) | ||
478 | return 0; | ||
479 | } else { | 469 | } else { |
480 | /* ... this is a fallthru */ | 470 | /* ... this is a fallthru */ |
481 | newpos = pos + e->next_offset; | 471 | newpos = pos + e->next_offset; |
@@ -521,11 +511,13 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size) | |||
521 | { | 511 | { |
522 | struct xt_entry_target *t; | 512 | struct xt_entry_target *t; |
523 | struct xt_target *target; | 513 | struct xt_target *target; |
514 | unsigned long pcnt; | ||
524 | int ret; | 515 | int ret; |
525 | 516 | ||
526 | e->counters.pcnt = xt_percpu_counter_alloc(); | 517 | pcnt = xt_percpu_counter_alloc(); |
527 | if (IS_ERR_VALUE(e->counters.pcnt)) | 518 | if (IS_ERR_VALUE(pcnt)) |
528 | return -ENOMEM; | 519 | return -ENOMEM; |
520 | e->counters.pcnt = pcnt; | ||
529 | 521 | ||
530 | t = arpt_get_target(e); | 522 | t = arpt_get_target(e); |
531 | target = xt_request_find_target(NFPROTO_ARP, t->u.user.name, | 523 | target = xt_request_find_target(NFPROTO_ARP, t->u.user.name, |
@@ -642,6 +634,7 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0, | |||
642 | const struct arpt_replace *repl) | 634 | const struct arpt_replace *repl) |
643 | { | 635 | { |
644 | struct arpt_entry *iter; | 636 | struct arpt_entry *iter; |
637 | unsigned int *offsets; | ||
645 | unsigned int i; | 638 | unsigned int i; |
646 | int ret = 0; | 639 | int ret = 0; |
647 | 640 | ||
@@ -655,6 +648,9 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0, | |||
655 | } | 648 | } |
656 | 649 | ||
657 | duprintf("translate_table: size %u\n", newinfo->size); | 650 | duprintf("translate_table: size %u\n", newinfo->size); |
651 | offsets = xt_alloc_entry_offsets(newinfo->number); | ||
652 | if (!offsets) | ||
653 | return -ENOMEM; | ||
658 | i = 0; | 654 | i = 0; |
659 | 655 | ||
660 | /* Walk through entries, checking offsets. */ | 656 | /* Walk through entries, checking offsets. */ |
@@ -665,7 +661,9 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0, | |||
665 | repl->underflow, | 661 | repl->underflow, |
666 | repl->valid_hooks); | 662 | repl->valid_hooks); |
667 | if (ret != 0) | 663 | if (ret != 0) |
668 | break; | 664 | goto out_free; |
665 | if (i < repl->num_entries) | ||
666 | offsets[i] = (void *)iter - entry0; | ||
669 | ++i; | 667 | ++i; |
670 | if (strcmp(arpt_get_target(iter)->u.user.name, | 668 | if (strcmp(arpt_get_target(iter)->u.user.name, |
671 | XT_ERROR_TARGET) == 0) | 669 | XT_ERROR_TARGET) == 0) |
@@ -673,12 +671,13 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0, | |||
673 | } | 671 | } |
674 | duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret); | 672 | duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret); |
675 | if (ret != 0) | 673 | if (ret != 0) |
676 | return ret; | 674 | goto out_free; |
677 | 675 | ||
676 | ret = -EINVAL; | ||
678 | if (i != repl->num_entries) { | 677 | if (i != repl->num_entries) { |
679 | duprintf("translate_table: %u not %u entries\n", | 678 | duprintf("translate_table: %u not %u entries\n", |
680 | i, repl->num_entries); | 679 | i, repl->num_entries); |
681 | return -EINVAL; | 680 | goto out_free; |
682 | } | 681 | } |
683 | 682 | ||
684 | /* Check hooks all assigned */ | 683 | /* Check hooks all assigned */ |
@@ -689,17 +688,20 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0, | |||
689 | if (newinfo->hook_entry[i] == 0xFFFFFFFF) { | 688 | if (newinfo->hook_entry[i] == 0xFFFFFFFF) { |
690 | duprintf("Invalid hook entry %u %u\n", | 689 | duprintf("Invalid hook entry %u %u\n", |
691 | i, repl->hook_entry[i]); | 690 | i, repl->hook_entry[i]); |
692 | return -EINVAL; | 691 | goto out_free; |
693 | } | 692 | } |
694 | if (newinfo->underflow[i] == 0xFFFFFFFF) { | 693 | if (newinfo->underflow[i] == 0xFFFFFFFF) { |
695 | duprintf("Invalid underflow %u %u\n", | 694 | duprintf("Invalid underflow %u %u\n", |
696 | i, repl->underflow[i]); | 695 | i, repl->underflow[i]); |
697 | return -EINVAL; | 696 | goto out_free; |
698 | } | 697 | } |
699 | } | 698 | } |
700 | 699 | ||
701 | if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) | 700 | if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) { |
702 | return -ELOOP; | 701 | ret = -ELOOP; |
702 | goto out_free; | ||
703 | } | ||
704 | kvfree(offsets); | ||
703 | 705 | ||
704 | /* Finally, each sanity check must pass */ | 706 | /* Finally, each sanity check must pass */ |
705 | i = 0; | 707 | i = 0; |
@@ -720,6 +722,9 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0, | |||
720 | } | 722 | } |
721 | 723 | ||
722 | return ret; | 724 | return ret; |
725 | out_free: | ||
726 | kvfree(offsets); | ||
727 | return ret; | ||
723 | } | 728 | } |
724 | 729 | ||
725 | static void get_counters(const struct xt_table_info *t, | 730 | static void get_counters(const struct xt_table_info *t, |
@@ -1336,8 +1341,8 @@ static int translate_compat_table(struct xt_table_info **pinfo, | |||
1336 | 1341 | ||
1337 | newinfo->number = compatr->num_entries; | 1342 | newinfo->number = compatr->num_entries; |
1338 | for (i = 0; i < NF_ARP_NUMHOOKS; i++) { | 1343 | for (i = 0; i < NF_ARP_NUMHOOKS; i++) { |
1339 | newinfo->hook_entry[i] = info->hook_entry[i]; | 1344 | newinfo->hook_entry[i] = compatr->hook_entry[i]; |
1340 | newinfo->underflow[i] = info->underflow[i]; | 1345 | newinfo->underflow[i] = compatr->underflow[i]; |
1341 | } | 1346 | } |
1342 | entry1 = newinfo->entries; | 1347 | entry1 = newinfo->entries; |
1343 | pos = entry1; | 1348 | pos = entry1; |
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index a399c5419622..a98173d1ea97 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c | |||
@@ -443,23 +443,12 @@ ipt_do_table(struct sk_buff *skb, | |||
443 | #endif | 443 | #endif |
444 | } | 444 | } |
445 | 445 | ||
446 | static bool find_jump_target(const struct xt_table_info *t, | ||
447 | const struct ipt_entry *target) | ||
448 | { | ||
449 | struct ipt_entry *iter; | ||
450 | |||
451 | xt_entry_foreach(iter, t->entries, t->size) { | ||
452 | if (iter == target) | ||
453 | return true; | ||
454 | } | ||
455 | return false; | ||
456 | } | ||
457 | |||
458 | /* Figures out from what hook each rule can be called: returns 0 if | 446 | /* Figures out from what hook each rule can be called: returns 0 if |
459 | there are loops. Puts hook bitmask in comefrom. */ | 447 | there are loops. Puts hook bitmask in comefrom. */ |
460 | static int | 448 | static int |
461 | mark_source_chains(const struct xt_table_info *newinfo, | 449 | mark_source_chains(const struct xt_table_info *newinfo, |
462 | unsigned int valid_hooks, void *entry0) | 450 | unsigned int valid_hooks, void *entry0, |
451 | unsigned int *offsets) | ||
463 | { | 452 | { |
464 | unsigned int hook; | 453 | unsigned int hook; |
465 | 454 | ||
@@ -552,10 +541,11 @@ mark_source_chains(const struct xt_table_info *newinfo, | |||
552 | /* This a jump; chase it. */ | 541 | /* This a jump; chase it. */ |
553 | duprintf("Jump rule %u -> %u\n", | 542 | duprintf("Jump rule %u -> %u\n", |
554 | pos, newpos); | 543 | pos, newpos); |
544 | if (!xt_find_jump_offset(offsets, newpos, | ||
545 | newinfo->number)) | ||
546 | return 0; | ||
555 | e = (struct ipt_entry *) | 547 | e = (struct ipt_entry *) |
556 | (entry0 + newpos); | 548 | (entry0 + newpos); |
557 | if (!find_jump_target(newinfo, e)) | ||
558 | return 0; | ||
559 | } else { | 549 | } else { |
560 | /* ... this is a fallthru */ | 550 | /* ... this is a fallthru */ |
561 | newpos = pos + e->next_offset; | 551 | newpos = pos + e->next_offset; |
@@ -663,10 +653,12 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name, | |||
663 | unsigned int j; | 653 | unsigned int j; |
664 | struct xt_mtchk_param mtpar; | 654 | struct xt_mtchk_param mtpar; |
665 | struct xt_entry_match *ematch; | 655 | struct xt_entry_match *ematch; |
656 | unsigned long pcnt; | ||
666 | 657 | ||
667 | e->counters.pcnt = xt_percpu_counter_alloc(); | 658 | pcnt = xt_percpu_counter_alloc(); |
668 | if (IS_ERR_VALUE(e->counters.pcnt)) | 659 | if (IS_ERR_VALUE(pcnt)) |
669 | return -ENOMEM; | 660 | return -ENOMEM; |
661 | e->counters.pcnt = pcnt; | ||
670 | 662 | ||
671 | j = 0; | 663 | j = 0; |
672 | mtpar.net = net; | 664 | mtpar.net = net; |
@@ -811,6 +803,7 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, | |||
811 | const struct ipt_replace *repl) | 803 | const struct ipt_replace *repl) |
812 | { | 804 | { |
813 | struct ipt_entry *iter; | 805 | struct ipt_entry *iter; |
806 | unsigned int *offsets; | ||
814 | unsigned int i; | 807 | unsigned int i; |
815 | int ret = 0; | 808 | int ret = 0; |
816 | 809 | ||
@@ -824,6 +817,9 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, | |||
824 | } | 817 | } |
825 | 818 | ||
826 | duprintf("translate_table: size %u\n", newinfo->size); | 819 | duprintf("translate_table: size %u\n", newinfo->size); |
820 | offsets = xt_alloc_entry_offsets(newinfo->number); | ||
821 | if (!offsets) | ||
822 | return -ENOMEM; | ||
827 | i = 0; | 823 | i = 0; |
828 | /* Walk through entries, checking offsets. */ | 824 | /* Walk through entries, checking offsets. */ |
829 | xt_entry_foreach(iter, entry0, newinfo->size) { | 825 | xt_entry_foreach(iter, entry0, newinfo->size) { |
@@ -833,17 +829,20 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, | |||
833 | repl->underflow, | 829 | repl->underflow, |
834 | repl->valid_hooks); | 830 | repl->valid_hooks); |
835 | if (ret != 0) | 831 | if (ret != 0) |
836 | return ret; | 832 | goto out_free; |
833 | if (i < repl->num_entries) | ||
834 | offsets[i] = (void *)iter - entry0; | ||
837 | ++i; | 835 | ++i; |
838 | if (strcmp(ipt_get_target(iter)->u.user.name, | 836 | if (strcmp(ipt_get_target(iter)->u.user.name, |
839 | XT_ERROR_TARGET) == 0) | 837 | XT_ERROR_TARGET) == 0) |
840 | ++newinfo->stacksize; | 838 | ++newinfo->stacksize; |
841 | } | 839 | } |
842 | 840 | ||
841 | ret = -EINVAL; | ||
843 | if (i != repl->num_entries) { | 842 | if (i != repl->num_entries) { |
844 | duprintf("translate_table: %u not %u entries\n", | 843 | duprintf("translate_table: %u not %u entries\n", |
845 | i, repl->num_entries); | 844 | i, repl->num_entries); |
846 | return -EINVAL; | 845 | goto out_free; |
847 | } | 846 | } |
848 | 847 | ||
849 | /* Check hooks all assigned */ | 848 | /* Check hooks all assigned */ |
@@ -854,17 +853,20 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, | |||
854 | if (newinfo->hook_entry[i] == 0xFFFFFFFF) { | 853 | if (newinfo->hook_entry[i] == 0xFFFFFFFF) { |
855 | duprintf("Invalid hook entry %u %u\n", | 854 | duprintf("Invalid hook entry %u %u\n", |
856 | i, repl->hook_entry[i]); | 855 | i, repl->hook_entry[i]); |
857 | return -EINVAL; | 856 | goto out_free; |
858 | } | 857 | } |
859 | if (newinfo->underflow[i] == 0xFFFFFFFF) { | 858 | if (newinfo->underflow[i] == 0xFFFFFFFF) { |
860 | duprintf("Invalid underflow %u %u\n", | 859 | duprintf("Invalid underflow %u %u\n", |
861 | i, repl->underflow[i]); | 860 | i, repl->underflow[i]); |
862 | return -EINVAL; | 861 | goto out_free; |
863 | } | 862 | } |
864 | } | 863 | } |
865 | 864 | ||
866 | if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) | 865 | if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) { |
867 | return -ELOOP; | 866 | ret = -ELOOP; |
867 | goto out_free; | ||
868 | } | ||
869 | kvfree(offsets); | ||
868 | 870 | ||
869 | /* Finally, each sanity check must pass */ | 871 | /* Finally, each sanity check must pass */ |
870 | i = 0; | 872 | i = 0; |
@@ -885,6 +887,9 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, | |||
885 | } | 887 | } |
886 | 888 | ||
887 | return ret; | 889 | return ret; |
890 | out_free: | ||
891 | kvfree(offsets); | ||
892 | return ret; | ||
888 | } | 893 | } |
889 | 894 | ||
890 | static void | 895 | static void |
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c index 2689c9c4f1a0..182eb878633d 100644 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c | |||
@@ -1260,16 +1260,6 @@ static const struct nf_conntrack_expect_policy snmp_exp_policy = { | |||
1260 | .timeout = 180, | 1260 | .timeout = 180, |
1261 | }; | 1261 | }; |
1262 | 1262 | ||
1263 | static struct nf_conntrack_helper snmp_helper __read_mostly = { | ||
1264 | .me = THIS_MODULE, | ||
1265 | .help = help, | ||
1266 | .expect_policy = &snmp_exp_policy, | ||
1267 | .name = "snmp", | ||
1268 | .tuple.src.l3num = AF_INET, | ||
1269 | .tuple.src.u.udp.port = cpu_to_be16(SNMP_PORT), | ||
1270 | .tuple.dst.protonum = IPPROTO_UDP, | ||
1271 | }; | ||
1272 | |||
1273 | static struct nf_conntrack_helper snmp_trap_helper __read_mostly = { | 1263 | static struct nf_conntrack_helper snmp_trap_helper __read_mostly = { |
1274 | .me = THIS_MODULE, | 1264 | .me = THIS_MODULE, |
1275 | .help = help, | 1265 | .help = help, |
@@ -1288,17 +1278,10 @@ static struct nf_conntrack_helper snmp_trap_helper __read_mostly = { | |||
1288 | 1278 | ||
1289 | static int __init nf_nat_snmp_basic_init(void) | 1279 | static int __init nf_nat_snmp_basic_init(void) |
1290 | { | 1280 | { |
1291 | int ret = 0; | ||
1292 | |||
1293 | BUG_ON(nf_nat_snmp_hook != NULL); | 1281 | BUG_ON(nf_nat_snmp_hook != NULL); |
1294 | RCU_INIT_POINTER(nf_nat_snmp_hook, help); | 1282 | RCU_INIT_POINTER(nf_nat_snmp_hook, help); |
1295 | 1283 | ||
1296 | ret = nf_conntrack_helper_register(&snmp_trap_helper); | 1284 | return nf_conntrack_helper_register(&snmp_trap_helper); |
1297 | if (ret < 0) { | ||
1298 | nf_conntrack_helper_unregister(&snmp_helper); | ||
1299 | return ret; | ||
1300 | } | ||
1301 | return ret; | ||
1302 | } | 1285 | } |
1303 | 1286 | ||
1304 | static void __exit nf_nat_snmp_basic_fini(void) | 1287 | static void __exit nf_nat_snmp_basic_fini(void) |
diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c index c747b2d9eb77..d4acf38b60fd 100644 --- a/net/ipv4/netfilter/nf_reject_ipv4.c +++ b/net/ipv4/netfilter/nf_reject_ipv4.c | |||
@@ -124,6 +124,8 @@ void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook) | |||
124 | /* ip_route_me_harder expects skb->dst to be set */ | 124 | /* ip_route_me_harder expects skb->dst to be set */ |
125 | skb_dst_set_noref(nskb, skb_dst(oldskb)); | 125 | skb_dst_set_noref(nskb, skb_dst(oldskb)); |
126 | 126 | ||
127 | nskb->mark = IP4_REPLY_MARK(net, oldskb->mark); | ||
128 | |||
127 | skb_reserve(nskb, LL_MAX_HEADER); | 129 | skb_reserve(nskb, LL_MAX_HEADER); |
128 | niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP, | 130 | niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP, |
129 | ip4_dst_hoplimit(skb_dst(nskb))); | 131 | ip4_dst_hoplimit(skb_dst(nskb))); |
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index ca1031411aa7..7541427537d0 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c | |||
@@ -500,11 +500,16 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) | |||
500 | int err; | 500 | int err; |
501 | struct ip_options_data opt_copy; | 501 | struct ip_options_data opt_copy; |
502 | struct raw_frag_vec rfv; | 502 | struct raw_frag_vec rfv; |
503 | int hdrincl; | ||
503 | 504 | ||
504 | err = -EMSGSIZE; | 505 | err = -EMSGSIZE; |
505 | if (len > 0xFFFF) | 506 | if (len > 0xFFFF) |
506 | goto out; | 507 | goto out; |
507 | 508 | ||
509 | /* hdrincl should be READ_ONCE(inet->hdrincl) | ||
510 | * but READ_ONCE() doesn't work with bit fields | ||
511 | */ | ||
512 | hdrincl = inet->hdrincl; | ||
508 | /* | 513 | /* |
509 | * Check the flags. | 514 | * Check the flags. |
510 | */ | 515 | */ |
@@ -579,7 +584,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) | |||
579 | /* Linux does not mangle headers on raw sockets, | 584 | /* Linux does not mangle headers on raw sockets, |
580 | * so that IP options + IP_HDRINCL is non-sense. | 585 | * so that IP options + IP_HDRINCL is non-sense. |
581 | */ | 586 | */ |
582 | if (inet->hdrincl) | 587 | if (hdrincl) |
583 | goto done; | 588 | goto done; |
584 | if (ipc.opt->opt.srr) { | 589 | if (ipc.opt->opt.srr) { |
585 | if (!daddr) | 590 | if (!daddr) |
@@ -601,9 +606,9 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) | |||
601 | 606 | ||
602 | flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos, | 607 | flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos, |
603 | RT_SCOPE_UNIVERSE, | 608 | RT_SCOPE_UNIVERSE, |
604 | inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol, | 609 | hdrincl ? IPPROTO_RAW : sk->sk_protocol, |
605 | inet_sk_flowi_flags(sk) | | 610 | inet_sk_flowi_flags(sk) | |
606 | (inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0), | 611 | (hdrincl ? FLOWI_FLAG_KNOWN_NH : 0), |
607 | daddr, saddr, 0, 0, sk->sk_uid); | 612 | daddr, saddr, 0, 0, sk->sk_uid); |
608 | 613 | ||
609 | if (!saddr && ipc.oif) { | 614 | if (!saddr && ipc.oif) { |
@@ -612,7 +617,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) | |||
612 | goto done; | 617 | goto done; |
613 | } | 618 | } |
614 | 619 | ||
615 | if (!inet->hdrincl) { | 620 | if (!hdrincl) { |
616 | rfv.msg = msg; | 621 | rfv.msg = msg; |
617 | rfv.hlen = 0; | 622 | rfv.hlen = 0; |
618 | 623 | ||
@@ -637,7 +642,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) | |||
637 | goto do_confirm; | 642 | goto do_confirm; |
638 | back_from_confirm: | 643 | back_from_confirm: |
639 | 644 | ||
640 | if (inet->hdrincl) | 645 | if (hdrincl) |
641 | err = raw_send_hdrinc(sk, &fl4, msg, len, | 646 | err = raw_send_hdrinc(sk, &fl4, msg, len, |
642 | &rt, msg->msg_flags); | 647 | &rt, msg->msg_flags); |
643 | 648 | ||
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 34b5bf9e7406..b7f089b79b42 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -2176,6 +2176,9 @@ adjudge_to_death: | |||
2176 | tcp_send_active_reset(sk, GFP_ATOMIC); | 2176 | tcp_send_active_reset(sk, GFP_ATOMIC); |
2177 | NET_INC_STATS_BH(sock_net(sk), | 2177 | NET_INC_STATS_BH(sock_net(sk), |
2178 | LINUX_MIB_TCPABORTONMEMORY); | 2178 | LINUX_MIB_TCPABORTONMEMORY); |
2179 | } else if (!check_net(sock_net(sk))) { | ||
2180 | /* Not possible to send reset; just close */ | ||
2181 | tcp_set_state(sk, TCP_CLOSE); | ||
2179 | } | 2182 | } |
2180 | } | 2183 | } |
2181 | 2184 | ||
@@ -2273,6 +2276,12 @@ int tcp_disconnect(struct sock *sk, int flags) | |||
2273 | 2276 | ||
2274 | WARN_ON(inet->inet_num && !icsk->icsk_bind_hash); | 2277 | WARN_ON(inet->inet_num && !icsk->icsk_bind_hash); |
2275 | 2278 | ||
2279 | if (sk->sk_frag.page) { | ||
2280 | put_page(sk->sk_frag.page); | ||
2281 | sk->sk_frag.page = NULL; | ||
2282 | sk->sk_frag.offset = 0; | ||
2283 | } | ||
2284 | |||
2276 | sk->sk_error_report(sk); | 2285 | sk->sk_error_report(sk); |
2277 | return err; | 2286 | return err; |
2278 | } | 2287 | } |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 4e656fab1718..0100738666f4 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -817,7 +817,7 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb, | |||
817 | tcp_time_stamp, | 817 | tcp_time_stamp, |
818 | req->ts_recent, | 818 | req->ts_recent, |
819 | 0, | 819 | 0, |
820 | tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr, | 820 | tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->saddr, |
821 | AF_INET), | 821 | AF_INET), |
822 | inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0, | 822 | inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0, |
823 | ip_hdr(skb)->tos); | 823 | ip_hdr(skb)->tos); |
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 1ec12a4f327e..35f638cfc675 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c | |||
@@ -46,11 +46,19 @@ static void tcp_write_err(struct sock *sk) | |||
46 | * to prevent DoS attacks. It is called when a retransmission timeout | 46 | * to prevent DoS attacks. It is called when a retransmission timeout |
47 | * or zero probe timeout occurs on orphaned socket. | 47 | * or zero probe timeout occurs on orphaned socket. |
48 | * | 48 | * |
49 | * Also close if our net namespace is exiting; in that case there is no | ||
50 | * hope of ever communicating again since all netns interfaces are already | ||
51 | * down (or about to be down), and we need to release our dst references, | ||
52 | * which have been moved to the netns loopback interface, so the namespace | ||
53 | * can finish exiting. This condition is only possible if we are a kernel | ||
54 | * socket, as those do not hold references to the namespace. | ||
55 | * | ||
49 | * Criteria is still not confirmed experimentally and may change. | 56 | * Criteria is still not confirmed experimentally and may change. |
50 | * We kill the socket, if: | 57 | * We kill the socket, if: |
51 | * 1. If number of orphaned sockets exceeds an administratively configured | 58 | * 1. If number of orphaned sockets exceeds an administratively configured |
52 | * limit. | 59 | * limit. |
53 | * 2. If we have strong memory pressure. | 60 | * 2. If we have strong memory pressure. |
61 | * 3. If our net namespace is exiting. | ||
54 | */ | 62 | */ |
55 | static int tcp_out_of_resources(struct sock *sk, bool do_reset) | 63 | static int tcp_out_of_resources(struct sock *sk, bool do_reset) |
56 | { | 64 | { |
@@ -79,6 +87,13 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset) | |||
79 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY); | 87 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY); |
80 | return 1; | 88 | return 1; |
81 | } | 89 | } |
90 | |||
91 | if (!check_net(sock_net(sk))) { | ||
92 | /* Not possible to send reset; just close */ | ||
93 | tcp_done(sk); | ||
94 | return 1; | ||
95 | } | ||
96 | |||
82 | return 0; | 97 | return 0; |
83 | } | 98 | } |
84 | 99 | ||
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c index 13951c4087d4..b9fac0522be6 100644 --- a/net/ipv4/tcp_vegas.c +++ b/net/ipv4/tcp_vegas.c | |||
@@ -158,7 +158,7 @@ EXPORT_SYMBOL_GPL(tcp_vegas_cwnd_event); | |||
158 | 158 | ||
159 | static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp) | 159 | static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp) |
160 | { | 160 | { |
161 | return min(tp->snd_ssthresh, tp->snd_cwnd-1); | 161 | return min(tp->snd_ssthresh, tp->snd_cwnd); |
162 | } | 162 | } |
163 | 163 | ||
164 | static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked) | 164 | static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked) |
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 7ad3f4a52b67..e1fe8d227ef1 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c | |||
@@ -216,7 +216,6 @@ lookup_protocol: | |||
216 | np->mcast_hops = IPV6_DEFAULT_MCASTHOPS; | 216 | np->mcast_hops = IPV6_DEFAULT_MCASTHOPS; |
217 | np->mc_loop = 1; | 217 | np->mc_loop = 1; |
218 | np->pmtudisc = IPV6_PMTUDISC_WANT; | 218 | np->pmtudisc = IPV6_PMTUDISC_WANT; |
219 | np->autoflowlabel = ip6_default_np_autolabel(sock_net(sk)); | ||
220 | sk->sk_ipv6only = net->ipv6.sysctl.bindv6only; | 219 | sk->sk_ipv6only = net->ipv6.sysctl.bindv6only; |
221 | 220 | ||
222 | /* Init the ipv4 part of the socket since we can have sockets | 221 | /* Init the ipv4 part of the socket since we can have sockets |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 71624cf26832..3ef81c387923 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -148,6 +148,14 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb) | |||
148 | !(IP6CB(skb)->flags & IP6SKB_REROUTED)); | 148 | !(IP6CB(skb)->flags & IP6SKB_REROUTED)); |
149 | } | 149 | } |
150 | 150 | ||
151 | bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np) | ||
152 | { | ||
153 | if (!np->autoflowlabel_set) | ||
154 | return ip6_default_np_autolabel(net); | ||
155 | else | ||
156 | return np->autoflowlabel; | ||
157 | } | ||
158 | |||
151 | /* | 159 | /* |
152 | * xmit an sk_buff (used by TCP, SCTP and DCCP) | 160 | * xmit an sk_buff (used by TCP, SCTP and DCCP) |
153 | * Note : socket lock is not held for SYNACK packets, but might be modified | 161 | * Note : socket lock is not held for SYNACK packets, but might be modified |
@@ -211,7 +219,7 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, | |||
211 | hlimit = ip6_dst_hoplimit(dst); | 219 | hlimit = ip6_dst_hoplimit(dst); |
212 | 220 | ||
213 | ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel, | 221 | ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel, |
214 | np->autoflowlabel, fl6)); | 222 | ip6_autoflowlabel(net, np), fl6)); |
215 | 223 | ||
216 | hdr->payload_len = htons(seg_len); | 224 | hdr->payload_len = htons(seg_len); |
217 | hdr->nexthdr = proto; | 225 | hdr->nexthdr = proto; |
@@ -1238,14 +1246,16 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork, | |||
1238 | v6_cork->tclass = tclass; | 1246 | v6_cork->tclass = tclass; |
1239 | if (rt->dst.flags & DST_XFRM_TUNNEL) | 1247 | if (rt->dst.flags & DST_XFRM_TUNNEL) |
1240 | mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ? | 1248 | mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ? |
1241 | rt->dst.dev->mtu : dst_mtu(&rt->dst); | 1249 | READ_ONCE(rt->dst.dev->mtu) : dst_mtu(&rt->dst); |
1242 | else | 1250 | else |
1243 | mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ? | 1251 | mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ? |
1244 | rt->dst.dev->mtu : dst_mtu(rt->dst.path); | 1252 | READ_ONCE(rt->dst.dev->mtu) : dst_mtu(rt->dst.path); |
1245 | if (np->frag_size < mtu) { | 1253 | if (np->frag_size < mtu) { |
1246 | if (np->frag_size) | 1254 | if (np->frag_size) |
1247 | mtu = np->frag_size; | 1255 | mtu = np->frag_size; |
1248 | } | 1256 | } |
1257 | if (mtu < IPV6_MIN_MTU) | ||
1258 | return -EINVAL; | ||
1249 | cork->base.fragsize = mtu; | 1259 | cork->base.fragsize = mtu; |
1250 | if (dst_allfrag(rt->dst.path)) | 1260 | if (dst_allfrag(rt->dst.path)) |
1251 | cork->base.flags |= IPCORK_ALLFRAG; | 1261 | cork->base.flags |= IPCORK_ALLFRAG; |
@@ -1675,7 +1685,7 @@ struct sk_buff *__ip6_make_skb(struct sock *sk, | |||
1675 | 1685 | ||
1676 | ip6_flow_hdr(hdr, v6_cork->tclass, | 1686 | ip6_flow_hdr(hdr, v6_cork->tclass, |
1677 | ip6_make_flowlabel(net, skb, fl6->flowlabel, | 1687 | ip6_make_flowlabel(net, skb, fl6->flowlabel, |
1678 | np->autoflowlabel, fl6)); | 1688 | ip6_autoflowlabel(net, np), fl6)); |
1679 | hdr->hop_limit = v6_cork->hop_limit; | 1689 | hdr->hop_limit = v6_cork->hop_limit; |
1680 | hdr->nexthdr = proto; | 1690 | hdr->nexthdr = proto; |
1681 | hdr->saddr = fl6->saddr; | 1691 | hdr->saddr = fl6->saddr; |
@@ -1775,10 +1785,13 @@ struct sk_buff *ip6_make_skb(struct sock *sk, | |||
1775 | cork.base.flags = 0; | 1785 | cork.base.flags = 0; |
1776 | cork.base.addr = 0; | 1786 | cork.base.addr = 0; |
1777 | cork.base.opt = NULL; | 1787 | cork.base.opt = NULL; |
1788 | cork.base.dst = NULL; | ||
1778 | v6_cork.opt = NULL; | 1789 | v6_cork.opt = NULL; |
1779 | err = ip6_setup_cork(sk, &cork, &v6_cork, hlimit, tclass, opt, rt, fl6); | 1790 | err = ip6_setup_cork(sk, &cork, &v6_cork, hlimit, tclass, opt, rt, fl6); |
1780 | if (err) | 1791 | if (err) { |
1792 | ip6_cork_release(&cork, &v6_cork); | ||
1781 | return ERR_PTR(err); | 1793 | return ERR_PTR(err); |
1794 | } | ||
1782 | 1795 | ||
1783 | if (dontfrag < 0) | 1796 | if (dontfrag < 0) |
1784 | dontfrag = inet6_sk(sk)->dontfrag; | 1797 | dontfrag = inet6_sk(sk)->dontfrag; |
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 600975c5eacf..cbea14e09bc5 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -1083,10 +1083,11 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, | |||
1083 | memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr)); | 1083 | memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr)); |
1084 | neigh_release(neigh); | 1084 | neigh_release(neigh); |
1085 | } | 1085 | } |
1086 | } else if (!(t->parms.flags & | 1086 | } else if (t->parms.proto != 0 && !(t->parms.flags & |
1087 | (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) { | 1087 | (IP6_TNL_F_USE_ORIG_TCLASS | |
1088 | /* enable the cache only only if the routing decision does | 1088 | IP6_TNL_F_USE_ORIG_FWMARK))) { |
1089 | * not depend on the current inner header value | 1089 | /* enable the cache only if neither the outer protocol nor the |
1090 | * routing decision depends on the current inner header value | ||
1090 | */ | 1091 | */ |
1091 | use_cache = true; | 1092 | use_cache = true; |
1092 | } | 1093 | } |
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 8361d73ab653..e5846d1f9b55 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c | |||
@@ -495,6 +495,7 @@ static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos) | |||
495 | return ERR_PTR(-ENOENT); | 495 | return ERR_PTR(-ENOENT); |
496 | 496 | ||
497 | it->mrt = mrt; | 497 | it->mrt = mrt; |
498 | it->cache = NULL; | ||
498 | return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1) | 499 | return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1) |
499 | : SEQ_START_TOKEN; | 500 | : SEQ_START_TOKEN; |
500 | } | 501 | } |
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index a4a30d2ca66f..9011176c8387 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c | |||
@@ -872,6 +872,7 @@ pref_skip_coa: | |||
872 | break; | 872 | break; |
873 | case IPV6_AUTOFLOWLABEL: | 873 | case IPV6_AUTOFLOWLABEL: |
874 | np->autoflowlabel = valbool; | 874 | np->autoflowlabel = valbool; |
875 | np->autoflowlabel_set = 1; | ||
875 | retv = 0; | 876 | retv = 0; |
876 | break; | 877 | break; |
877 | } | 878 | } |
@@ -1312,7 +1313,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, | |||
1312 | break; | 1313 | break; |
1313 | 1314 | ||
1314 | case IPV6_AUTOFLOWLABEL: | 1315 | case IPV6_AUTOFLOWLABEL: |
1315 | val = np->autoflowlabel; | 1316 | val = ip6_autoflowlabel(sock_net(sk), np); |
1316 | break; | 1317 | break; |
1317 | 1318 | ||
1318 | default: | 1319 | default: |
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index d64ee7e83664..06640685ff43 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c | |||
@@ -1668,16 +1668,16 @@ static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel) | |||
1668 | } | 1668 | } |
1669 | 1669 | ||
1670 | static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc, | 1670 | static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc, |
1671 | int type, struct mld2_grec **ppgr) | 1671 | int type, struct mld2_grec **ppgr, unsigned int mtu) |
1672 | { | 1672 | { |
1673 | struct net_device *dev = pmc->idev->dev; | ||
1674 | struct mld2_report *pmr; | 1673 | struct mld2_report *pmr; |
1675 | struct mld2_grec *pgr; | 1674 | struct mld2_grec *pgr; |
1676 | 1675 | ||
1677 | if (!skb) | 1676 | if (!skb) { |
1678 | skb = mld_newpack(pmc->idev, dev->mtu); | 1677 | skb = mld_newpack(pmc->idev, mtu); |
1679 | if (!skb) | 1678 | if (!skb) |
1680 | return NULL; | 1679 | return NULL; |
1680 | } | ||
1681 | pgr = (struct mld2_grec *)skb_put(skb, sizeof(struct mld2_grec)); | 1681 | pgr = (struct mld2_grec *)skb_put(skb, sizeof(struct mld2_grec)); |
1682 | pgr->grec_type = type; | 1682 | pgr->grec_type = type; |
1683 | pgr->grec_auxwords = 0; | 1683 | pgr->grec_auxwords = 0; |
@@ -1700,10 +1700,15 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc, | |||
1700 | struct mld2_grec *pgr = NULL; | 1700 | struct mld2_grec *pgr = NULL; |
1701 | struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list; | 1701 | struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list; |
1702 | int scount, stotal, first, isquery, truncate; | 1702 | int scount, stotal, first, isquery, truncate; |
1703 | unsigned int mtu; | ||
1703 | 1704 | ||
1704 | if (pmc->mca_flags & MAF_NOREPORT) | 1705 | if (pmc->mca_flags & MAF_NOREPORT) |
1705 | return skb; | 1706 | return skb; |
1706 | 1707 | ||
1708 | mtu = READ_ONCE(dev->mtu); | ||
1709 | if (mtu < IPV6_MIN_MTU) | ||
1710 | return skb; | ||
1711 | |||
1707 | isquery = type == MLD2_MODE_IS_INCLUDE || | 1712 | isquery = type == MLD2_MODE_IS_INCLUDE || |
1708 | type == MLD2_MODE_IS_EXCLUDE; | 1713 | type == MLD2_MODE_IS_EXCLUDE; |
1709 | truncate = type == MLD2_MODE_IS_EXCLUDE || | 1714 | truncate = type == MLD2_MODE_IS_EXCLUDE || |
@@ -1724,7 +1729,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc, | |||
1724 | AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) { | 1729 | AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) { |
1725 | if (skb) | 1730 | if (skb) |
1726 | mld_sendpack(skb); | 1731 | mld_sendpack(skb); |
1727 | skb = mld_newpack(idev, dev->mtu); | 1732 | skb = mld_newpack(idev, mtu); |
1728 | } | 1733 | } |
1729 | } | 1734 | } |
1730 | first = 1; | 1735 | first = 1; |
@@ -1751,12 +1756,12 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc, | |||
1751 | pgr->grec_nsrcs = htons(scount); | 1756 | pgr->grec_nsrcs = htons(scount); |
1752 | if (skb) | 1757 | if (skb) |
1753 | mld_sendpack(skb); | 1758 | mld_sendpack(skb); |
1754 | skb = mld_newpack(idev, dev->mtu); | 1759 | skb = mld_newpack(idev, mtu); |
1755 | first = 1; | 1760 | first = 1; |
1756 | scount = 0; | 1761 | scount = 0; |
1757 | } | 1762 | } |
1758 | if (first) { | 1763 | if (first) { |
1759 | skb = add_grhead(skb, pmc, type, &pgr); | 1764 | skb = add_grhead(skb, pmc, type, &pgr, mtu); |
1760 | first = 0; | 1765 | first = 0; |
1761 | } | 1766 | } |
1762 | if (!skb) | 1767 | if (!skb) |
@@ -1790,7 +1795,7 @@ empty_source: | |||
1790 | mld_sendpack(skb); | 1795 | mld_sendpack(skb); |
1791 | skb = NULL; /* add_grhead will get a new one */ | 1796 | skb = NULL; /* add_grhead will get a new one */ |
1792 | } | 1797 | } |
1793 | skb = add_grhead(skb, pmc, type, &pgr); | 1798 | skb = add_grhead(skb, pmc, type, &pgr, mtu); |
1794 | } | 1799 | } |
1795 | } | 1800 | } |
1796 | if (pgr) | 1801 | if (pgr) |
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 22f39e00bef3..bb1b5453a7a1 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c | |||
@@ -455,23 +455,12 @@ ip6t_do_table(struct sk_buff *skb, | |||
455 | #endif | 455 | #endif |
456 | } | 456 | } |
457 | 457 | ||
458 | static bool find_jump_target(const struct xt_table_info *t, | ||
459 | const struct ip6t_entry *target) | ||
460 | { | ||
461 | struct ip6t_entry *iter; | ||
462 | |||
463 | xt_entry_foreach(iter, t->entries, t->size) { | ||
464 | if (iter == target) | ||
465 | return true; | ||
466 | } | ||
467 | return false; | ||
468 | } | ||
469 | |||
470 | /* Figures out from what hook each rule can be called: returns 0 if | 458 | /* Figures out from what hook each rule can be called: returns 0 if |
471 | there are loops. Puts hook bitmask in comefrom. */ | 459 | there are loops. Puts hook bitmask in comefrom. */ |
472 | static int | 460 | static int |
473 | mark_source_chains(const struct xt_table_info *newinfo, | 461 | mark_source_chains(const struct xt_table_info *newinfo, |
474 | unsigned int valid_hooks, void *entry0) | 462 | unsigned int valid_hooks, void *entry0, |
463 | unsigned int *offsets) | ||
475 | { | 464 | { |
476 | unsigned int hook; | 465 | unsigned int hook; |
477 | 466 | ||
@@ -564,10 +553,11 @@ mark_source_chains(const struct xt_table_info *newinfo, | |||
564 | /* This a jump; chase it. */ | 553 | /* This a jump; chase it. */ |
565 | duprintf("Jump rule %u -> %u\n", | 554 | duprintf("Jump rule %u -> %u\n", |
566 | pos, newpos); | 555 | pos, newpos); |
556 | if (!xt_find_jump_offset(offsets, newpos, | ||
557 | newinfo->number)) | ||
558 | return 0; | ||
567 | e = (struct ip6t_entry *) | 559 | e = (struct ip6t_entry *) |
568 | (entry0 + newpos); | 560 | (entry0 + newpos); |
569 | if (!find_jump_target(newinfo, e)) | ||
570 | return 0; | ||
571 | } else { | 561 | } else { |
572 | /* ... this is a fallthru */ | 562 | /* ... this is a fallthru */ |
573 | newpos = pos + e->next_offset; | 563 | newpos = pos + e->next_offset; |
@@ -676,10 +666,12 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name, | |||
676 | unsigned int j; | 666 | unsigned int j; |
677 | struct xt_mtchk_param mtpar; | 667 | struct xt_mtchk_param mtpar; |
678 | struct xt_entry_match *ematch; | 668 | struct xt_entry_match *ematch; |
669 | unsigned long pcnt; | ||
679 | 670 | ||
680 | e->counters.pcnt = xt_percpu_counter_alloc(); | 671 | pcnt = xt_percpu_counter_alloc(); |
681 | if (IS_ERR_VALUE(e->counters.pcnt)) | 672 | if (IS_ERR_VALUE(pcnt)) |
682 | return -ENOMEM; | 673 | return -ENOMEM; |
674 | e->counters.pcnt = pcnt; | ||
683 | 675 | ||
684 | j = 0; | 676 | j = 0; |
685 | mtpar.net = net; | 677 | mtpar.net = net; |
@@ -823,6 +815,7 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, | |||
823 | const struct ip6t_replace *repl) | 815 | const struct ip6t_replace *repl) |
824 | { | 816 | { |
825 | struct ip6t_entry *iter; | 817 | struct ip6t_entry *iter; |
818 | unsigned int *offsets; | ||
826 | unsigned int i; | 819 | unsigned int i; |
827 | int ret = 0; | 820 | int ret = 0; |
828 | 821 | ||
@@ -836,6 +829,9 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, | |||
836 | } | 829 | } |
837 | 830 | ||
838 | duprintf("translate_table: size %u\n", newinfo->size); | 831 | duprintf("translate_table: size %u\n", newinfo->size); |
832 | offsets = xt_alloc_entry_offsets(newinfo->number); | ||
833 | if (!offsets) | ||
834 | return -ENOMEM; | ||
839 | i = 0; | 835 | i = 0; |
840 | /* Walk through entries, checking offsets. */ | 836 | /* Walk through entries, checking offsets. */ |
841 | xt_entry_foreach(iter, entry0, newinfo->size) { | 837 | xt_entry_foreach(iter, entry0, newinfo->size) { |
@@ -845,17 +841,20 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, | |||
845 | repl->underflow, | 841 | repl->underflow, |
846 | repl->valid_hooks); | 842 | repl->valid_hooks); |
847 | if (ret != 0) | 843 | if (ret != 0) |
848 | return ret; | 844 | goto out_free; |
845 | if (i < repl->num_entries) | ||
846 | offsets[i] = (void *)iter - entry0; | ||
849 | ++i; | 847 | ++i; |
850 | if (strcmp(ip6t_get_target(iter)->u.user.name, | 848 | if (strcmp(ip6t_get_target(iter)->u.user.name, |
851 | XT_ERROR_TARGET) == 0) | 849 | XT_ERROR_TARGET) == 0) |
852 | ++newinfo->stacksize; | 850 | ++newinfo->stacksize; |
853 | } | 851 | } |
854 | 852 | ||
853 | ret = -EINVAL; | ||
855 | if (i != repl->num_entries) { | 854 | if (i != repl->num_entries) { |
856 | duprintf("translate_table: %u not %u entries\n", | 855 | duprintf("translate_table: %u not %u entries\n", |
857 | i, repl->num_entries); | 856 | i, repl->num_entries); |
858 | return -EINVAL; | 857 | goto out_free; |
859 | } | 858 | } |
860 | 859 | ||
861 | /* Check hooks all assigned */ | 860 | /* Check hooks all assigned */ |
@@ -866,17 +865,20 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, | |||
866 | if (newinfo->hook_entry[i] == 0xFFFFFFFF) { | 865 | if (newinfo->hook_entry[i] == 0xFFFFFFFF) { |
867 | duprintf("Invalid hook entry %u %u\n", | 866 | duprintf("Invalid hook entry %u %u\n", |
868 | i, repl->hook_entry[i]); | 867 | i, repl->hook_entry[i]); |
869 | return -EINVAL; | 868 | goto out_free; |
870 | } | 869 | } |
871 | if (newinfo->underflow[i] == 0xFFFFFFFF) { | 870 | if (newinfo->underflow[i] == 0xFFFFFFFF) { |
872 | duprintf("Invalid underflow %u %u\n", | 871 | duprintf("Invalid underflow %u %u\n", |
873 | i, repl->underflow[i]); | 872 | i, repl->underflow[i]); |
874 | return -EINVAL; | 873 | goto out_free; |
875 | } | 874 | } |
876 | } | 875 | } |
877 | 876 | ||
878 | if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) | 877 | if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) { |
879 | return -ELOOP; | 878 | ret = -ELOOP; |
879 | goto out_free; | ||
880 | } | ||
881 | kvfree(offsets); | ||
880 | 882 | ||
881 | /* Finally, each sanity check must pass */ | 883 | /* Finally, each sanity check must pass */ |
882 | i = 0; | 884 | i = 0; |
@@ -897,6 +899,9 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, | |||
897 | } | 899 | } |
898 | 900 | ||
899 | return ret; | 901 | return ret; |
902 | out_free: | ||
903 | kvfree(offsets); | ||
904 | return ret; | ||
900 | } | 905 | } |
901 | 906 | ||
902 | static void | 907 | static void |
diff --git a/net/ipv6/netfilter/nf_dup_ipv6.c b/net/ipv6/netfilter/nf_dup_ipv6.c index 6989c70ae29f..4a84b5ad9ecb 100644 --- a/net/ipv6/netfilter/nf_dup_ipv6.c +++ b/net/ipv6/netfilter/nf_dup_ipv6.c | |||
@@ -33,6 +33,7 @@ static bool nf_dup_ipv6_route(struct net *net, struct sk_buff *skb, | |||
33 | fl6.daddr = *gw; | 33 | fl6.daddr = *gw; |
34 | fl6.flowlabel = (__force __be32)(((iph->flow_lbl[0] & 0xF) << 16) | | 34 | fl6.flowlabel = (__force __be32)(((iph->flow_lbl[0] & 0xF) << 16) | |
35 | (iph->flow_lbl[1] << 8) | iph->flow_lbl[2]); | 35 | (iph->flow_lbl[1] << 8) | iph->flow_lbl[2]); |
36 | fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH; | ||
36 | dst = ip6_route_output(net, NULL, &fl6); | 37 | dst = ip6_route_output(net, NULL, &fl6); |
37 | if (dst->error) { | 38 | if (dst->error) { |
38 | dst_release(dst); | 39 | dst_release(dst); |
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c index e0f922b777e3..7117e5bef412 100644 --- a/net/ipv6/netfilter/nf_reject_ipv6.c +++ b/net/ipv6/netfilter/nf_reject_ipv6.c | |||
@@ -157,6 +157,7 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook) | |||
157 | fl6.daddr = oip6h->saddr; | 157 | fl6.daddr = oip6h->saddr; |
158 | fl6.fl6_sport = otcph->dest; | 158 | fl6.fl6_sport = otcph->dest; |
159 | fl6.fl6_dport = otcph->source; | 159 | fl6.fl6_dport = otcph->source; |
160 | fl6.flowi6_mark = IP6_REPLY_MARK(net, oldskb->mark); | ||
160 | security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6)); | 161 | security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6)); |
161 | dst = ip6_route_output(net, NULL, &fl6); | 162 | dst = ip6_route_output(net, NULL, &fl6); |
162 | if (dst == NULL || dst->error) { | 163 | if (dst == NULL || dst->error) { |
@@ -180,6 +181,8 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook) | |||
180 | 181 | ||
181 | skb_dst_set(nskb, dst); | 182 | skb_dst_set(nskb, dst); |
182 | 183 | ||
184 | nskb->mark = fl6.flowi6_mark; | ||
185 | |||
183 | skb_reserve(nskb, hh_len + dst->header_len); | 186 | skb_reserve(nskb, hh_len + dst->header_len); |
184 | ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP, | 187 | ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP, |
185 | ip6_dst_hoplimit(dst)); | 188 | ip6_dst_hoplimit(dst)); |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 4618f52a4abe..7e29a31dd4f0 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -951,7 +951,7 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb, | |||
951 | tcp_rsk(req)->rcv_nxt, | 951 | tcp_rsk(req)->rcv_nxt, |
952 | req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale, | 952 | req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale, |
953 | tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if, | 953 | tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if, |
954 | tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), | 954 | tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr), |
955 | 0, 0); | 955 | 0, 0); |
956 | } | 956 | } |
957 | 957 | ||
diff --git a/net/key/af_key.c b/net/key/af_key.c index 94bf810ad242..6482b001f19a 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c | |||
@@ -401,6 +401,11 @@ static int verify_address_len(const void *p) | |||
401 | #endif | 401 | #endif |
402 | int len; | 402 | int len; |
403 | 403 | ||
404 | if (sp->sadb_address_len < | ||
405 | DIV_ROUND_UP(sizeof(*sp) + offsetofend(typeof(*addr), sa_family), | ||
406 | sizeof(uint64_t))) | ||
407 | return -EINVAL; | ||
408 | |||
404 | switch (addr->sa_family) { | 409 | switch (addr->sa_family) { |
405 | case AF_INET: | 410 | case AF_INET: |
406 | len = DIV_ROUND_UP(sizeof(*sp) + sizeof(*sin), sizeof(uint64_t)); | 411 | len = DIV_ROUND_UP(sizeof(*sp) + sizeof(*sin), sizeof(uint64_t)); |
@@ -511,6 +516,9 @@ static int parse_exthdrs(struct sk_buff *skb, const struct sadb_msg *hdr, void * | |||
511 | uint16_t ext_type; | 516 | uint16_t ext_type; |
512 | int ext_len; | 517 | int ext_len; |
513 | 518 | ||
519 | if (len < sizeof(*ehdr)) | ||
520 | return -EINVAL; | ||
521 | |||
514 | ext_len = ehdr->sadb_ext_len; | 522 | ext_len = ehdr->sadb_ext_len; |
515 | ext_len *= sizeof(uint64_t); | 523 | ext_len *= sizeof(uint64_t); |
516 | ext_type = ehdr->sadb_ext_type; | 524 | ext_type = ehdr->sadb_ext_type; |
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c index 4d2aaebd4f97..e546a987a9d3 100644 --- a/net/mac80211/debugfs.c +++ b/net/mac80211/debugfs.c | |||
@@ -91,7 +91,7 @@ static const struct file_operations reset_ops = { | |||
91 | }; | 91 | }; |
92 | #endif | 92 | #endif |
93 | 93 | ||
94 | static const char *hw_flag_names[NUM_IEEE80211_HW_FLAGS + 1] = { | 94 | static const char *hw_flag_names[] = { |
95 | #define FLAG(F) [IEEE80211_HW_##F] = #F | 95 | #define FLAG(F) [IEEE80211_HW_##F] = #F |
96 | FLAG(HAS_RATE_CONTROL), | 96 | FLAG(HAS_RATE_CONTROL), |
97 | FLAG(RX_INCLUDES_FCS), | 97 | FLAG(RX_INCLUDES_FCS), |
@@ -125,9 +125,6 @@ static const char *hw_flag_names[NUM_IEEE80211_HW_FLAGS + 1] = { | |||
125 | FLAG(TDLS_WIDER_BW), | 125 | FLAG(TDLS_WIDER_BW), |
126 | FLAG(SUPPORTS_AMSDU_IN_AMPDU), | 126 | FLAG(SUPPORTS_AMSDU_IN_AMPDU), |
127 | FLAG(BEACON_TX_STATUS), | 127 | FLAG(BEACON_TX_STATUS), |
128 | |||
129 | /* keep last for the build bug below */ | ||
130 | (void *)0x1 | ||
131 | #undef FLAG | 128 | #undef FLAG |
132 | }; | 129 | }; |
133 | 130 | ||
@@ -147,7 +144,7 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf, | |||
147 | /* fail compilation if somebody adds or removes | 144 | /* fail compilation if somebody adds or removes |
148 | * a flag without updating the name array above | 145 | * a flag without updating the name array above |
149 | */ | 146 | */ |
150 | BUILD_BUG_ON(hw_flag_names[NUM_IEEE80211_HW_FLAGS] != (void *)0x1); | 147 | BUILD_BUG_ON(ARRAY_SIZE(hw_flag_names) != NUM_IEEE80211_HW_FLAGS); |
151 | 148 | ||
152 | for (i = 0; i < NUM_IEEE80211_HW_FLAGS; i++) { | 149 | for (i = 0; i < NUM_IEEE80211_HW_FLAGS; i++) { |
153 | if (test_bit(i, local->hw.flags)) | 150 | if (test_bit(i, local->hw.flags)) |
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index b6dc2d7cd650..f55cddcd556c 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c | |||
@@ -776,7 +776,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata, | |||
776 | struct mesh_path *mpath; | 776 | struct mesh_path *mpath; |
777 | u8 ttl, flags, hopcount; | 777 | u8 ttl, flags, hopcount; |
778 | const u8 *orig_addr; | 778 | const u8 *orig_addr; |
779 | u32 orig_sn, metric, metric_txsta, interval; | 779 | u32 orig_sn, new_metric, orig_metric, last_hop_metric, interval; |
780 | bool root_is_gate; | 780 | bool root_is_gate; |
781 | 781 | ||
782 | ttl = rann->rann_ttl; | 782 | ttl = rann->rann_ttl; |
@@ -787,7 +787,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata, | |||
787 | interval = le32_to_cpu(rann->rann_interval); | 787 | interval = le32_to_cpu(rann->rann_interval); |
788 | hopcount = rann->rann_hopcount; | 788 | hopcount = rann->rann_hopcount; |
789 | hopcount++; | 789 | hopcount++; |
790 | metric = le32_to_cpu(rann->rann_metric); | 790 | orig_metric = le32_to_cpu(rann->rann_metric); |
791 | 791 | ||
792 | /* Ignore our own RANNs */ | 792 | /* Ignore our own RANNs */ |
793 | if (ether_addr_equal(orig_addr, sdata->vif.addr)) | 793 | if (ether_addr_equal(orig_addr, sdata->vif.addr)) |
@@ -804,7 +804,10 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata, | |||
804 | return; | 804 | return; |
805 | } | 805 | } |
806 | 806 | ||
807 | metric_txsta = airtime_link_metric_get(local, sta); | 807 | last_hop_metric = airtime_link_metric_get(local, sta); |
808 | new_metric = orig_metric + last_hop_metric; | ||
809 | if (new_metric < orig_metric) | ||
810 | new_metric = MAX_METRIC; | ||
808 | 811 | ||
809 | mpath = mesh_path_lookup(sdata, orig_addr); | 812 | mpath = mesh_path_lookup(sdata, orig_addr); |
810 | if (!mpath) { | 813 | if (!mpath) { |
@@ -817,7 +820,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata, | |||
817 | } | 820 | } |
818 | 821 | ||
819 | if (!(SN_LT(mpath->sn, orig_sn)) && | 822 | if (!(SN_LT(mpath->sn, orig_sn)) && |
820 | !(mpath->sn == orig_sn && metric < mpath->rann_metric)) { | 823 | !(mpath->sn == orig_sn && new_metric < mpath->rann_metric)) { |
821 | rcu_read_unlock(); | 824 | rcu_read_unlock(); |
822 | return; | 825 | return; |
823 | } | 826 | } |
@@ -835,7 +838,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata, | |||
835 | } | 838 | } |
836 | 839 | ||
837 | mpath->sn = orig_sn; | 840 | mpath->sn = orig_sn; |
838 | mpath->rann_metric = metric + metric_txsta; | 841 | mpath->rann_metric = new_metric; |
839 | mpath->is_root = true; | 842 | mpath->is_root = true; |
840 | /* Recording RANNs sender address to send individually | 843 | /* Recording RANNs sender address to send individually |
841 | * addressed PREQs destined for root mesh STA */ | 844 | * addressed PREQs destined for root mesh STA */ |
@@ -855,7 +858,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata, | |||
855 | mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr, | 858 | mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr, |
856 | orig_sn, 0, NULL, 0, broadcast_addr, | 859 | orig_sn, 0, NULL, 0, broadcast_addr, |
857 | hopcount, ttl, interval, | 860 | hopcount, ttl, interval, |
858 | metric + metric_txsta, 0, sdata); | 861 | new_metric, 0, sdata); |
859 | } | 862 | } |
860 | 863 | ||
861 | rcu_read_unlock(); | 864 | rcu_read_unlock(); |
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 86a3c6f0c871..5f747089024f 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c | |||
@@ -719,6 +719,7 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, | |||
719 | * least once for the stats anyway. | 719 | * least once for the stats anyway. |
720 | */ | 720 | */ |
721 | rcu_read_lock_bh(); | 721 | rcu_read_lock_bh(); |
722 | begin: | ||
722 | hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) { | 723 | hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) { |
723 | ct = nf_ct_tuplehash_to_ctrack(h); | 724 | ct = nf_ct_tuplehash_to_ctrack(h); |
724 | if (ct != ignored_conntrack && | 725 | if (ct != ignored_conntrack && |
@@ -730,6 +731,12 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, | |||
730 | } | 731 | } |
731 | NF_CT_STAT_INC(net, searched); | 732 | NF_CT_STAT_INC(net, searched); |
732 | } | 733 | } |
734 | |||
735 | if (get_nulls_value(n) != hash) { | ||
736 | NF_CT_STAT_INC(net, search_restart); | ||
737 | goto begin; | ||
738 | } | ||
739 | |||
733 | rcu_read_unlock_bh(); | 740 | rcu_read_unlock_bh(); |
734 | 741 | ||
735 | return 0; | 742 | return 0; |
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index 7f16d19d6198..a91f8bd51d05 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c | |||
@@ -560,7 +560,7 @@ static int exp_seq_show(struct seq_file *s, void *v) | |||
560 | helper = rcu_dereference(nfct_help(expect->master)->helper); | 560 | helper = rcu_dereference(nfct_help(expect->master)->helper); |
561 | if (helper) { | 561 | if (helper) { |
562 | seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name); | 562 | seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name); |
563 | if (helper->expect_policy[expect->class].name) | 563 | if (helper->expect_policy[expect->class].name[0]) |
564 | seq_printf(s, "/%s", | 564 | seq_printf(s, "/%s", |
565 | helper->expect_policy[expect->class].name); | 565 | helper->expect_policy[expect->class].name); |
566 | } | 566 | } |
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index 885b4aba3695..1665c2159e4b 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c | |||
@@ -1434,9 +1434,12 @@ static int process_sip_request(struct sk_buff *skb, unsigned int protoff, | |||
1434 | handler = &sip_handlers[i]; | 1434 | handler = &sip_handlers[i]; |
1435 | if (handler->request == NULL) | 1435 | if (handler->request == NULL) |
1436 | continue; | 1436 | continue; |
1437 | if (*datalen < handler->len || | 1437 | if (*datalen < handler->len + 2 || |
1438 | strncasecmp(*dptr, handler->method, handler->len)) | 1438 | strncasecmp(*dptr, handler->method, handler->len)) |
1439 | continue; | 1439 | continue; |
1440 | if ((*dptr)[handler->len] != ' ' || | ||
1441 | !isalpha((*dptr)[handler->len+1])) | ||
1442 | continue; | ||
1440 | 1443 | ||
1441 | if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CSEQ, | 1444 | if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CSEQ, |
1442 | &matchoff, &matchlen) <= 0) { | 1445 | &matchoff, &matchlen) <= 0) { |
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c index 5baa8e24e6ac..b19ad20a705c 100644 --- a/net/netfilter/nf_queue.c +++ b/net/netfilter/nf_queue.c | |||
@@ -26,23 +26,21 @@ | |||
26 | * Once the queue is registered it must reinject all packets it | 26 | * Once the queue is registered it must reinject all packets it |
27 | * receives, no matter what. | 27 | * receives, no matter what. |
28 | */ | 28 | */ |
29 | static const struct nf_queue_handler __rcu *queue_handler __read_mostly; | ||
30 | 29 | ||
31 | /* return EBUSY when somebody else is registered, return EEXIST if the | 30 | /* return EBUSY when somebody else is registered, return EEXIST if the |
32 | * same handler is registered, return 0 in case of success. */ | 31 | * same handler is registered, return 0 in case of success. */ |
33 | void nf_register_queue_handler(const struct nf_queue_handler *qh) | 32 | void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh) |
34 | { | 33 | { |
35 | /* should never happen, we only have one queueing backend in kernel */ | 34 | /* should never happen, we only have one queueing backend in kernel */ |
36 | WARN_ON(rcu_access_pointer(queue_handler)); | 35 | WARN_ON(rcu_access_pointer(net->nf.queue_handler)); |
37 | rcu_assign_pointer(queue_handler, qh); | 36 | rcu_assign_pointer(net->nf.queue_handler, qh); |
38 | } | 37 | } |
39 | EXPORT_SYMBOL(nf_register_queue_handler); | 38 | EXPORT_SYMBOL(nf_register_queue_handler); |
40 | 39 | ||
41 | /* The caller must flush their queue before this */ | 40 | /* The caller must flush their queue before this */ |
42 | void nf_unregister_queue_handler(void) | 41 | void nf_unregister_queue_handler(struct net *net) |
43 | { | 42 | { |
44 | RCU_INIT_POINTER(queue_handler, NULL); | 43 | RCU_INIT_POINTER(net->nf.queue_handler, NULL); |
45 | synchronize_rcu(); | ||
46 | } | 44 | } |
47 | EXPORT_SYMBOL(nf_unregister_queue_handler); | 45 | EXPORT_SYMBOL(nf_unregister_queue_handler); |
48 | 46 | ||
@@ -103,7 +101,7 @@ void nf_queue_nf_hook_drop(struct net *net, struct nf_hook_ops *ops) | |||
103 | const struct nf_queue_handler *qh; | 101 | const struct nf_queue_handler *qh; |
104 | 102 | ||
105 | rcu_read_lock(); | 103 | rcu_read_lock(); |
106 | qh = rcu_dereference(queue_handler); | 104 | qh = rcu_dereference(net->nf.queue_handler); |
107 | if (qh) | 105 | if (qh) |
108 | qh->nf_hook_drop(net, ops); | 106 | qh->nf_hook_drop(net, ops); |
109 | rcu_read_unlock(); | 107 | rcu_read_unlock(); |
@@ -122,9 +120,10 @@ int nf_queue(struct sk_buff *skb, | |||
122 | struct nf_queue_entry *entry = NULL; | 120 | struct nf_queue_entry *entry = NULL; |
123 | const struct nf_afinfo *afinfo; | 121 | const struct nf_afinfo *afinfo; |
124 | const struct nf_queue_handler *qh; | 122 | const struct nf_queue_handler *qh; |
123 | struct net *net = state->net; | ||
125 | 124 | ||
126 | /* QUEUE == DROP if no one is waiting, to be safe. */ | 125 | /* QUEUE == DROP if no one is waiting, to be safe. */ |
127 | qh = rcu_dereference(queue_handler); | 126 | qh = rcu_dereference(net->nf.queue_handler); |
128 | if (!qh) { | 127 | if (!qh) { |
129 | status = -ESRCH; | 128 | status = -ESRCH; |
130 | goto err; | 129 | goto err; |
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c index 6d10002d23f8..ac143ae4f7b6 100644 --- a/net/netfilter/nfnetlink_cthelper.c +++ b/net/netfilter/nfnetlink_cthelper.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/types.h> | 17 | #include <linux/types.h> |
18 | #include <linux/list.h> | 18 | #include <linux/list.h> |
19 | #include <linux/errno.h> | 19 | #include <linux/errno.h> |
20 | #include <linux/capability.h> | ||
20 | #include <net/netlink.h> | 21 | #include <net/netlink.h> |
21 | #include <net/sock.h> | 22 | #include <net/sock.h> |
22 | 23 | ||
@@ -32,6 +33,13 @@ MODULE_LICENSE("GPL"); | |||
32 | MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>"); | 33 | MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>"); |
33 | MODULE_DESCRIPTION("nfnl_cthelper: User-space connection tracking helpers"); | 34 | MODULE_DESCRIPTION("nfnl_cthelper: User-space connection tracking helpers"); |
34 | 35 | ||
36 | struct nfnl_cthelper { | ||
37 | struct list_head list; | ||
38 | struct nf_conntrack_helper helper; | ||
39 | }; | ||
40 | |||
41 | static LIST_HEAD(nfnl_cthelper_list); | ||
42 | |||
35 | static int | 43 | static int |
36 | nfnl_userspace_cthelper(struct sk_buff *skb, unsigned int protoff, | 44 | nfnl_userspace_cthelper(struct sk_buff *skb, unsigned int protoff, |
37 | struct nf_conn *ct, enum ip_conntrack_info ctinfo) | 45 | struct nf_conn *ct, enum ip_conntrack_info ctinfo) |
@@ -205,18 +213,20 @@ nfnl_cthelper_create(const struct nlattr * const tb[], | |||
205 | struct nf_conntrack_tuple *tuple) | 213 | struct nf_conntrack_tuple *tuple) |
206 | { | 214 | { |
207 | struct nf_conntrack_helper *helper; | 215 | struct nf_conntrack_helper *helper; |
216 | struct nfnl_cthelper *nfcth; | ||
208 | int ret; | 217 | int ret; |
209 | 218 | ||
210 | if (!tb[NFCTH_TUPLE] || !tb[NFCTH_POLICY] || !tb[NFCTH_PRIV_DATA_LEN]) | 219 | if (!tb[NFCTH_TUPLE] || !tb[NFCTH_POLICY] || !tb[NFCTH_PRIV_DATA_LEN]) |
211 | return -EINVAL; | 220 | return -EINVAL; |
212 | 221 | ||
213 | helper = kzalloc(sizeof(struct nf_conntrack_helper), GFP_KERNEL); | 222 | nfcth = kzalloc(sizeof(*nfcth), GFP_KERNEL); |
214 | if (helper == NULL) | 223 | if (nfcth == NULL) |
215 | return -ENOMEM; | 224 | return -ENOMEM; |
225 | helper = &nfcth->helper; | ||
216 | 226 | ||
217 | ret = nfnl_cthelper_parse_expect_policy(helper, tb[NFCTH_POLICY]); | 227 | ret = nfnl_cthelper_parse_expect_policy(helper, tb[NFCTH_POLICY]); |
218 | if (ret < 0) | 228 | if (ret < 0) |
219 | goto err; | 229 | goto err1; |
220 | 230 | ||
221 | strncpy(helper->name, nla_data(tb[NFCTH_NAME]), NF_CT_HELPER_NAME_LEN); | 231 | strncpy(helper->name, nla_data(tb[NFCTH_NAME]), NF_CT_HELPER_NAME_LEN); |
222 | helper->data_len = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN])); | 232 | helper->data_len = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN])); |
@@ -247,15 +257,101 @@ nfnl_cthelper_create(const struct nlattr * const tb[], | |||
247 | 257 | ||
248 | ret = nf_conntrack_helper_register(helper); | 258 | ret = nf_conntrack_helper_register(helper); |
249 | if (ret < 0) | 259 | if (ret < 0) |
250 | goto err; | 260 | goto err2; |
251 | 261 | ||
262 | list_add_tail(&nfcth->list, &nfnl_cthelper_list); | ||
252 | return 0; | 263 | return 0; |
253 | err: | 264 | err2: |
254 | kfree(helper); | 265 | kfree(helper->expect_policy); |
266 | err1: | ||
267 | kfree(nfcth); | ||
255 | return ret; | 268 | return ret; |
256 | } | 269 | } |
257 | 270 | ||
258 | static int | 271 | static int |
272 | nfnl_cthelper_update_policy_one(const struct nf_conntrack_expect_policy *policy, | ||
273 | struct nf_conntrack_expect_policy *new_policy, | ||
274 | const struct nlattr *attr) | ||
275 | { | ||
276 | struct nlattr *tb[NFCTH_POLICY_MAX + 1]; | ||
277 | int err; | ||
278 | |||
279 | err = nla_parse_nested(tb, NFCTH_POLICY_MAX, attr, | ||
280 | nfnl_cthelper_expect_pol); | ||
281 | if (err < 0) | ||
282 | return err; | ||
283 | |||
284 | if (!tb[NFCTH_POLICY_NAME] || | ||
285 | !tb[NFCTH_POLICY_EXPECT_MAX] || | ||
286 | !tb[NFCTH_POLICY_EXPECT_TIMEOUT]) | ||
287 | return -EINVAL; | ||
288 | |||
289 | if (nla_strcmp(tb[NFCTH_POLICY_NAME], policy->name)) | ||
290 | return -EBUSY; | ||
291 | |||
292 | new_policy->max_expected = | ||
293 | ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX])); | ||
294 | new_policy->timeout = | ||
295 | ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_TIMEOUT])); | ||
296 | |||
297 | return 0; | ||
298 | } | ||
299 | |||
300 | static int nfnl_cthelper_update_policy_all(struct nlattr *tb[], | ||
301 | struct nf_conntrack_helper *helper) | ||
302 | { | ||
303 | struct nf_conntrack_expect_policy new_policy[helper->expect_class_max + 1]; | ||
304 | struct nf_conntrack_expect_policy *policy; | ||
305 | int i, err; | ||
306 | |||
307 | /* Check first that all policy attributes are well-formed, so we don't | ||
308 | * leave things in inconsistent state on errors. | ||
309 | */ | ||
310 | for (i = 0; i < helper->expect_class_max + 1; i++) { | ||
311 | |||
312 | if (!tb[NFCTH_POLICY_SET + i]) | ||
313 | return -EINVAL; | ||
314 | |||
315 | err = nfnl_cthelper_update_policy_one(&helper->expect_policy[i], | ||
316 | &new_policy[i], | ||
317 | tb[NFCTH_POLICY_SET + i]); | ||
318 | if (err < 0) | ||
319 | return err; | ||
320 | } | ||
321 | /* Now we can safely update them. */ | ||
322 | for (i = 0; i < helper->expect_class_max + 1; i++) { | ||
323 | policy = (struct nf_conntrack_expect_policy *) | ||
324 | &helper->expect_policy[i]; | ||
325 | policy->max_expected = new_policy->max_expected; | ||
326 | policy->timeout = new_policy->timeout; | ||
327 | } | ||
328 | |||
329 | return 0; | ||
330 | } | ||
331 | |||
332 | static int nfnl_cthelper_update_policy(struct nf_conntrack_helper *helper, | ||
333 | const struct nlattr *attr) | ||
334 | { | ||
335 | struct nlattr *tb[NFCTH_POLICY_SET_MAX + 1]; | ||
336 | unsigned int class_max; | ||
337 | int err; | ||
338 | |||
339 | err = nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr, | ||
340 | nfnl_cthelper_expect_policy_set); | ||
341 | if (err < 0) | ||
342 | return err; | ||
343 | |||
344 | if (!tb[NFCTH_POLICY_SET_NUM]) | ||
345 | return -EINVAL; | ||
346 | |||
347 | class_max = ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM])); | ||
348 | if (helper->expect_class_max + 1 != class_max) | ||
349 | return -EBUSY; | ||
350 | |||
351 | return nfnl_cthelper_update_policy_all(tb, helper); | ||
352 | } | ||
353 | |||
354 | static int | ||
259 | nfnl_cthelper_update(const struct nlattr * const tb[], | 355 | nfnl_cthelper_update(const struct nlattr * const tb[], |
260 | struct nf_conntrack_helper *helper) | 356 | struct nf_conntrack_helper *helper) |
261 | { | 357 | { |
@@ -265,8 +361,7 @@ nfnl_cthelper_update(const struct nlattr * const tb[], | |||
265 | return -EBUSY; | 361 | return -EBUSY; |
266 | 362 | ||
267 | if (tb[NFCTH_POLICY]) { | 363 | if (tb[NFCTH_POLICY]) { |
268 | ret = nfnl_cthelper_parse_expect_policy(helper, | 364 | ret = nfnl_cthelper_update_policy(helper, tb[NFCTH_POLICY]); |
269 | tb[NFCTH_POLICY]); | ||
270 | if (ret < 0) | 365 | if (ret < 0) |
271 | return ret; | 366 | return ret; |
272 | } | 367 | } |
@@ -295,7 +390,11 @@ nfnl_cthelper_new(struct sock *nfnl, struct sk_buff *skb, | |||
295 | const char *helper_name; | 390 | const char *helper_name; |
296 | struct nf_conntrack_helper *cur, *helper = NULL; | 391 | struct nf_conntrack_helper *cur, *helper = NULL; |
297 | struct nf_conntrack_tuple tuple; | 392 | struct nf_conntrack_tuple tuple; |
298 | int ret = 0, i; | 393 | struct nfnl_cthelper *nlcth; |
394 | int ret = 0; | ||
395 | |||
396 | if (!capable(CAP_NET_ADMIN)) | ||
397 | return -EPERM; | ||
299 | 398 | ||
300 | if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE]) | 399 | if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE]) |
301 | return -EINVAL; | 400 | return -EINVAL; |
@@ -306,31 +405,22 @@ nfnl_cthelper_new(struct sock *nfnl, struct sk_buff *skb, | |||
306 | if (ret < 0) | 405 | if (ret < 0) |
307 | return ret; | 406 | return ret; |
308 | 407 | ||
309 | rcu_read_lock(); | 408 | list_for_each_entry(nlcth, &nfnl_cthelper_list, list) { |
310 | for (i = 0; i < nf_ct_helper_hsize && !helper; i++) { | 409 | cur = &nlcth->helper; |
311 | hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) { | ||
312 | 410 | ||
313 | /* skip non-userspace conntrack helpers. */ | 411 | if (strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN)) |
314 | if (!(cur->flags & NF_CT_HELPER_F_USERSPACE)) | 412 | continue; |
315 | continue; | ||
316 | 413 | ||
317 | if (strncmp(cur->name, helper_name, | 414 | if ((tuple.src.l3num != cur->tuple.src.l3num || |
318 | NF_CT_HELPER_NAME_LEN) != 0) | 415 | tuple.dst.protonum != cur->tuple.dst.protonum)) |
319 | continue; | 416 | continue; |
320 | 417 | ||
321 | if ((tuple.src.l3num != cur->tuple.src.l3num || | 418 | if (nlh->nlmsg_flags & NLM_F_EXCL) |
322 | tuple.dst.protonum != cur->tuple.dst.protonum)) | 419 | return -EEXIST; |
323 | continue; | ||
324 | 420 | ||
325 | if (nlh->nlmsg_flags & NLM_F_EXCL) { | 421 | helper = cur; |
326 | ret = -EEXIST; | 422 | break; |
327 | goto err; | ||
328 | } | ||
329 | helper = cur; | ||
330 | break; | ||
331 | } | ||
332 | } | 423 | } |
333 | rcu_read_unlock(); | ||
334 | 424 | ||
335 | if (helper == NULL) | 425 | if (helper == NULL) |
336 | ret = nfnl_cthelper_create(tb, &tuple); | 426 | ret = nfnl_cthelper_create(tb, &tuple); |
@@ -338,9 +428,6 @@ nfnl_cthelper_new(struct sock *nfnl, struct sk_buff *skb, | |||
338 | ret = nfnl_cthelper_update(tb, helper); | 428 | ret = nfnl_cthelper_update(tb, helper); |
339 | 429 | ||
340 | return ret; | 430 | return ret; |
341 | err: | ||
342 | rcu_read_unlock(); | ||
343 | return ret; | ||
344 | } | 431 | } |
345 | 432 | ||
346 | static int | 433 | static int |
@@ -504,13 +591,17 @@ static int | |||
504 | nfnl_cthelper_get(struct sock *nfnl, struct sk_buff *skb, | 591 | nfnl_cthelper_get(struct sock *nfnl, struct sk_buff *skb, |
505 | const struct nlmsghdr *nlh, const struct nlattr * const tb[]) | 592 | const struct nlmsghdr *nlh, const struct nlattr * const tb[]) |
506 | { | 593 | { |
507 | int ret = -ENOENT, i; | 594 | int ret = -ENOENT; |
508 | struct nf_conntrack_helper *cur; | 595 | struct nf_conntrack_helper *cur; |
509 | struct sk_buff *skb2; | 596 | struct sk_buff *skb2; |
510 | char *helper_name = NULL; | 597 | char *helper_name = NULL; |
511 | struct nf_conntrack_tuple tuple; | 598 | struct nf_conntrack_tuple tuple; |
599 | struct nfnl_cthelper *nlcth; | ||
512 | bool tuple_set = false; | 600 | bool tuple_set = false; |
513 | 601 | ||
602 | if (!capable(CAP_NET_ADMIN)) | ||
603 | return -EPERM; | ||
604 | |||
514 | if (nlh->nlmsg_flags & NLM_F_DUMP) { | 605 | if (nlh->nlmsg_flags & NLM_F_DUMP) { |
515 | struct netlink_dump_control c = { | 606 | struct netlink_dump_control c = { |
516 | .dump = nfnl_cthelper_dump_table, | 607 | .dump = nfnl_cthelper_dump_table, |
@@ -529,45 +620,39 @@ nfnl_cthelper_get(struct sock *nfnl, struct sk_buff *skb, | |||
529 | tuple_set = true; | 620 | tuple_set = true; |
530 | } | 621 | } |
531 | 622 | ||
532 | for (i = 0; i < nf_ct_helper_hsize; i++) { | 623 | list_for_each_entry(nlcth, &nfnl_cthelper_list, list) { |
533 | hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) { | 624 | cur = &nlcth->helper; |
534 | 625 | if (helper_name && | |
535 | /* skip non-userspace conntrack helpers. */ | 626 | strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN)) |
536 | if (!(cur->flags & NF_CT_HELPER_F_USERSPACE)) | 627 | continue; |
537 | continue; | ||
538 | 628 | ||
539 | if (helper_name && strncmp(cur->name, helper_name, | 629 | if (tuple_set && |
540 | NF_CT_HELPER_NAME_LEN) != 0) { | 630 | (tuple.src.l3num != cur->tuple.src.l3num || |
541 | continue; | 631 | tuple.dst.protonum != cur->tuple.dst.protonum)) |
542 | } | 632 | continue; |
543 | if (tuple_set && | ||
544 | (tuple.src.l3num != cur->tuple.src.l3num || | ||
545 | tuple.dst.protonum != cur->tuple.dst.protonum)) | ||
546 | continue; | ||
547 | 633 | ||
548 | skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); | 634 | skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); |
549 | if (skb2 == NULL) { | 635 | if (skb2 == NULL) { |
550 | ret = -ENOMEM; | 636 | ret = -ENOMEM; |
551 | break; | 637 | break; |
552 | } | 638 | } |
553 | 639 | ||
554 | ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).portid, | 640 | ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).portid, |
555 | nlh->nlmsg_seq, | 641 | nlh->nlmsg_seq, |
556 | NFNL_MSG_TYPE(nlh->nlmsg_type), | 642 | NFNL_MSG_TYPE(nlh->nlmsg_type), |
557 | NFNL_MSG_CTHELPER_NEW, cur); | 643 | NFNL_MSG_CTHELPER_NEW, cur); |
558 | if (ret <= 0) { | 644 | if (ret <= 0) { |
559 | kfree_skb(skb2); | 645 | kfree_skb(skb2); |
560 | break; | 646 | break; |
561 | } | 647 | } |
562 | 648 | ||
563 | ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid, | 649 | ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid, |
564 | MSG_DONTWAIT); | 650 | MSG_DONTWAIT); |
565 | if (ret > 0) | 651 | if (ret > 0) |
566 | ret = 0; | 652 | ret = 0; |
567 | 653 | ||
568 | /* this avoids a loop in nfnetlink. */ | 654 | /* this avoids a loop in nfnetlink. */ |
569 | return ret == -EAGAIN ? -ENOBUFS : ret; | 655 | return ret == -EAGAIN ? -ENOBUFS : ret; |
570 | } | ||
571 | } | 656 | } |
572 | return ret; | 657 | return ret; |
573 | } | 658 | } |
@@ -578,10 +663,13 @@ nfnl_cthelper_del(struct sock *nfnl, struct sk_buff *skb, | |||
578 | { | 663 | { |
579 | char *helper_name = NULL; | 664 | char *helper_name = NULL; |
580 | struct nf_conntrack_helper *cur; | 665 | struct nf_conntrack_helper *cur; |
581 | struct hlist_node *tmp; | ||
582 | struct nf_conntrack_tuple tuple; | 666 | struct nf_conntrack_tuple tuple; |
583 | bool tuple_set = false, found = false; | 667 | bool tuple_set = false, found = false; |
584 | int i, j = 0, ret; | 668 | struct nfnl_cthelper *nlcth, *n; |
669 | int j = 0, ret; | ||
670 | |||
671 | if (!capable(CAP_NET_ADMIN)) | ||
672 | return -EPERM; | ||
585 | 673 | ||
586 | if (tb[NFCTH_NAME]) | 674 | if (tb[NFCTH_NAME]) |
587 | helper_name = nla_data(tb[NFCTH_NAME]); | 675 | helper_name = nla_data(tb[NFCTH_NAME]); |
@@ -594,28 +682,27 @@ nfnl_cthelper_del(struct sock *nfnl, struct sk_buff *skb, | |||
594 | tuple_set = true; | 682 | tuple_set = true; |
595 | } | 683 | } |
596 | 684 | ||
597 | for (i = 0; i < nf_ct_helper_hsize; i++) { | 685 | list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) { |
598 | hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i], | 686 | cur = &nlcth->helper; |
599 | hnode) { | 687 | j++; |
600 | /* skip non-userspace conntrack helpers. */ | ||
601 | if (!(cur->flags & NF_CT_HELPER_F_USERSPACE)) | ||
602 | continue; | ||
603 | 688 | ||
604 | j++; | 689 | if (helper_name && |
690 | strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN)) | ||
691 | continue; | ||
605 | 692 | ||
606 | if (helper_name && strncmp(cur->name, helper_name, | 693 | if (tuple_set && |
607 | NF_CT_HELPER_NAME_LEN) != 0) { | 694 | (tuple.src.l3num != cur->tuple.src.l3num || |
608 | continue; | 695 | tuple.dst.protonum != cur->tuple.dst.protonum)) |
609 | } | 696 | continue; |
610 | if (tuple_set && | ||
611 | (tuple.src.l3num != cur->tuple.src.l3num || | ||
612 | tuple.dst.protonum != cur->tuple.dst.protonum)) | ||
613 | continue; | ||
614 | 697 | ||
615 | found = true; | 698 | found = true; |
616 | nf_conntrack_helper_unregister(cur); | 699 | nf_conntrack_helper_unregister(cur); |
617 | } | 700 | kfree(cur->expect_policy); |
701 | |||
702 | list_del(&nlcth->list); | ||
703 | kfree(nlcth); | ||
618 | } | 704 | } |
705 | |||
619 | /* Make sure we return success if we flush and there is no helpers */ | 706 | /* Make sure we return success if we flush and there is no helpers */ |
620 | return (found || j == 0) ? 0 : -ENOENT; | 707 | return (found || j == 0) ? 0 : -ENOENT; |
621 | } | 708 | } |
@@ -664,20 +751,16 @@ err_out: | |||
664 | static void __exit nfnl_cthelper_exit(void) | 751 | static void __exit nfnl_cthelper_exit(void) |
665 | { | 752 | { |
666 | struct nf_conntrack_helper *cur; | 753 | struct nf_conntrack_helper *cur; |
667 | struct hlist_node *tmp; | 754 | struct nfnl_cthelper *nlcth, *n; |
668 | int i; | ||
669 | 755 | ||
670 | nfnetlink_subsys_unregister(&nfnl_cthelper_subsys); | 756 | nfnetlink_subsys_unregister(&nfnl_cthelper_subsys); |
671 | 757 | ||
672 | for (i=0; i<nf_ct_helper_hsize; i++) { | 758 | list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) { |
673 | hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i], | 759 | cur = &nlcth->helper; |
674 | hnode) { | ||
675 | /* skip non-userspace conntrack helpers. */ | ||
676 | if (!(cur->flags & NF_CT_HELPER_F_USERSPACE)) | ||
677 | continue; | ||
678 | 760 | ||
679 | nf_conntrack_helper_unregister(cur); | 761 | nf_conntrack_helper_unregister(cur); |
680 | } | 762 | kfree(cur->expect_policy); |
763 | kfree(nlcth); | ||
681 | } | 764 | } |
682 | } | 765 | } |
683 | 766 | ||
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 861c6615253b..f853b55bf877 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c | |||
@@ -390,7 +390,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, | |||
390 | GFP_ATOMIC); | 390 | GFP_ATOMIC); |
391 | if (!skb) { | 391 | if (!skb) { |
392 | skb_tx_error(entskb); | 392 | skb_tx_error(entskb); |
393 | return NULL; | 393 | goto nlmsg_failure; |
394 | } | 394 | } |
395 | 395 | ||
396 | nlh = nlmsg_put(skb, 0, 0, | 396 | nlh = nlmsg_put(skb, 0, 0, |
@@ -399,7 +399,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, | |||
399 | if (!nlh) { | 399 | if (!nlh) { |
400 | skb_tx_error(entskb); | 400 | skb_tx_error(entskb); |
401 | kfree_skb(skb); | 401 | kfree_skb(skb); |
402 | return NULL; | 402 | goto nlmsg_failure; |
403 | } | 403 | } |
404 | nfmsg = nlmsg_data(nlh); | 404 | nfmsg = nlmsg_data(nlh); |
405 | nfmsg->nfgen_family = entry->state.pf; | 405 | nfmsg->nfgen_family = entry->state.pf; |
@@ -542,12 +542,17 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, | |||
542 | } | 542 | } |
543 | 543 | ||
544 | nlh->nlmsg_len = skb->len; | 544 | nlh->nlmsg_len = skb->len; |
545 | if (seclen) | ||
546 | security_release_secctx(secdata, seclen); | ||
545 | return skb; | 547 | return skb; |
546 | 548 | ||
547 | nla_put_failure: | 549 | nla_put_failure: |
548 | skb_tx_error(entskb); | 550 | skb_tx_error(entskb); |
549 | kfree_skb(skb); | 551 | kfree_skb(skb); |
550 | net_err_ratelimited("nf_queue: error creating packet message\n"); | 552 | net_err_ratelimited("nf_queue: error creating packet message\n"); |
553 | nlmsg_failure: | ||
554 | if (seclen) | ||
555 | security_release_secctx(secdata, seclen); | ||
551 | return NULL; | 556 | return NULL; |
552 | } | 557 | } |
553 | 558 | ||
@@ -1048,10 +1053,8 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb, | |||
1048 | struct net *net = sock_net(ctnl); | 1053 | struct net *net = sock_net(ctnl); |
1049 | struct nfnl_queue_net *q = nfnl_queue_pernet(net); | 1054 | struct nfnl_queue_net *q = nfnl_queue_pernet(net); |
1050 | 1055 | ||
1051 | queue = instance_lookup(q, queue_num); | 1056 | queue = verdict_instance_lookup(q, queue_num, |
1052 | if (!queue) | 1057 | NETLINK_CB(skb).portid); |
1053 | queue = verdict_instance_lookup(q, queue_num, | ||
1054 | NETLINK_CB(skb).portid); | ||
1055 | if (IS_ERR(queue)) | 1058 | if (IS_ERR(queue)) |
1056 | return PTR_ERR(queue); | 1059 | return PTR_ERR(queue); |
1057 | 1060 | ||
@@ -1379,21 +1382,29 @@ static int __net_init nfnl_queue_net_init(struct net *net) | |||
1379 | net->nf.proc_netfilter, &nfqnl_file_ops)) | 1382 | net->nf.proc_netfilter, &nfqnl_file_ops)) |
1380 | return -ENOMEM; | 1383 | return -ENOMEM; |
1381 | #endif | 1384 | #endif |
1385 | nf_register_queue_handler(net, &nfqh); | ||
1382 | return 0; | 1386 | return 0; |
1383 | } | 1387 | } |
1384 | 1388 | ||
1385 | static void __net_exit nfnl_queue_net_exit(struct net *net) | 1389 | static void __net_exit nfnl_queue_net_exit(struct net *net) |
1386 | { | 1390 | { |
1391 | nf_unregister_queue_handler(net); | ||
1387 | #ifdef CONFIG_PROC_FS | 1392 | #ifdef CONFIG_PROC_FS |
1388 | remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter); | 1393 | remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter); |
1389 | #endif | 1394 | #endif |
1390 | } | 1395 | } |
1391 | 1396 | ||
1397 | static void nfnl_queue_net_exit_batch(struct list_head *net_exit_list) | ||
1398 | { | ||
1399 | synchronize_rcu(); | ||
1400 | } | ||
1401 | |||
1392 | static struct pernet_operations nfnl_queue_net_ops = { | 1402 | static struct pernet_operations nfnl_queue_net_ops = { |
1393 | .init = nfnl_queue_net_init, | 1403 | .init = nfnl_queue_net_init, |
1394 | .exit = nfnl_queue_net_exit, | 1404 | .exit = nfnl_queue_net_exit, |
1395 | .id = &nfnl_queue_net_id, | 1405 | .exit_batch = nfnl_queue_net_exit_batch, |
1396 | .size = sizeof(struct nfnl_queue_net), | 1406 | .id = &nfnl_queue_net_id, |
1407 | .size = sizeof(struct nfnl_queue_net), | ||
1397 | }; | 1408 | }; |
1398 | 1409 | ||
1399 | static int __init nfnetlink_queue_init(void) | 1410 | static int __init nfnetlink_queue_init(void) |
@@ -1414,7 +1425,6 @@ static int __init nfnetlink_queue_init(void) | |||
1414 | } | 1425 | } |
1415 | 1426 | ||
1416 | register_netdevice_notifier(&nfqnl_dev_notifier); | 1427 | register_netdevice_notifier(&nfqnl_dev_notifier); |
1417 | nf_register_queue_handler(&nfqh); | ||
1418 | return status; | 1428 | return status; |
1419 | 1429 | ||
1420 | cleanup_netlink_notifier: | 1430 | cleanup_netlink_notifier: |
@@ -1426,7 +1436,6 @@ out: | |||
1426 | 1436 | ||
1427 | static void __exit nfnetlink_queue_fini(void) | 1437 | static void __exit nfnetlink_queue_fini(void) |
1428 | { | 1438 | { |
1429 | nf_unregister_queue_handler(); | ||
1430 | unregister_netdevice_notifier(&nfqnl_dev_notifier); | 1439 | unregister_netdevice_notifier(&nfqnl_dev_notifier); |
1431 | nfnetlink_subsys_unregister(&nfqnl_subsys); | 1440 | nfnetlink_subsys_unregister(&nfqnl_subsys); |
1432 | netlink_unregister_notifier(&nfqnl_rtnl_notifier); | 1441 | netlink_unregister_notifier(&nfqnl_rtnl_notifier); |
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 2fc6ca9d1286..7b42b0ad3f9b 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c | |||
@@ -701,6 +701,56 @@ int xt_check_entry_offsets(const void *base, | |||
701 | } | 701 | } |
702 | EXPORT_SYMBOL(xt_check_entry_offsets); | 702 | EXPORT_SYMBOL(xt_check_entry_offsets); |
703 | 703 | ||
704 | /** | ||
705 | * xt_alloc_entry_offsets - allocate array to store rule head offsets | ||
706 | * | ||
707 | * @size: number of entries | ||
708 | * | ||
709 | * Return: NULL or kmalloc'd or vmalloc'd array | ||
710 | */ | ||
711 | unsigned int *xt_alloc_entry_offsets(unsigned int size) | ||
712 | { | ||
713 | unsigned int *off; | ||
714 | |||
715 | off = kcalloc(size, sizeof(unsigned int), GFP_KERNEL | __GFP_NOWARN); | ||
716 | |||
717 | if (off) | ||
718 | return off; | ||
719 | |||
720 | if (size < (SIZE_MAX / sizeof(unsigned int))) | ||
721 | off = vmalloc(size * sizeof(unsigned int)); | ||
722 | |||
723 | return off; | ||
724 | } | ||
725 | EXPORT_SYMBOL(xt_alloc_entry_offsets); | ||
726 | |||
727 | /** | ||
728 | * xt_find_jump_offset - check if target is a valid jump offset | ||
729 | * | ||
730 | * @offsets: array containing all valid rule start offsets of a rule blob | ||
731 | * @target: the jump target to search for | ||
732 | * @size: entries in @offset | ||
733 | */ | ||
734 | bool xt_find_jump_offset(const unsigned int *offsets, | ||
735 | unsigned int target, unsigned int size) | ||
736 | { | ||
737 | int m, low = 0, hi = size; | ||
738 | |||
739 | while (hi > low) { | ||
740 | m = (low + hi) / 2u; | ||
741 | |||
742 | if (offsets[m] > target) | ||
743 | hi = m; | ||
744 | else if (offsets[m] < target) | ||
745 | low = m + 1; | ||
746 | else | ||
747 | return true; | ||
748 | } | ||
749 | |||
750 | return false; | ||
751 | } | ||
752 | EXPORT_SYMBOL(xt_find_jump_offset); | ||
753 | |||
704 | int xt_check_target(struct xt_tgchk_param *par, | 754 | int xt_check_target(struct xt_tgchk_param *par, |
705 | unsigned int size, u_int8_t proto, bool inv_proto) | 755 | unsigned int size, u_int8_t proto, bool inv_proto) |
706 | { | 756 | { |
diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c index df8801e02a32..7eae0d0af89a 100644 --- a/net/netfilter/xt_osf.c +++ b/net/netfilter/xt_osf.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
21 | 21 | ||
22 | #include <linux/capability.h> | ||
22 | #include <linux/if.h> | 23 | #include <linux/if.h> |
23 | #include <linux/inetdevice.h> | 24 | #include <linux/inetdevice.h> |
24 | #include <linux/ip.h> | 25 | #include <linux/ip.h> |
@@ -69,6 +70,9 @@ static int xt_osf_add_callback(struct sock *ctnl, struct sk_buff *skb, | |||
69 | struct xt_osf_finger *kf = NULL, *sf; | 70 | struct xt_osf_finger *kf = NULL, *sf; |
70 | int err = 0; | 71 | int err = 0; |
71 | 72 | ||
73 | if (!capable(CAP_NET_ADMIN)) | ||
74 | return -EPERM; | ||
75 | |||
72 | if (!osf_attrs[OSF_ATTR_FINGER]) | 76 | if (!osf_attrs[OSF_ATTR_FINGER]) |
73 | return -EINVAL; | 77 | return -EINVAL; |
74 | 78 | ||
@@ -112,6 +116,9 @@ static int xt_osf_remove_callback(struct sock *ctnl, struct sk_buff *skb, | |||
112 | struct xt_osf_finger *sf; | 116 | struct xt_osf_finger *sf; |
113 | int err = -ENOENT; | 117 | int err = -ENOENT; |
114 | 118 | ||
119 | if (!capable(CAP_NET_ADMIN)) | ||
120 | return -EPERM; | ||
121 | |||
115 | if (!osf_attrs[OSF_ATTR_FINGER]) | 122 | if (!osf_attrs[OSF_ATTR_FINGER]) |
116 | return -EINVAL; | 123 | return -EINVAL; |
117 | 124 | ||
diff --git a/net/netfilter/xt_qtaguid.c b/net/netfilter/xt_qtaguid.c index fbe4e1ecce6a..4f765425053c 100644 --- a/net/netfilter/xt_qtaguid.c +++ b/net/netfilter/xt_qtaguid.c | |||
@@ -519,13 +519,11 @@ static struct tag_ref *get_tag_ref(tag_t full_tag, | |||
519 | 519 | ||
520 | DR_DEBUG("qtaguid: get_tag_ref(0x%llx)\n", | 520 | DR_DEBUG("qtaguid: get_tag_ref(0x%llx)\n", |
521 | full_tag); | 521 | full_tag); |
522 | spin_lock_bh(&uid_tag_data_tree_lock); | ||
523 | tr_entry = lookup_tag_ref(full_tag, &utd_entry); | 522 | tr_entry = lookup_tag_ref(full_tag, &utd_entry); |
524 | BUG_ON(IS_ERR_OR_NULL(utd_entry)); | 523 | BUG_ON(IS_ERR_OR_NULL(utd_entry)); |
525 | if (!tr_entry) | 524 | if (!tr_entry) |
526 | tr_entry = new_tag_ref(full_tag, utd_entry); | 525 | tr_entry = new_tag_ref(full_tag, utd_entry); |
527 | 526 | ||
528 | spin_unlock_bh(&uid_tag_data_tree_lock); | ||
529 | if (utd_res) | 527 | if (utd_res) |
530 | *utd_res = utd_entry; | 528 | *utd_res = utd_entry; |
531 | DR_DEBUG("qtaguid: get_tag_ref(0x%llx) utd=%p tr=%p\n", | 529 | DR_DEBUG("qtaguid: get_tag_ref(0x%llx) utd=%p tr=%p\n", |
@@ -2032,6 +2030,7 @@ static int ctrl_cmd_delete(const char *input) | |||
2032 | 2030 | ||
2033 | /* Delete socket tags */ | 2031 | /* Delete socket tags */ |
2034 | spin_lock_bh(&sock_tag_list_lock); | 2032 | spin_lock_bh(&sock_tag_list_lock); |
2033 | spin_lock_bh(&uid_tag_data_tree_lock); | ||
2035 | node = rb_first(&sock_tag_tree); | 2034 | node = rb_first(&sock_tag_tree); |
2036 | while (node) { | 2035 | while (node) { |
2037 | st_entry = rb_entry(node, struct sock_tag, sock_node); | 2036 | st_entry = rb_entry(node, struct sock_tag, sock_node); |
@@ -2061,6 +2060,7 @@ static int ctrl_cmd_delete(const char *input) | |||
2061 | list_del(&st_entry->list); | 2060 | list_del(&st_entry->list); |
2062 | } | 2061 | } |
2063 | } | 2062 | } |
2063 | spin_unlock_bh(&uid_tag_data_tree_lock); | ||
2064 | spin_unlock_bh(&sock_tag_list_lock); | 2064 | spin_unlock_bh(&sock_tag_list_lock); |
2065 | 2065 | ||
2066 | sock_tag_tree_erase(&st_to_free_tree); | 2066 | sock_tag_tree_erase(&st_to_free_tree); |
@@ -2270,10 +2270,12 @@ static int ctrl_cmd_tag(const char *input) | |||
2270 | full_tag = combine_atag_with_uid(acct_tag, uid_int); | 2270 | full_tag = combine_atag_with_uid(acct_tag, uid_int); |
2271 | 2271 | ||
2272 | spin_lock_bh(&sock_tag_list_lock); | 2272 | spin_lock_bh(&sock_tag_list_lock); |
2273 | spin_lock_bh(&uid_tag_data_tree_lock); | ||
2273 | sock_tag_entry = get_sock_stat_nl(el_socket->sk); | 2274 | sock_tag_entry = get_sock_stat_nl(el_socket->sk); |
2274 | tag_ref_entry = get_tag_ref(full_tag, &uid_tag_data_entry); | 2275 | tag_ref_entry = get_tag_ref(full_tag, &uid_tag_data_entry); |
2275 | if (IS_ERR(tag_ref_entry)) { | 2276 | if (IS_ERR(tag_ref_entry)) { |
2276 | res = PTR_ERR(tag_ref_entry); | 2277 | res = PTR_ERR(tag_ref_entry); |
2278 | spin_unlock_bh(&uid_tag_data_tree_lock); | ||
2277 | spin_unlock_bh(&sock_tag_list_lock); | 2279 | spin_unlock_bh(&sock_tag_list_lock); |
2278 | goto err_put; | 2280 | goto err_put; |
2279 | } | 2281 | } |
@@ -2300,9 +2302,14 @@ static int ctrl_cmd_tag(const char *input) | |||
2300 | pr_err("qtaguid: ctrl_tag(%s): " | 2302 | pr_err("qtaguid: ctrl_tag(%s): " |
2301 | "socket tag alloc failed\n", | 2303 | "socket tag alloc failed\n", |
2302 | input); | 2304 | input); |
2305 | BUG_ON(tag_ref_entry->num_sock_tags <= 0); | ||
2306 | tag_ref_entry->num_sock_tags--; | ||
2307 | free_tag_ref_from_utd_entry(tag_ref_entry, | ||
2308 | uid_tag_data_entry); | ||
2309 | spin_unlock_bh(&uid_tag_data_tree_lock); | ||
2303 | spin_unlock_bh(&sock_tag_list_lock); | 2310 | spin_unlock_bh(&sock_tag_list_lock); |
2304 | res = -ENOMEM; | 2311 | res = -ENOMEM; |
2305 | goto err_tag_unref_put; | 2312 | goto err_put; |
2306 | } | 2313 | } |
2307 | /* | 2314 | /* |
2308 | * Hold the sk refcount here to make sure the sk pointer cannot | 2315 | * Hold the sk refcount here to make sure the sk pointer cannot |
@@ -2312,7 +2319,6 @@ static int ctrl_cmd_tag(const char *input) | |||
2312 | sock_tag_entry->sk = el_socket->sk; | 2319 | sock_tag_entry->sk = el_socket->sk; |
2313 | sock_tag_entry->pid = current->tgid; | 2320 | sock_tag_entry->pid = current->tgid; |
2314 | sock_tag_entry->tag = combine_atag_with_uid(acct_tag, uid_int); | 2321 | sock_tag_entry->tag = combine_atag_with_uid(acct_tag, uid_int); |
2315 | spin_lock_bh(&uid_tag_data_tree_lock); | ||
2316 | pqd_entry = proc_qtu_data_tree_search( | 2322 | pqd_entry = proc_qtu_data_tree_search( |
2317 | &proc_qtu_data_tree, current->tgid); | 2323 | &proc_qtu_data_tree, current->tgid); |
2318 | /* | 2324 | /* |
@@ -2330,11 +2336,11 @@ static int ctrl_cmd_tag(const char *input) | |||
2330 | else | 2336 | else |
2331 | list_add(&sock_tag_entry->list, | 2337 | list_add(&sock_tag_entry->list, |
2332 | &pqd_entry->sock_tag_list); | 2338 | &pqd_entry->sock_tag_list); |
2333 | spin_unlock_bh(&uid_tag_data_tree_lock); | ||
2334 | 2339 | ||
2335 | sock_tag_tree_insert(sock_tag_entry, &sock_tag_tree); | 2340 | sock_tag_tree_insert(sock_tag_entry, &sock_tag_tree); |
2336 | atomic64_inc(&qtu_events.sockets_tagged); | 2341 | atomic64_inc(&qtu_events.sockets_tagged); |
2337 | } | 2342 | } |
2343 | spin_unlock_bh(&uid_tag_data_tree_lock); | ||
2338 | spin_unlock_bh(&sock_tag_list_lock); | 2344 | spin_unlock_bh(&sock_tag_list_lock); |
2339 | /* We keep the ref to the sk until it is untagged */ | 2345 | /* We keep the ref to the sk until it is untagged */ |
2340 | CT_DEBUG("qtaguid: ctrl_tag(%s): done st@%p ...->sk_refcnt=%d\n", | 2346 | CT_DEBUG("qtaguid: ctrl_tag(%s): done st@%p ...->sk_refcnt=%d\n", |
@@ -2343,10 +2349,6 @@ static int ctrl_cmd_tag(const char *input) | |||
2343 | sockfd_put(el_socket); | 2349 | sockfd_put(el_socket); |
2344 | return 0; | 2350 | return 0; |
2345 | 2351 | ||
2346 | err_tag_unref_put: | ||
2347 | BUG_ON(tag_ref_entry->num_sock_tags <= 0); | ||
2348 | tag_ref_entry->num_sock_tags--; | ||
2349 | free_tag_ref_from_utd_entry(tag_ref_entry, uid_tag_data_entry); | ||
2350 | err_put: | 2352 | err_put: |
2351 | CT_DEBUG("qtaguid: ctrl_tag(%s): done. ...->sk_refcnt=%d\n", | 2353 | CT_DEBUG("qtaguid: ctrl_tag(%s): done. ...->sk_refcnt=%d\n", |
2352 | input, atomic_read(&el_socket->sk->sk_refcnt) - 1); | 2354 | input, atomic_read(&el_socket->sk->sk_refcnt) - 1); |
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index a87afc4f3c91..48e1608414e6 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -96,6 +96,44 @@ EXPORT_SYMBOL_GPL(nl_table); | |||
96 | 96 | ||
97 | static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait); | 97 | static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait); |
98 | 98 | ||
99 | static struct lock_class_key nlk_cb_mutex_keys[MAX_LINKS]; | ||
100 | |||
101 | static const char *const nlk_cb_mutex_key_strings[MAX_LINKS + 1] = { | ||
102 | "nlk_cb_mutex-ROUTE", | ||
103 | "nlk_cb_mutex-1", | ||
104 | "nlk_cb_mutex-USERSOCK", | ||
105 | "nlk_cb_mutex-FIREWALL", | ||
106 | "nlk_cb_mutex-SOCK_DIAG", | ||
107 | "nlk_cb_mutex-NFLOG", | ||
108 | "nlk_cb_mutex-XFRM", | ||
109 | "nlk_cb_mutex-SELINUX", | ||
110 | "nlk_cb_mutex-ISCSI", | ||
111 | "nlk_cb_mutex-AUDIT", | ||
112 | "nlk_cb_mutex-FIB_LOOKUP", | ||
113 | "nlk_cb_mutex-CONNECTOR", | ||
114 | "nlk_cb_mutex-NETFILTER", | ||
115 | "nlk_cb_mutex-IP6_FW", | ||
116 | "nlk_cb_mutex-DNRTMSG", | ||
117 | "nlk_cb_mutex-KOBJECT_UEVENT", | ||
118 | "nlk_cb_mutex-GENERIC", | ||
119 | "nlk_cb_mutex-17", | ||
120 | "nlk_cb_mutex-SCSITRANSPORT", | ||
121 | "nlk_cb_mutex-ECRYPTFS", | ||
122 | "nlk_cb_mutex-RDMA", | ||
123 | "nlk_cb_mutex-CRYPTO", | ||
124 | "nlk_cb_mutex-SMC", | ||
125 | "nlk_cb_mutex-23", | ||
126 | "nlk_cb_mutex-24", | ||
127 | "nlk_cb_mutex-25", | ||
128 | "nlk_cb_mutex-26", | ||
129 | "nlk_cb_mutex-27", | ||
130 | "nlk_cb_mutex-28", | ||
131 | "nlk_cb_mutex-29", | ||
132 | "nlk_cb_mutex-30", | ||
133 | "nlk_cb_mutex-31", | ||
134 | "nlk_cb_mutex-MAX_LINKS" | ||
135 | }; | ||
136 | |||
99 | static int netlink_dump(struct sock *sk); | 137 | static int netlink_dump(struct sock *sk); |
100 | static void netlink_skb_destructor(struct sk_buff *skb); | 138 | static void netlink_skb_destructor(struct sk_buff *skb); |
101 | 139 | ||
@@ -223,6 +261,9 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb, | |||
223 | struct sock *sk = skb->sk; | 261 | struct sock *sk = skb->sk; |
224 | int ret = -ENOMEM; | 262 | int ret = -ENOMEM; |
225 | 263 | ||
264 | if (!net_eq(dev_net(dev), sock_net(sk))) | ||
265 | return 0; | ||
266 | |||
226 | dev_hold(dev); | 267 | dev_hold(dev); |
227 | 268 | ||
228 | if (is_vmalloc_addr(skb->head)) | 269 | if (is_vmalloc_addr(skb->head)) |
@@ -585,6 +626,9 @@ static int __netlink_create(struct net *net, struct socket *sock, | |||
585 | } else { | 626 | } else { |
586 | nlk->cb_mutex = &nlk->cb_def_mutex; | 627 | nlk->cb_mutex = &nlk->cb_def_mutex; |
587 | mutex_init(nlk->cb_mutex); | 628 | mutex_init(nlk->cb_mutex); |
629 | lockdep_set_class_and_name(nlk->cb_mutex, | ||
630 | nlk_cb_mutex_keys + protocol, | ||
631 | nlk_cb_mutex_key_strings[protocol]); | ||
588 | } | 632 | } |
589 | init_waitqueue_head(&nlk->wait); | 633 | init_waitqueue_head(&nlk->wait); |
590 | 634 | ||
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index d26b28def310..21e4d339217e 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c | |||
@@ -1672,14 +1672,11 @@ int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb) | |||
1672 | 1672 | ||
1673 | #define MAX_ACTIONS_BUFSIZE (32 * 1024) | 1673 | #define MAX_ACTIONS_BUFSIZE (32 * 1024) |
1674 | 1674 | ||
1675 | static struct sw_flow_actions *nla_alloc_flow_actions(int size, bool log) | 1675 | static struct sw_flow_actions *nla_alloc_flow_actions(int size) |
1676 | { | 1676 | { |
1677 | struct sw_flow_actions *sfa; | 1677 | struct sw_flow_actions *sfa; |
1678 | 1678 | ||
1679 | if (size > MAX_ACTIONS_BUFSIZE) { | 1679 | WARN_ON_ONCE(size > MAX_ACTIONS_BUFSIZE); |
1680 | OVS_NLERR(log, "Flow action size %u bytes exceeds max", size); | ||
1681 | return ERR_PTR(-EINVAL); | ||
1682 | } | ||
1683 | 1680 | ||
1684 | sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL); | 1681 | sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL); |
1685 | if (!sfa) | 1682 | if (!sfa) |
@@ -1752,12 +1749,15 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa, | |||
1752 | new_acts_size = ksize(*sfa) * 2; | 1749 | new_acts_size = ksize(*sfa) * 2; |
1753 | 1750 | ||
1754 | if (new_acts_size > MAX_ACTIONS_BUFSIZE) { | 1751 | if (new_acts_size > MAX_ACTIONS_BUFSIZE) { |
1755 | if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) | 1752 | if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) { |
1753 | OVS_NLERR(log, "Flow action size exceeds max %u", | ||
1754 | MAX_ACTIONS_BUFSIZE); | ||
1756 | return ERR_PTR(-EMSGSIZE); | 1755 | return ERR_PTR(-EMSGSIZE); |
1756 | } | ||
1757 | new_acts_size = MAX_ACTIONS_BUFSIZE; | 1757 | new_acts_size = MAX_ACTIONS_BUFSIZE; |
1758 | } | 1758 | } |
1759 | 1759 | ||
1760 | acts = nla_alloc_flow_actions(new_acts_size, log); | 1760 | acts = nla_alloc_flow_actions(new_acts_size); |
1761 | if (IS_ERR(acts)) | 1761 | if (IS_ERR(acts)) |
1762 | return (void *)acts; | 1762 | return (void *)acts; |
1763 | 1763 | ||
@@ -2369,7 +2369,7 @@ int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr, | |||
2369 | { | 2369 | { |
2370 | int err; | 2370 | int err; |
2371 | 2371 | ||
2372 | *sfa = nla_alloc_flow_actions(nla_len(attr), log); | 2372 | *sfa = nla_alloc_flow_actions(min(nla_len(attr), MAX_ACTIONS_BUFSIZE)); |
2373 | if (IS_ERR(*sfa)) | 2373 | if (IS_ERR(*sfa)) |
2374 | return PTR_ERR(*sfa); | 2374 | return PTR_ERR(*sfa); |
2375 | 2375 | ||
diff --git a/net/rds/rdma.c b/net/rds/rdma.c index bdf151c6307d..bdfc395d1be2 100644 --- a/net/rds/rdma.c +++ b/net/rds/rdma.c | |||
@@ -517,6 +517,9 @@ int rds_rdma_extra_size(struct rds_rdma_args *args) | |||
517 | 517 | ||
518 | local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr; | 518 | local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr; |
519 | 519 | ||
520 | if (args->nr_local == 0) | ||
521 | return -EINVAL; | ||
522 | |||
520 | /* figure out the number of pages in the vector */ | 523 | /* figure out the number of pages in the vector */ |
521 | for (i = 0; i < args->nr_local; i++) { | 524 | for (i = 0; i < args->nr_local; i++) { |
522 | if (copy_from_user(&vec, &local_vec[i], | 525 | if (copy_from_user(&vec, &local_vec[i], |
@@ -866,6 +869,7 @@ int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm, | |||
866 | err: | 869 | err: |
867 | if (page) | 870 | if (page) |
868 | put_page(page); | 871 | put_page(page); |
872 | rm->atomic.op_active = 0; | ||
869 | kfree(rm->atomic.op_notifier); | 873 | kfree(rm->atomic.op_notifier); |
870 | 874 | ||
871 | return ret; | 875 | return ret; |
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c index d0dff0cd8186..cce4e6ada7fa 100644 --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c | |||
@@ -199,9 +199,13 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
199 | pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p); | 199 | pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p); |
200 | 200 | ||
201 | if (p->set_tc_index) { | 201 | if (p->set_tc_index) { |
202 | int wlen = skb_network_offset(skb); | ||
203 | |||
202 | switch (tc_skb_protocol(skb)) { | 204 | switch (tc_skb_protocol(skb)) { |
203 | case htons(ETH_P_IP): | 205 | case htons(ETH_P_IP): |
204 | if (skb_cow_head(skb, sizeof(struct iphdr))) | 206 | wlen += sizeof(struct iphdr); |
207 | if (!pskb_may_pull(skb, wlen) || | ||
208 | skb_try_make_writable(skb, wlen)) | ||
205 | goto drop; | 209 | goto drop; |
206 | 210 | ||
207 | skb->tc_index = ipv4_get_dsfield(ip_hdr(skb)) | 211 | skb->tc_index = ipv4_get_dsfield(ip_hdr(skb)) |
@@ -209,7 +213,9 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
209 | break; | 213 | break; |
210 | 214 | ||
211 | case htons(ETH_P_IPV6): | 215 | case htons(ETH_P_IPV6): |
212 | if (skb_cow_head(skb, sizeof(struct ipv6hdr))) | 216 | wlen += sizeof(struct ipv6hdr); |
217 | if (!pskb_may_pull(skb, wlen) || | ||
218 | skb_try_make_writable(skb, wlen)) | ||
213 | goto drop; | 219 | goto drop; |
214 | 220 | ||
215 | skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb)) | 221 | skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb)) |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 61189c576963..e9851198a850 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -83,7 +83,7 @@ | |||
83 | static int sctp_writeable(struct sock *sk); | 83 | static int sctp_writeable(struct sock *sk); |
84 | static void sctp_wfree(struct sk_buff *skb); | 84 | static void sctp_wfree(struct sk_buff *skb); |
85 | static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, | 85 | static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, |
86 | size_t msg_len, struct sock **orig_sk); | 86 | size_t msg_len); |
87 | static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p); | 87 | static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p); |
88 | static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p); | 88 | static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p); |
89 | static int sctp_wait_for_accept(struct sock *sk, long timeo); | 89 | static int sctp_wait_for_accept(struct sock *sk, long timeo); |
@@ -332,16 +332,14 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt, | |||
332 | if (len < sizeof (struct sockaddr)) | 332 | if (len < sizeof (struct sockaddr)) |
333 | return NULL; | 333 | return NULL; |
334 | 334 | ||
335 | if (!opt->pf->af_supported(addr->sa.sa_family, opt)) | ||
336 | return NULL; | ||
337 | |||
335 | /* V4 mapped address are really of AF_INET family */ | 338 | /* V4 mapped address are really of AF_INET family */ |
336 | if (addr->sa.sa_family == AF_INET6 && | 339 | if (addr->sa.sa_family == AF_INET6 && |
337 | ipv6_addr_v4mapped(&addr->v6.sin6_addr)) { | 340 | ipv6_addr_v4mapped(&addr->v6.sin6_addr) && |
338 | if (!opt->pf->af_supported(AF_INET, opt)) | 341 | !opt->pf->af_supported(AF_INET, opt)) |
339 | return NULL; | 342 | return NULL; |
340 | } else { | ||
341 | /* Does this PF support this AF? */ | ||
342 | if (!opt->pf->af_supported(addr->sa.sa_family, opt)) | ||
343 | return NULL; | ||
344 | } | ||
345 | 343 | ||
346 | /* If we get this far, af is valid. */ | 344 | /* If we get this far, af is valid. */ |
347 | af = sctp_get_af_specific(addr->sa.sa_family); | 345 | af = sctp_get_af_specific(addr->sa.sa_family); |
@@ -1954,7 +1952,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) | |||
1954 | timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); | 1952 | timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); |
1955 | if (!sctp_wspace(asoc)) { | 1953 | if (!sctp_wspace(asoc)) { |
1956 | /* sk can be changed by peel off when waiting for buf. */ | 1954 | /* sk can be changed by peel off when waiting for buf. */ |
1957 | err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len, &sk); | 1955 | err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len); |
1958 | if (err) { | 1956 | if (err) { |
1959 | if (err == -ESRCH) { | 1957 | if (err == -ESRCH) { |
1960 | /* asoc is already dead. */ | 1958 | /* asoc is already dead. */ |
@@ -4153,7 +4151,7 @@ static int sctp_init_sock(struct sock *sk) | |||
4153 | SCTP_DBG_OBJCNT_INC(sock); | 4151 | SCTP_DBG_OBJCNT_INC(sock); |
4154 | 4152 | ||
4155 | local_bh_disable(); | 4153 | local_bh_disable(); |
4156 | percpu_counter_inc(&sctp_sockets_allocated); | 4154 | sk_sockets_allocated_inc(sk); |
4157 | sock_prot_inuse_add(net, sk->sk_prot, 1); | 4155 | sock_prot_inuse_add(net, sk->sk_prot, 1); |
4158 | 4156 | ||
4159 | /* Nothing can fail after this block, otherwise | 4157 | /* Nothing can fail after this block, otherwise |
@@ -4197,7 +4195,7 @@ static void sctp_destroy_sock(struct sock *sk) | |||
4197 | } | 4195 | } |
4198 | sctp_endpoint_free(sp->ep); | 4196 | sctp_endpoint_free(sp->ep); |
4199 | local_bh_disable(); | 4197 | local_bh_disable(); |
4200 | percpu_counter_dec(&sctp_sockets_allocated); | 4198 | sk_sockets_allocated_dec(sk); |
4201 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); | 4199 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); |
4202 | local_bh_enable(); | 4200 | local_bh_enable(); |
4203 | } | 4201 | } |
@@ -6976,12 +6974,12 @@ void sctp_sock_rfree(struct sk_buff *skb) | |||
6976 | 6974 | ||
6977 | /* Helper function to wait for space in the sndbuf. */ | 6975 | /* Helper function to wait for space in the sndbuf. */ |
6978 | static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, | 6976 | static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, |
6979 | size_t msg_len, struct sock **orig_sk) | 6977 | size_t msg_len) |
6980 | { | 6978 | { |
6981 | struct sock *sk = asoc->base.sk; | 6979 | struct sock *sk = asoc->base.sk; |
6982 | int err = 0; | ||
6983 | long current_timeo = *timeo_p; | 6980 | long current_timeo = *timeo_p; |
6984 | DEFINE_WAIT(wait); | 6981 | DEFINE_WAIT(wait); |
6982 | int err = 0; | ||
6985 | 6983 | ||
6986 | pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc, | 6984 | pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc, |
6987 | *timeo_p, msg_len); | 6985 | *timeo_p, msg_len); |
@@ -7010,17 +7008,13 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, | |||
7010 | release_sock(sk); | 7008 | release_sock(sk); |
7011 | current_timeo = schedule_timeout(current_timeo); | 7009 | current_timeo = schedule_timeout(current_timeo); |
7012 | lock_sock(sk); | 7010 | lock_sock(sk); |
7013 | if (sk != asoc->base.sk) { | 7011 | if (sk != asoc->base.sk) |
7014 | release_sock(sk); | 7012 | goto do_error; |
7015 | sk = asoc->base.sk; | ||
7016 | lock_sock(sk); | ||
7017 | } | ||
7018 | 7013 | ||
7019 | *timeo_p = current_timeo; | 7014 | *timeo_p = current_timeo; |
7020 | } | 7015 | } |
7021 | 7016 | ||
7022 | out: | 7017 | out: |
7023 | *orig_sk = sk; | ||
7024 | finish_wait(&asoc->wait, &wait); | 7018 | finish_wait(&asoc->wait, &wait); |
7025 | 7019 | ||
7026 | /* Release the association's refcnt. */ | 7020 | /* Release the association's refcnt. */ |
diff --git a/net/socket.c b/net/socket.c index 7a05647ce4ce..9684fd34b013 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -2548,6 +2548,15 @@ out_fs: | |||
2548 | 2548 | ||
2549 | core_initcall(sock_init); /* early initcall */ | 2549 | core_initcall(sock_init); /* early initcall */ |
2550 | 2550 | ||
2551 | static int __init jit_init(void) | ||
2552 | { | ||
2553 | #ifdef CONFIG_BPF_JIT_ALWAYS_ON | ||
2554 | bpf_jit_enable = 1; | ||
2555 | #endif | ||
2556 | return 0; | ||
2557 | } | ||
2558 | pure_initcall(jit_init); | ||
2559 | |||
2551 | #ifdef CONFIG_PROC_FS | 2560 | #ifdef CONFIG_PROC_FS |
2552 | void socket_seq_show(struct seq_file *seq) | 2561 | void socket_seq_show(struct seq_file *seq) |
2553 | { | 2562 | { |
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c index 2410d557ae39..89731c9023f0 100644 --- a/net/sunrpc/auth_gss/gss_rpc_xdr.c +++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c | |||
@@ -231,6 +231,7 @@ static int gssx_dec_linux_creds(struct xdr_stream *xdr, | |||
231 | goto out_free_groups; | 231 | goto out_free_groups; |
232 | GROUP_AT(creds->cr_group_info, i) = kgid; | 232 | GROUP_AT(creds->cr_group_info, i) = kgid; |
233 | } | 233 | } |
234 | groups_sort(creds->cr_group_info); | ||
234 | 235 | ||
235 | return 0; | 236 | return 0; |
236 | out_free_groups: | 237 | out_free_groups: |
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index 033fec307528..036bbf2b44c1 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c | |||
@@ -481,6 +481,7 @@ static int rsc_parse(struct cache_detail *cd, | |||
481 | goto out; | 481 | goto out; |
482 | GROUP_AT(rsci.cred.cr_group_info, i) = kgid; | 482 | GROUP_AT(rsci.cred.cr_group_info, i) = kgid; |
483 | } | 483 | } |
484 | groups_sort(rsci.cred.cr_group_info); | ||
484 | 485 | ||
485 | /* mech name */ | 486 | /* mech name */ |
486 | len = qword_get(&mesg, buf, mlen); | 487 | len = qword_get(&mesg, buf, mlen); |
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c index 621ca7b4a155..98db1715cb17 100644 --- a/net/sunrpc/svcauth_unix.c +++ b/net/sunrpc/svcauth_unix.c | |||
@@ -520,6 +520,7 @@ static int unix_gid_parse(struct cache_detail *cd, | |||
520 | GROUP_AT(ug.gi, i) = kgid; | 520 | GROUP_AT(ug.gi, i) = kgid; |
521 | } | 521 | } |
522 | 522 | ||
523 | groups_sort(ug.gi); | ||
523 | ugp = unix_gid_lookup(cd, uid); | 524 | ugp = unix_gid_lookup(cd, uid); |
524 | if (ugp) { | 525 | if (ugp) { |
525 | struct cache_head *ch; | 526 | struct cache_head *ch; |
@@ -827,6 +828,7 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp) | |||
827 | kgid_t kgid = make_kgid(&init_user_ns, svc_getnl(argv)); | 828 | kgid_t kgid = make_kgid(&init_user_ns, svc_getnl(argv)); |
828 | GROUP_AT(cred->cr_group_info, i) = kgid; | 829 | GROUP_AT(cred->cr_group_info, i) = kgid; |
829 | } | 830 | } |
831 | groups_sort(cred->cr_group_info); | ||
830 | if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) { | 832 | if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) { |
831 | *authp = rpc_autherr_badverf; | 833 | *authp = rpc_autherr_badverf; |
832 | return SVC_DENIED; | 834 | return SVC_DENIED; |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 27b6f55fa43a..728d65fbab0c 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -2360,6 +2360,7 @@ static void xs_tcp_setup_socket(struct work_struct *work) | |||
2360 | case -ECONNREFUSED: | 2360 | case -ECONNREFUSED: |
2361 | case -ECONNRESET: | 2361 | case -ECONNRESET: |
2362 | case -ENETUNREACH: | 2362 | case -ENETUNREACH: |
2363 | case -EHOSTUNREACH: | ||
2363 | case -EADDRINUSE: | 2364 | case -EADDRINUSE: |
2364 | case -ENOBUFS: | 2365 | case -ENOBUFS: |
2365 | /* retry with existing socket, after a delay */ | 2366 | /* retry with existing socket, after a delay */ |
diff --git a/scripts/Makefile.build b/scripts/Makefile.build index 01df30af4d4a..18209917e379 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build | |||
@@ -158,7 +158,8 @@ cmd_cc_i_c = $(CPP) $(c_flags) -o $@ $< | |||
158 | $(obj)/%.i: $(src)/%.c FORCE | 158 | $(obj)/%.i: $(src)/%.c FORCE |
159 | $(call if_changed_dep,cc_i_c) | 159 | $(call if_changed_dep,cc_i_c) |
160 | 160 | ||
161 | cmd_gensymtypes = \ | 161 | # These mirror gensymtypes_S and co below, keep them in synch. |
162 | cmd_gensymtypes_c = \ | ||
162 | $(CPP) -D__GENKSYMS__ $(c_flags) $< | \ | 163 | $(CPP) -D__GENKSYMS__ $(c_flags) $< | \ |
163 | $(GENKSYMS) $(if $(1), -T $(2)) \ | 164 | $(GENKSYMS) $(if $(1), -T $(2)) \ |
164 | $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX)) \ | 165 | $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX)) \ |
@@ -168,7 +169,7 @@ cmd_gensymtypes = \ | |||
168 | quiet_cmd_cc_symtypes_c = SYM $(quiet_modtag) $@ | 169 | quiet_cmd_cc_symtypes_c = SYM $(quiet_modtag) $@ |
169 | cmd_cc_symtypes_c = \ | 170 | cmd_cc_symtypes_c = \ |
170 | set -e; \ | 171 | set -e; \ |
171 | $(call cmd_gensymtypes,true,$@) >/dev/null; \ | 172 | $(call cmd_gensymtypes_c,true,$@) >/dev/null; \ |
172 | test -s $@ || rm -f $@ | 173 | test -s $@ || rm -f $@ |
173 | 174 | ||
174 | $(obj)/%.symtypes : $(src)/%.c FORCE | 175 | $(obj)/%.symtypes : $(src)/%.c FORCE |
@@ -197,9 +198,10 @@ else | |||
197 | # the actual value of the checksum generated by genksyms | 198 | # the actual value of the checksum generated by genksyms |
198 | 199 | ||
199 | cmd_cc_o_c = $(CC) $(c_flags) -c -o $(@D)/.tmp_$(@F) $< | 200 | cmd_cc_o_c = $(CC) $(c_flags) -c -o $(@D)/.tmp_$(@F) $< |
200 | cmd_modversions = \ | 201 | |
202 | cmd_modversions_c = \ | ||
201 | if $(OBJDUMP) -h $(@D)/.tmp_$(@F) | grep -q __ksymtab; then \ | 203 | if $(OBJDUMP) -h $(@D)/.tmp_$(@F) | grep -q __ksymtab; then \ |
202 | $(call cmd_gensymtypes,$(KBUILD_SYMTYPES),$(@:.o=.symtypes)) \ | 204 | $(call cmd_gensymtypes_c,$(KBUILD_SYMTYPES),$(@:.o=.symtypes)) \ |
203 | > $(@D)/.tmp_$(@F:.o=.ver); \ | 205 | > $(@D)/.tmp_$(@F:.o=.ver); \ |
204 | \ | 206 | \ |
205 | $(LD) $(LDFLAGS) -r -o $@ $(@D)/.tmp_$(@F) \ | 207 | $(LD) $(LDFLAGS) -r -o $@ $(@D)/.tmp_$(@F) \ |
@@ -244,7 +246,7 @@ endif | |||
244 | define rule_cc_o_c | 246 | define rule_cc_o_c |
245 | $(call echo-cmd,checksrc) $(cmd_checksrc) \ | 247 | $(call echo-cmd,checksrc) $(cmd_checksrc) \ |
246 | $(call echo-cmd,cc_o_c) $(cmd_cc_o_c); \ | 248 | $(call echo-cmd,cc_o_c) $(cmd_cc_o_c); \ |
247 | $(cmd_modversions) \ | 249 | $(cmd_modversions_c) \ |
248 | $(call echo-cmd,record_mcount) \ | 250 | $(call echo-cmd,record_mcount) \ |
249 | $(cmd_record_mcount) \ | 251 | $(cmd_record_mcount) \ |
250 | scripts/basic/fixdep $(depfile) $@ '$(call make-cmd,cc_o_c)' > \ | 252 | scripts/basic/fixdep $(depfile) $@ '$(call make-cmd,cc_o_c)' > \ |
@@ -253,6 +255,15 @@ define rule_cc_o_c | |||
253 | mv -f $(dot-target).tmp $(dot-target).cmd | 255 | mv -f $(dot-target).tmp $(dot-target).cmd |
254 | endef | 256 | endef |
255 | 257 | ||
258 | define rule_as_o_S | ||
259 | $(call echo-cmd,as_o_S) $(cmd_as_o_S); \ | ||
260 | scripts/basic/fixdep $(depfile) $@ '$(call make-cmd,as_o_S)' > \ | ||
261 | $(dot-target).tmp; \ | ||
262 | $(cmd_modversions_S) \ | ||
263 | rm -f $(depfile); \ | ||
264 | mv -f $(dot-target).tmp $(dot-target).cmd | ||
265 | endef | ||
266 | |||
256 | # Built-in and composite module parts | 267 | # Built-in and composite module parts |
257 | $(obj)/%.o: $(src)/%.c $(recordmcount_source) FORCE | 268 | $(obj)/%.o: $(src)/%.c $(recordmcount_source) FORCE |
258 | $(call cmd,force_checksrc) | 269 | $(call cmd,force_checksrc) |
@@ -281,6 +292,38 @@ modkern_aflags := $(KBUILD_AFLAGS_KERNEL) $(AFLAGS_KERNEL) | |||
281 | $(real-objs-m) : modkern_aflags := $(KBUILD_AFLAGS_MODULE) $(AFLAGS_MODULE) | 292 | $(real-objs-m) : modkern_aflags := $(KBUILD_AFLAGS_MODULE) $(AFLAGS_MODULE) |
282 | $(real-objs-m:.o=.s): modkern_aflags := $(KBUILD_AFLAGS_MODULE) $(AFLAGS_MODULE) | 293 | $(real-objs-m:.o=.s): modkern_aflags := $(KBUILD_AFLAGS_MODULE) $(AFLAGS_MODULE) |
283 | 294 | ||
295 | # .S file exports must have their C prototypes defined in asm/asm-prototypes.h | ||
296 | # or a file that it includes, in order to get versioned symbols. We build a | ||
297 | # dummy C file that includes asm-prototypes and the EXPORT_SYMBOL lines from | ||
298 | # the .S file (with trailing ';'), and run genksyms on that, to extract vers. | ||
299 | # | ||
300 | # This is convoluted. The .S file must first be preprocessed to run guards and | ||
301 | # expand names, then the resulting exports must be constructed into plain | ||
302 | # EXPORT_SYMBOL(symbol); to build our dummy C file, and that gets preprocessed | ||
303 | # to make the genksyms input. | ||
304 | # | ||
305 | # These mirror gensymtypes_c and co above, keep them in synch. | ||
306 | cmd_gensymtypes_S = \ | ||
307 | (echo "\#include <linux/kernel.h>" ; \ | ||
308 | echo "\#include <asm/asm-prototypes.h>" ; \ | ||
309 | $(CPP) $(a_flags) $< | \ | ||
310 | grep "\<___EXPORT_SYMBOL\>" | \ | ||
311 | sed 's/.*___EXPORT_SYMBOL[[:space:]]*\([a-zA-Z0-9_]*\)[[:space:]]*,.*/EXPORT_SYMBOL(\1);/' ) | \ | ||
312 | $(CPP) -D__GENKSYMS__ $(c_flags) -xc - | \ | ||
313 | $(GENKSYMS) $(if $(1), -T $(2)) \ | ||
314 | $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX)) \ | ||
315 | $(if $(KBUILD_PRESERVE),-p) \ | ||
316 | -r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null)) | ||
317 | |||
318 | quiet_cmd_cc_symtypes_S = SYM $(quiet_modtag) $@ | ||
319 | cmd_cc_symtypes_S = \ | ||
320 | set -e; \ | ||
321 | $(call cmd_gensymtypes_S,true,$@) >/dev/null; \ | ||
322 | test -s $@ || rm -f $@ | ||
323 | |||
324 | $(obj)/%.symtypes : $(src)/%.S FORCE | ||
325 | $(call cmd,cc_symtypes_S) | ||
326 | |||
284 | quiet_cmd_as_s_S = CPP $(quiet_modtag) $@ | 327 | quiet_cmd_as_s_S = CPP $(quiet_modtag) $@ |
285 | cmd_as_s_S = $(CPP) $(a_flags) -o $@ $< | 328 | cmd_as_s_S = $(CPP) $(a_flags) -o $@ $< |
286 | 329 | ||
@@ -288,10 +331,40 @@ $(obj)/%.s: $(src)/%.S FORCE | |||
288 | $(call if_changed_dep,as_s_S) | 331 | $(call if_changed_dep,as_s_S) |
289 | 332 | ||
290 | quiet_cmd_as_o_S = AS $(quiet_modtag) $@ | 333 | quiet_cmd_as_o_S = AS $(quiet_modtag) $@ |
291 | cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $< | 334 | |
335 | ifndef CONFIG_MODVERSIONS | ||
336 | cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $< | ||
337 | |||
338 | else | ||
339 | |||
340 | ASM_PROTOTYPES := $(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/asm-prototypes.h) | ||
341 | |||
342 | ifeq ($(ASM_PROTOTYPES),) | ||
343 | cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $< | ||
344 | |||
345 | else | ||
346 | |||
347 | # versioning matches the C process described above, with difference that | ||
348 | # we parse asm-prototypes.h C header to get function definitions. | ||
349 | |||
350 | cmd_as_o_S = $(CC) $(a_flags) -c -o $(@D)/.tmp_$(@F) $< | ||
351 | |||
352 | cmd_modversions_S = \ | ||
353 | if $(OBJDUMP) -h $(@D)/.tmp_$(@F) | grep -q __ksymtab; then \ | ||
354 | $(call cmd_gensymtypes_S,$(KBUILD_SYMTYPES),$(@:.o=.symtypes)) \ | ||
355 | > $(@D)/.tmp_$(@F:.o=.ver); \ | ||
356 | \ | ||
357 | $(LD) $(LDFLAGS) -r -o $@ $(@D)/.tmp_$(@F) \ | ||
358 | -T $(@D)/.tmp_$(@F:.o=.ver); \ | ||
359 | rm -f $(@D)/.tmp_$(@F) $(@D)/.tmp_$(@F:.o=.ver); \ | ||
360 | else \ | ||
361 | mv -f $(@D)/.tmp_$(@F) $@; \ | ||
362 | fi; | ||
363 | endif | ||
364 | endif | ||
292 | 365 | ||
293 | $(obj)/%.o: $(src)/%.S FORCE | 366 | $(obj)/%.o: $(src)/%.S FORCE |
294 | $(call if_changed_dep,as_o_S) | 367 | $(call if_changed_rule,as_o_S) |
295 | 368 | ||
296 | targets += $(real-objs-y) $(real-objs-m) $(lib-y) | 369 | targets += $(real-objs-y) $(real-objs-m) $(lib-y) |
297 | targets += $(extra-y) $(MAKECMDGOALS) $(always) | 370 | targets += $(extra-y) $(MAKECMDGOALS) $(always) |
diff --git a/scripts/genksyms/genksyms.c b/scripts/genksyms/genksyms.c index 88632df4381b..dafaf96e0a34 100644 --- a/scripts/genksyms/genksyms.c +++ b/scripts/genksyms/genksyms.c | |||
@@ -423,13 +423,15 @@ static struct string_list *read_node(FILE *f) | |||
423 | struct string_list node = { | 423 | struct string_list node = { |
424 | .string = buffer, | 424 | .string = buffer, |
425 | .tag = SYM_NORMAL }; | 425 | .tag = SYM_NORMAL }; |
426 | int c; | 426 | int c, in_string = 0; |
427 | 427 | ||
428 | while ((c = fgetc(f)) != EOF) { | 428 | while ((c = fgetc(f)) != EOF) { |
429 | if (c == ' ') { | 429 | if (!in_string && c == ' ') { |
430 | if (node.string == buffer) | 430 | if (node.string == buffer) |
431 | continue; | 431 | continue; |
432 | break; | 432 | break; |
433 | } else if (c == '"') { | ||
434 | in_string = !in_string; | ||
433 | } else if (c == '\n') { | 435 | } else if (c == '\n') { |
434 | if (node.string == buffer) | 436 | if (node.string == buffer) |
435 | return NULL; | 437 | return NULL; |
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index e080746e1a6b..48958d3cec9e 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c | |||
@@ -594,7 +594,8 @@ static int ignore_undef_symbol(struct elf_info *info, const char *symname) | |||
594 | if (strncmp(symname, "_restgpr0_", sizeof("_restgpr0_") - 1) == 0 || | 594 | if (strncmp(symname, "_restgpr0_", sizeof("_restgpr0_") - 1) == 0 || |
595 | strncmp(symname, "_savegpr0_", sizeof("_savegpr0_") - 1) == 0 || | 595 | strncmp(symname, "_savegpr0_", sizeof("_savegpr0_") - 1) == 0 || |
596 | strncmp(symname, "_restvr_", sizeof("_restvr_") - 1) == 0 || | 596 | strncmp(symname, "_restvr_", sizeof("_restvr_") - 1) == 0 || |
597 | strncmp(symname, "_savevr_", sizeof("_savevr_") - 1) == 0) | 597 | strncmp(symname, "_savevr_", sizeof("_savevr_") - 1) == 0 || |
598 | strcmp(symname, ".TOC.") == 0) | ||
598 | return 1; | 599 | return 1; |
599 | /* Do not ignore this symbol */ | 600 | /* Do not ignore this symbol */ |
600 | return 0; | 601 | return 0; |
diff --git a/security/Kconfig b/security/Kconfig index 3aa60791f84d..7bcb805f36a4 100644 --- a/security/Kconfig +++ b/security/Kconfig | |||
@@ -40,6 +40,16 @@ config SECURITY | |||
40 | 40 | ||
41 | If you are unsure how to answer this question, answer N. | 41 | If you are unsure how to answer this question, answer N. |
42 | 42 | ||
43 | config PAGE_TABLE_ISOLATION | ||
44 | bool "Remove the kernel mapping in user mode" | ||
45 | default y | ||
46 | depends on X86_64 && SMP | ||
47 | help | ||
48 | This enforces a strict kernel and user space isolation, in order | ||
49 | to close hardware side channels on kernel address information. | ||
50 | |||
51 | If you are unsure how to answer this question, answer Y. | ||
52 | |||
43 | config SECURITYFS | 53 | config SECURITYFS |
44 | bool "Enable the securityfs filesystem" | 54 | bool "Enable the securityfs filesystem" |
45 | help | 55 | help |
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c index ce295c0c1da0..e44e844c8ec4 100644 --- a/security/keys/encrypted-keys/encrypted.c +++ b/security/keys/encrypted-keys/encrypted.c | |||
@@ -141,23 +141,22 @@ static int valid_ecryptfs_desc(const char *ecryptfs_desc) | |||
141 | */ | 141 | */ |
142 | static int valid_master_desc(const char *new_desc, const char *orig_desc) | 142 | static int valid_master_desc(const char *new_desc, const char *orig_desc) |
143 | { | 143 | { |
144 | if (!memcmp(new_desc, KEY_TRUSTED_PREFIX, KEY_TRUSTED_PREFIX_LEN)) { | 144 | int prefix_len; |
145 | if (strlen(new_desc) == KEY_TRUSTED_PREFIX_LEN) | 145 | |
146 | goto out; | 146 | if (!strncmp(new_desc, KEY_TRUSTED_PREFIX, KEY_TRUSTED_PREFIX_LEN)) |
147 | if (orig_desc) | 147 | prefix_len = KEY_TRUSTED_PREFIX_LEN; |
148 | if (memcmp(new_desc, orig_desc, KEY_TRUSTED_PREFIX_LEN)) | 148 | else if (!strncmp(new_desc, KEY_USER_PREFIX, KEY_USER_PREFIX_LEN)) |
149 | goto out; | 149 | prefix_len = KEY_USER_PREFIX_LEN; |
150 | } else if (!memcmp(new_desc, KEY_USER_PREFIX, KEY_USER_PREFIX_LEN)) { | 150 | else |
151 | if (strlen(new_desc) == KEY_USER_PREFIX_LEN) | 151 | return -EINVAL; |
152 | goto out; | 152 | |
153 | if (orig_desc) | 153 | if (!new_desc[prefix_len]) |
154 | if (memcmp(new_desc, orig_desc, KEY_USER_PREFIX_LEN)) | 154 | return -EINVAL; |
155 | goto out; | 155 | |
156 | } else | 156 | if (orig_desc && strncmp(new_desc, orig_desc, prefix_len)) |
157 | goto out; | 157 | return -EINVAL; |
158 | |||
158 | return 0; | 159 | return 0; |
159 | out: | ||
160 | return -EINVAL; | ||
161 | } | 160 | } |
162 | 161 | ||
163 | /* | 162 | /* |
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 34427384605d..be68992a28cb 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c | |||
@@ -4066,6 +4066,8 @@ static int sock_has_perm(struct task_struct *task, struct sock *sk, u32 perms) | |||
4066 | struct lsm_network_audit net = {0,}; | 4066 | struct lsm_network_audit net = {0,}; |
4067 | u32 tsid = task_sid(task); | 4067 | u32 tsid = task_sid(task); |
4068 | 4068 | ||
4069 | if (!sksec) | ||
4070 | return -EFAULT; | ||
4069 | if (sksec->sid == SECINITSID_KERNEL) | 4071 | if (sksec->sid == SECINITSID_KERNEL) |
4070 | return 0; | 4072 | return 0; |
4071 | 4073 | ||
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c index 33e72c809e50..494b7b533366 100644 --- a/sound/core/oss/pcm_oss.c +++ b/sound/core/oss/pcm_oss.c | |||
@@ -465,7 +465,6 @@ static int snd_pcm_hw_param_near(struct snd_pcm_substream *pcm, | |||
465 | v = snd_pcm_hw_param_last(pcm, params, var, dir); | 465 | v = snd_pcm_hw_param_last(pcm, params, var, dir); |
466 | else | 466 | else |
467 | v = snd_pcm_hw_param_first(pcm, params, var, dir); | 467 | v = snd_pcm_hw_param_first(pcm, params, var, dir); |
468 | snd_BUG_ON(v < 0); | ||
469 | return v; | 468 | return v; |
470 | } | 469 | } |
471 | 470 | ||
@@ -1370,8 +1369,11 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha | |||
1370 | 1369 | ||
1371 | if ((tmp = snd_pcm_oss_make_ready(substream)) < 0) | 1370 | if ((tmp = snd_pcm_oss_make_ready(substream)) < 0) |
1372 | return tmp; | 1371 | return tmp; |
1373 | mutex_lock(&runtime->oss.params_lock); | ||
1374 | while (bytes > 0) { | 1372 | while (bytes > 0) { |
1373 | if (mutex_lock_interruptible(&runtime->oss.params_lock)) { | ||
1374 | tmp = -ERESTARTSYS; | ||
1375 | break; | ||
1376 | } | ||
1375 | if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) { | 1377 | if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) { |
1376 | tmp = bytes; | 1378 | tmp = bytes; |
1377 | if (tmp + runtime->oss.buffer_used > runtime->oss.period_bytes) | 1379 | if (tmp + runtime->oss.buffer_used > runtime->oss.period_bytes) |
@@ -1415,14 +1417,18 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha | |||
1415 | xfer += tmp; | 1417 | xfer += tmp; |
1416 | if ((substream->f_flags & O_NONBLOCK) != 0 && | 1418 | if ((substream->f_flags & O_NONBLOCK) != 0 && |
1417 | tmp != runtime->oss.period_bytes) | 1419 | tmp != runtime->oss.period_bytes) |
1418 | break; | 1420 | tmp = -EAGAIN; |
1419 | } | 1421 | } |
1420 | } | ||
1421 | mutex_unlock(&runtime->oss.params_lock); | ||
1422 | return xfer; | ||
1423 | |||
1424 | err: | 1422 | err: |
1425 | mutex_unlock(&runtime->oss.params_lock); | 1423 | mutex_unlock(&runtime->oss.params_lock); |
1424 | if (tmp < 0) | ||
1425 | break; | ||
1426 | if (signal_pending(current)) { | ||
1427 | tmp = -ERESTARTSYS; | ||
1428 | break; | ||
1429 | } | ||
1430 | tmp = 0; | ||
1431 | } | ||
1426 | return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp; | 1432 | return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp; |
1427 | } | 1433 | } |
1428 | 1434 | ||
@@ -1470,8 +1476,11 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use | |||
1470 | 1476 | ||
1471 | if ((tmp = snd_pcm_oss_make_ready(substream)) < 0) | 1477 | if ((tmp = snd_pcm_oss_make_ready(substream)) < 0) |
1472 | return tmp; | 1478 | return tmp; |
1473 | mutex_lock(&runtime->oss.params_lock); | ||
1474 | while (bytes > 0) { | 1479 | while (bytes > 0) { |
1480 | if (mutex_lock_interruptible(&runtime->oss.params_lock)) { | ||
1481 | tmp = -ERESTARTSYS; | ||
1482 | break; | ||
1483 | } | ||
1475 | if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) { | 1484 | if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) { |
1476 | if (runtime->oss.buffer_used == 0) { | 1485 | if (runtime->oss.buffer_used == 0) { |
1477 | tmp = snd_pcm_oss_read2(substream, runtime->oss.buffer, runtime->oss.period_bytes, 1); | 1486 | tmp = snd_pcm_oss_read2(substream, runtime->oss.buffer, runtime->oss.period_bytes, 1); |
@@ -1502,12 +1511,16 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use | |||
1502 | bytes -= tmp; | 1511 | bytes -= tmp; |
1503 | xfer += tmp; | 1512 | xfer += tmp; |
1504 | } | 1513 | } |
1505 | } | ||
1506 | mutex_unlock(&runtime->oss.params_lock); | ||
1507 | return xfer; | ||
1508 | |||
1509 | err: | 1514 | err: |
1510 | mutex_unlock(&runtime->oss.params_lock); | 1515 | mutex_unlock(&runtime->oss.params_lock); |
1516 | if (tmp < 0) | ||
1517 | break; | ||
1518 | if (signal_pending(current)) { | ||
1519 | tmp = -ERESTARTSYS; | ||
1520 | break; | ||
1521 | } | ||
1522 | tmp = 0; | ||
1523 | } | ||
1511 | return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp; | 1524 | return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp; |
1512 | } | 1525 | } |
1513 | 1526 | ||
diff --git a/sound/core/oss/pcm_plugin.c b/sound/core/oss/pcm_plugin.c index 727ac44d39f4..a84a1d3d23e5 100644 --- a/sound/core/oss/pcm_plugin.c +++ b/sound/core/oss/pcm_plugin.c | |||
@@ -591,18 +591,26 @@ snd_pcm_sframes_t snd_pcm_plug_write_transfer(struct snd_pcm_substream *plug, st | |||
591 | snd_pcm_sframes_t frames = size; | 591 | snd_pcm_sframes_t frames = size; |
592 | 592 | ||
593 | plugin = snd_pcm_plug_first(plug); | 593 | plugin = snd_pcm_plug_first(plug); |
594 | while (plugin && frames > 0) { | 594 | while (plugin) { |
595 | if (frames <= 0) | ||
596 | return frames; | ||
595 | if ((next = plugin->next) != NULL) { | 597 | if ((next = plugin->next) != NULL) { |
596 | snd_pcm_sframes_t frames1 = frames; | 598 | snd_pcm_sframes_t frames1 = frames; |
597 | if (plugin->dst_frames) | 599 | if (plugin->dst_frames) { |
598 | frames1 = plugin->dst_frames(plugin, frames); | 600 | frames1 = plugin->dst_frames(plugin, frames); |
601 | if (frames1 <= 0) | ||
602 | return frames1; | ||
603 | } | ||
599 | if ((err = next->client_channels(next, frames1, &dst_channels)) < 0) { | 604 | if ((err = next->client_channels(next, frames1, &dst_channels)) < 0) { |
600 | return err; | 605 | return err; |
601 | } | 606 | } |
602 | if (err != frames1) { | 607 | if (err != frames1) { |
603 | frames = err; | 608 | frames = err; |
604 | if (plugin->src_frames) | 609 | if (plugin->src_frames) { |
605 | frames = plugin->src_frames(plugin, frames1); | 610 | frames = plugin->src_frames(plugin, frames1); |
611 | if (frames <= 0) | ||
612 | return frames; | ||
613 | } | ||
606 | } | 614 | } |
607 | } else | 615 | } else |
608 | dst_channels = NULL; | 616 | dst_channels = NULL; |
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index cd20f91326fe..4c145d6bccd4 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c | |||
@@ -578,7 +578,6 @@ static inline unsigned int muldiv32(unsigned int a, unsigned int b, | |||
578 | { | 578 | { |
579 | u_int64_t n = (u_int64_t) a * b; | 579 | u_int64_t n = (u_int64_t) a * b; |
580 | if (c == 0) { | 580 | if (c == 0) { |
581 | snd_BUG_ON(!n); | ||
582 | *r = 0; | 581 | *r = 0; |
583 | return UINT_MAX; | 582 | return UINT_MAX; |
584 | } | 583 | } |
@@ -1664,7 +1663,7 @@ int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm, | |||
1664 | return changed; | 1663 | return changed; |
1665 | if (params->rmask) { | 1664 | if (params->rmask) { |
1666 | int err = snd_pcm_hw_refine(pcm, params); | 1665 | int err = snd_pcm_hw_refine(pcm, params); |
1667 | if (snd_BUG_ON(err < 0)) | 1666 | if (err < 0) |
1668 | return err; | 1667 | return err; |
1669 | } | 1668 | } |
1670 | return snd_pcm_hw_param_value(params, var, dir); | 1669 | return snd_pcm_hw_param_value(params, var, dir); |
@@ -1711,7 +1710,7 @@ int snd_pcm_hw_param_last(struct snd_pcm_substream *pcm, | |||
1711 | return changed; | 1710 | return changed; |
1712 | if (params->rmask) { | 1711 | if (params->rmask) { |
1713 | int err = snd_pcm_hw_refine(pcm, params); | 1712 | int err = snd_pcm_hw_refine(pcm, params); |
1714 | if (snd_BUG_ON(err < 0)) | 1713 | if (err < 0) |
1715 | return err; | 1714 | return err; |
1716 | } | 1715 | } |
1717 | return snd_pcm_hw_param_value(params, var, dir); | 1716 | return snd_pcm_hw_param_value(params, var, dir); |
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c index b450a27588c8..16f8124b1150 100644 --- a/sound/core/rawmidi.c +++ b/sound/core/rawmidi.c | |||
@@ -579,15 +579,14 @@ static int snd_rawmidi_info_user(struct snd_rawmidi_substream *substream, | |||
579 | return 0; | 579 | return 0; |
580 | } | 580 | } |
581 | 581 | ||
582 | int snd_rawmidi_info_select(struct snd_card *card, struct snd_rawmidi_info *info) | 582 | static int __snd_rawmidi_info_select(struct snd_card *card, |
583 | struct snd_rawmidi_info *info) | ||
583 | { | 584 | { |
584 | struct snd_rawmidi *rmidi; | 585 | struct snd_rawmidi *rmidi; |
585 | struct snd_rawmidi_str *pstr; | 586 | struct snd_rawmidi_str *pstr; |
586 | struct snd_rawmidi_substream *substream; | 587 | struct snd_rawmidi_substream *substream; |
587 | 588 | ||
588 | mutex_lock(®ister_mutex); | ||
589 | rmidi = snd_rawmidi_search(card, info->device); | 589 | rmidi = snd_rawmidi_search(card, info->device); |
590 | mutex_unlock(®ister_mutex); | ||
591 | if (!rmidi) | 590 | if (!rmidi) |
592 | return -ENXIO; | 591 | return -ENXIO; |
593 | if (info->stream < 0 || info->stream > 1) | 592 | if (info->stream < 0 || info->stream > 1) |
@@ -603,6 +602,16 @@ int snd_rawmidi_info_select(struct snd_card *card, struct snd_rawmidi_info *info | |||
603 | } | 602 | } |
604 | return -ENXIO; | 603 | return -ENXIO; |
605 | } | 604 | } |
605 | |||
606 | int snd_rawmidi_info_select(struct snd_card *card, struct snd_rawmidi_info *info) | ||
607 | { | ||
608 | int ret; | ||
609 | |||
610 | mutex_lock(®ister_mutex); | ||
611 | ret = __snd_rawmidi_info_select(card, info); | ||
612 | mutex_unlock(®ister_mutex); | ||
613 | return ret; | ||
614 | } | ||
606 | EXPORT_SYMBOL(snd_rawmidi_info_select); | 615 | EXPORT_SYMBOL(snd_rawmidi_info_select); |
607 | 616 | ||
608 | static int snd_rawmidi_info_select_user(struct snd_card *card, | 617 | static int snd_rawmidi_info_select_user(struct snd_card *card, |
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c index b36de76f24e2..167b943469ab 100644 --- a/sound/core/seq/seq_clientmgr.c +++ b/sound/core/seq/seq_clientmgr.c | |||
@@ -236,6 +236,7 @@ static struct snd_seq_client *seq_create_client1(int client_index, int poolsize) | |||
236 | rwlock_init(&client->ports_lock); | 236 | rwlock_init(&client->ports_lock); |
237 | mutex_init(&client->ports_mutex); | 237 | mutex_init(&client->ports_mutex); |
238 | INIT_LIST_HEAD(&client->ports_list_head); | 238 | INIT_LIST_HEAD(&client->ports_list_head); |
239 | mutex_init(&client->ioctl_mutex); | ||
239 | 240 | ||
240 | /* find free slot in the client table */ | 241 | /* find free slot in the client table */ |
241 | spin_lock_irqsave(&clients_lock, flags); | 242 | spin_lock_irqsave(&clients_lock, flags); |
@@ -1011,7 +1012,7 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf, | |||
1011 | { | 1012 | { |
1012 | struct snd_seq_client *client = file->private_data; | 1013 | struct snd_seq_client *client = file->private_data; |
1013 | int written = 0, len; | 1014 | int written = 0, len; |
1014 | int err = -EINVAL; | 1015 | int err; |
1015 | struct snd_seq_event event; | 1016 | struct snd_seq_event event; |
1016 | 1017 | ||
1017 | if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT)) | 1018 | if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT)) |
@@ -1026,11 +1027,15 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf, | |||
1026 | 1027 | ||
1027 | /* allocate the pool now if the pool is not allocated yet */ | 1028 | /* allocate the pool now if the pool is not allocated yet */ |
1028 | if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) { | 1029 | if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) { |
1029 | if (snd_seq_pool_init(client->pool) < 0) | 1030 | mutex_lock(&client->ioctl_mutex); |
1031 | err = snd_seq_pool_init(client->pool); | ||
1032 | mutex_unlock(&client->ioctl_mutex); | ||
1033 | if (err < 0) | ||
1030 | return -ENOMEM; | 1034 | return -ENOMEM; |
1031 | } | 1035 | } |
1032 | 1036 | ||
1033 | /* only process whole events */ | 1037 | /* only process whole events */ |
1038 | err = -EINVAL; | ||
1034 | while (count >= sizeof(struct snd_seq_event)) { | 1039 | while (count >= sizeof(struct snd_seq_event)) { |
1035 | /* Read in the event header from the user */ | 1040 | /* Read in the event header from the user */ |
1036 | len = sizeof(event); | 1041 | len = sizeof(event); |
@@ -2220,11 +2225,15 @@ static int snd_seq_do_ioctl(struct snd_seq_client *client, unsigned int cmd, | |||
2220 | static long snd_seq_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | 2225 | static long snd_seq_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
2221 | { | 2226 | { |
2222 | struct snd_seq_client *client = file->private_data; | 2227 | struct snd_seq_client *client = file->private_data; |
2228 | long ret; | ||
2223 | 2229 | ||
2224 | if (snd_BUG_ON(!client)) | 2230 | if (snd_BUG_ON(!client)) |
2225 | return -ENXIO; | 2231 | return -ENXIO; |
2226 | 2232 | ||
2227 | return snd_seq_do_ioctl(client, cmd, (void __user *) arg); | 2233 | mutex_lock(&client->ioctl_mutex); |
2234 | ret = snd_seq_do_ioctl(client, cmd, (void __user *) arg); | ||
2235 | mutex_unlock(&client->ioctl_mutex); | ||
2236 | return ret; | ||
2228 | } | 2237 | } |
2229 | 2238 | ||
2230 | #ifdef CONFIG_COMPAT | 2239 | #ifdef CONFIG_COMPAT |
diff --git a/sound/core/seq/seq_clientmgr.h b/sound/core/seq/seq_clientmgr.h index 20f0a725ec7d..91f8f165bfdc 100644 --- a/sound/core/seq/seq_clientmgr.h +++ b/sound/core/seq/seq_clientmgr.h | |||
@@ -59,6 +59,7 @@ struct snd_seq_client { | |||
59 | struct list_head ports_list_head; | 59 | struct list_head ports_list_head; |
60 | rwlock_t ports_lock; | 60 | rwlock_t ports_lock; |
61 | struct mutex ports_mutex; | 61 | struct mutex ports_mutex; |
62 | struct mutex ioctl_mutex; | ||
62 | int convert32; /* convert 32->64bit */ | 63 | int convert32; /* convert 32->64bit */ |
63 | 64 | ||
64 | /* output pool */ | 65 | /* output pool */ |
diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c index 54f348a4fb78..cbd20cb8ca11 100644 --- a/sound/drivers/aloop.c +++ b/sound/drivers/aloop.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <sound/core.h> | 39 | #include <sound/core.h> |
40 | #include <sound/control.h> | 40 | #include <sound/control.h> |
41 | #include <sound/pcm.h> | 41 | #include <sound/pcm.h> |
42 | #include <sound/pcm_params.h> | ||
42 | #include <sound/info.h> | 43 | #include <sound/info.h> |
43 | #include <sound/initval.h> | 44 | #include <sound/initval.h> |
44 | 45 | ||
@@ -305,19 +306,6 @@ static int loopback_trigger(struct snd_pcm_substream *substream, int cmd) | |||
305 | return 0; | 306 | return 0; |
306 | } | 307 | } |
307 | 308 | ||
308 | static void params_change_substream(struct loopback_pcm *dpcm, | ||
309 | struct snd_pcm_runtime *runtime) | ||
310 | { | ||
311 | struct snd_pcm_runtime *dst_runtime; | ||
312 | |||
313 | if (dpcm == NULL || dpcm->substream == NULL) | ||
314 | return; | ||
315 | dst_runtime = dpcm->substream->runtime; | ||
316 | if (dst_runtime == NULL) | ||
317 | return; | ||
318 | dst_runtime->hw = dpcm->cable->hw; | ||
319 | } | ||
320 | |||
321 | static void params_change(struct snd_pcm_substream *substream) | 309 | static void params_change(struct snd_pcm_substream *substream) |
322 | { | 310 | { |
323 | struct snd_pcm_runtime *runtime = substream->runtime; | 311 | struct snd_pcm_runtime *runtime = substream->runtime; |
@@ -329,10 +317,6 @@ static void params_change(struct snd_pcm_substream *substream) | |||
329 | cable->hw.rate_max = runtime->rate; | 317 | cable->hw.rate_max = runtime->rate; |
330 | cable->hw.channels_min = runtime->channels; | 318 | cable->hw.channels_min = runtime->channels; |
331 | cable->hw.channels_max = runtime->channels; | 319 | cable->hw.channels_max = runtime->channels; |
332 | params_change_substream(cable->streams[SNDRV_PCM_STREAM_PLAYBACK], | ||
333 | runtime); | ||
334 | params_change_substream(cable->streams[SNDRV_PCM_STREAM_CAPTURE], | ||
335 | runtime); | ||
336 | } | 320 | } |
337 | 321 | ||
338 | static int loopback_prepare(struct snd_pcm_substream *substream) | 322 | static int loopback_prepare(struct snd_pcm_substream *substream) |
@@ -620,26 +604,29 @@ static unsigned int get_cable_index(struct snd_pcm_substream *substream) | |||
620 | static int rule_format(struct snd_pcm_hw_params *params, | 604 | static int rule_format(struct snd_pcm_hw_params *params, |
621 | struct snd_pcm_hw_rule *rule) | 605 | struct snd_pcm_hw_rule *rule) |
622 | { | 606 | { |
607 | struct loopback_pcm *dpcm = rule->private; | ||
608 | struct loopback_cable *cable = dpcm->cable; | ||
609 | struct snd_mask m; | ||
623 | 610 | ||
624 | struct snd_pcm_hardware *hw = rule->private; | 611 | snd_mask_none(&m); |
625 | struct snd_mask *maskp = hw_param_mask(params, rule->var); | 612 | mutex_lock(&dpcm->loopback->cable_lock); |
626 | 613 | m.bits[0] = (u_int32_t)cable->hw.formats; | |
627 | maskp->bits[0] &= (u_int32_t)hw->formats; | 614 | m.bits[1] = (u_int32_t)(cable->hw.formats >> 32); |
628 | maskp->bits[1] &= (u_int32_t)(hw->formats >> 32); | 615 | mutex_unlock(&dpcm->loopback->cable_lock); |
629 | memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX-64) / 8); /* clear rest */ | 616 | return snd_mask_refine(hw_param_mask(params, rule->var), &m); |
630 | if (! maskp->bits[0] && ! maskp->bits[1]) | ||
631 | return -EINVAL; | ||
632 | return 0; | ||
633 | } | 617 | } |
634 | 618 | ||
635 | static int rule_rate(struct snd_pcm_hw_params *params, | 619 | static int rule_rate(struct snd_pcm_hw_params *params, |
636 | struct snd_pcm_hw_rule *rule) | 620 | struct snd_pcm_hw_rule *rule) |
637 | { | 621 | { |
638 | struct snd_pcm_hardware *hw = rule->private; | 622 | struct loopback_pcm *dpcm = rule->private; |
623 | struct loopback_cable *cable = dpcm->cable; | ||
639 | struct snd_interval t; | 624 | struct snd_interval t; |
640 | 625 | ||
641 | t.min = hw->rate_min; | 626 | mutex_lock(&dpcm->loopback->cable_lock); |
642 | t.max = hw->rate_max; | 627 | t.min = cable->hw.rate_min; |
628 | t.max = cable->hw.rate_max; | ||
629 | mutex_unlock(&dpcm->loopback->cable_lock); | ||
643 | t.openmin = t.openmax = 0; | 630 | t.openmin = t.openmax = 0; |
644 | t.integer = 0; | 631 | t.integer = 0; |
645 | return snd_interval_refine(hw_param_interval(params, rule->var), &t); | 632 | return snd_interval_refine(hw_param_interval(params, rule->var), &t); |
@@ -648,22 +635,44 @@ static int rule_rate(struct snd_pcm_hw_params *params, | |||
648 | static int rule_channels(struct snd_pcm_hw_params *params, | 635 | static int rule_channels(struct snd_pcm_hw_params *params, |
649 | struct snd_pcm_hw_rule *rule) | 636 | struct snd_pcm_hw_rule *rule) |
650 | { | 637 | { |
651 | struct snd_pcm_hardware *hw = rule->private; | 638 | struct loopback_pcm *dpcm = rule->private; |
639 | struct loopback_cable *cable = dpcm->cable; | ||
652 | struct snd_interval t; | 640 | struct snd_interval t; |
653 | 641 | ||
654 | t.min = hw->channels_min; | 642 | mutex_lock(&dpcm->loopback->cable_lock); |
655 | t.max = hw->channels_max; | 643 | t.min = cable->hw.channels_min; |
644 | t.max = cable->hw.channels_max; | ||
645 | mutex_unlock(&dpcm->loopback->cable_lock); | ||
656 | t.openmin = t.openmax = 0; | 646 | t.openmin = t.openmax = 0; |
657 | t.integer = 0; | 647 | t.integer = 0; |
658 | return snd_interval_refine(hw_param_interval(params, rule->var), &t); | 648 | return snd_interval_refine(hw_param_interval(params, rule->var), &t); |
659 | } | 649 | } |
660 | 650 | ||
651 | static void free_cable(struct snd_pcm_substream *substream) | ||
652 | { | ||
653 | struct loopback *loopback = substream->private_data; | ||
654 | int dev = get_cable_index(substream); | ||
655 | struct loopback_cable *cable; | ||
656 | |||
657 | cable = loopback->cables[substream->number][dev]; | ||
658 | if (!cable) | ||
659 | return; | ||
660 | if (cable->streams[!substream->stream]) { | ||
661 | /* other stream is still alive */ | ||
662 | cable->streams[substream->stream] = NULL; | ||
663 | } else { | ||
664 | /* free the cable */ | ||
665 | loopback->cables[substream->number][dev] = NULL; | ||
666 | kfree(cable); | ||
667 | } | ||
668 | } | ||
669 | |||
661 | static int loopback_open(struct snd_pcm_substream *substream) | 670 | static int loopback_open(struct snd_pcm_substream *substream) |
662 | { | 671 | { |
663 | struct snd_pcm_runtime *runtime = substream->runtime; | 672 | struct snd_pcm_runtime *runtime = substream->runtime; |
664 | struct loopback *loopback = substream->private_data; | 673 | struct loopback *loopback = substream->private_data; |
665 | struct loopback_pcm *dpcm; | 674 | struct loopback_pcm *dpcm; |
666 | struct loopback_cable *cable; | 675 | struct loopback_cable *cable = NULL; |
667 | int err = 0; | 676 | int err = 0; |
668 | int dev = get_cable_index(substream); | 677 | int dev = get_cable_index(substream); |
669 | 678 | ||
@@ -682,7 +691,6 @@ static int loopback_open(struct snd_pcm_substream *substream) | |||
682 | if (!cable) { | 691 | if (!cable) { |
683 | cable = kzalloc(sizeof(*cable), GFP_KERNEL); | 692 | cable = kzalloc(sizeof(*cable), GFP_KERNEL); |
684 | if (!cable) { | 693 | if (!cable) { |
685 | kfree(dpcm); | ||
686 | err = -ENOMEM; | 694 | err = -ENOMEM; |
687 | goto unlock; | 695 | goto unlock; |
688 | } | 696 | } |
@@ -700,19 +708,19 @@ static int loopback_open(struct snd_pcm_substream *substream) | |||
700 | /* are cached -> they do not reflect the actual state */ | 708 | /* are cached -> they do not reflect the actual state */ |
701 | err = snd_pcm_hw_rule_add(runtime, 0, | 709 | err = snd_pcm_hw_rule_add(runtime, 0, |
702 | SNDRV_PCM_HW_PARAM_FORMAT, | 710 | SNDRV_PCM_HW_PARAM_FORMAT, |
703 | rule_format, &runtime->hw, | 711 | rule_format, dpcm, |
704 | SNDRV_PCM_HW_PARAM_FORMAT, -1); | 712 | SNDRV_PCM_HW_PARAM_FORMAT, -1); |
705 | if (err < 0) | 713 | if (err < 0) |
706 | goto unlock; | 714 | goto unlock; |
707 | err = snd_pcm_hw_rule_add(runtime, 0, | 715 | err = snd_pcm_hw_rule_add(runtime, 0, |
708 | SNDRV_PCM_HW_PARAM_RATE, | 716 | SNDRV_PCM_HW_PARAM_RATE, |
709 | rule_rate, &runtime->hw, | 717 | rule_rate, dpcm, |
710 | SNDRV_PCM_HW_PARAM_RATE, -1); | 718 | SNDRV_PCM_HW_PARAM_RATE, -1); |
711 | if (err < 0) | 719 | if (err < 0) |
712 | goto unlock; | 720 | goto unlock; |
713 | err = snd_pcm_hw_rule_add(runtime, 0, | 721 | err = snd_pcm_hw_rule_add(runtime, 0, |
714 | SNDRV_PCM_HW_PARAM_CHANNELS, | 722 | SNDRV_PCM_HW_PARAM_CHANNELS, |
715 | rule_channels, &runtime->hw, | 723 | rule_channels, dpcm, |
716 | SNDRV_PCM_HW_PARAM_CHANNELS, -1); | 724 | SNDRV_PCM_HW_PARAM_CHANNELS, -1); |
717 | if (err < 0) | 725 | if (err < 0) |
718 | goto unlock; | 726 | goto unlock; |
@@ -724,6 +732,10 @@ static int loopback_open(struct snd_pcm_substream *substream) | |||
724 | else | 732 | else |
725 | runtime->hw = cable->hw; | 733 | runtime->hw = cable->hw; |
726 | unlock: | 734 | unlock: |
735 | if (err < 0) { | ||
736 | free_cable(substream); | ||
737 | kfree(dpcm); | ||
738 | } | ||
727 | mutex_unlock(&loopback->cable_lock); | 739 | mutex_unlock(&loopback->cable_lock); |
728 | return err; | 740 | return err; |
729 | } | 741 | } |
@@ -732,20 +744,10 @@ static int loopback_close(struct snd_pcm_substream *substream) | |||
732 | { | 744 | { |
733 | struct loopback *loopback = substream->private_data; | 745 | struct loopback *loopback = substream->private_data; |
734 | struct loopback_pcm *dpcm = substream->runtime->private_data; | 746 | struct loopback_pcm *dpcm = substream->runtime->private_data; |
735 | struct loopback_cable *cable; | ||
736 | int dev = get_cable_index(substream); | ||
737 | 747 | ||
738 | loopback_timer_stop(dpcm); | 748 | loopback_timer_stop(dpcm); |
739 | mutex_lock(&loopback->cable_lock); | 749 | mutex_lock(&loopback->cable_lock); |
740 | cable = loopback->cables[substream->number][dev]; | 750 | free_cable(substream); |
741 | if (cable->streams[!substream->stream]) { | ||
742 | /* other stream is still alive */ | ||
743 | cable->streams[substream->stream] = NULL; | ||
744 | } else { | ||
745 | /* free the cable */ | ||
746 | loopback->cables[substream->number][dev] = NULL; | ||
747 | kfree(cable); | ||
748 | } | ||
749 | mutex_unlock(&loopback->cable_lock); | 751 | mutex_unlock(&loopback->cable_lock); |
750 | return 0; | 752 | return 0; |
751 | } | 753 | } |
diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c index 8fef1b8d1fd8..bd7bcf428bcf 100644 --- a/sound/hda/hdac_i915.c +++ b/sound/hda/hdac_i915.c | |||
@@ -183,7 +183,7 @@ static int hdac_component_master_match(struct device *dev, void *data) | |||
183 | */ | 183 | */ |
184 | int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops *aops) | 184 | int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops *aops) |
185 | { | 185 | { |
186 | if (WARN_ON(!hdac_acomp)) | 186 | if (!hdac_acomp) |
187 | return -ENODEV; | 187 | return -ENODEV; |
188 | 188 | ||
189 | hdac_acomp->audio_ops = aops; | 189 | hdac_acomp->audio_ops = aops; |
@@ -240,7 +240,8 @@ out_master_del: | |||
240 | out_err: | 240 | out_err: |
241 | kfree(acomp); | 241 | kfree(acomp); |
242 | bus->audio_component = NULL; | 242 | bus->audio_component = NULL; |
243 | dev_err(dev, "failed to add i915 component master (%d)\n", ret); | 243 | hdac_acomp = NULL; |
244 | dev_info(dev, "failed to add i915 component master (%d)\n", ret); | ||
244 | 245 | ||
245 | return ret; | 246 | return ret; |
246 | } | 247 | } |
@@ -273,6 +274,7 @@ int snd_hdac_i915_exit(struct hdac_bus *bus) | |||
273 | 274 | ||
274 | kfree(acomp); | 275 | kfree(acomp); |
275 | bus->audio_component = NULL; | 276 | bus->audio_component = NULL; |
277 | hdac_acomp = NULL; | ||
276 | 278 | ||
277 | return 0; | 279 | return 0; |
278 | } | 280 | } |
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index e2e08fc73b50..20512fe32a97 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
@@ -2088,9 +2088,11 @@ static int azx_probe_continue(struct azx *chip) | |||
2088 | * for other chips, still continue probing as other | 2088 | * for other chips, still continue probing as other |
2089 | * codecs can be on the same link. | 2089 | * codecs can be on the same link. |
2090 | */ | 2090 | */ |
2091 | if (CONTROLLER_IN_GPU(pci)) | 2091 | if (CONTROLLER_IN_GPU(pci)) { |
2092 | dev_err(chip->card->dev, | ||
2093 | "HSW/BDW HD-audio HDMI/DP requires binding with gfx driver\n"); | ||
2092 | goto out_free; | 2094 | goto out_free; |
2093 | else | 2095 | } else |
2094 | goto skip_i915; | 2096 | goto skip_i915; |
2095 | } | 2097 | } |
2096 | 2098 | ||
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c index 80bbadc83721..d6e079f4ec09 100644 --- a/sound/pci/hda/patch_cirrus.c +++ b/sound/pci/hda/patch_cirrus.c | |||
@@ -408,6 +408,7 @@ static const struct snd_pci_quirk cs420x_fixup_tbl[] = { | |||
408 | /*SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27),*/ | 408 | /*SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27),*/ |
409 | 409 | ||
410 | /* codec SSID */ | 410 | /* codec SSID */ |
411 | SND_PCI_QUIRK(0x106b, 0x0600, "iMac 14,1", CS420X_IMAC27_122), | ||
411 | SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81), | 412 | SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81), |
412 | SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122), | 413 | SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122), |
413 | SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101), | 414 | SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101), |
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index ac5de4365e15..c92b7ba344ef 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c | |||
@@ -261,6 +261,7 @@ enum { | |||
261 | CXT_FIXUP_HP_530, | 261 | CXT_FIXUP_HP_530, |
262 | CXT_FIXUP_CAP_MIX_AMP_5047, | 262 | CXT_FIXUP_CAP_MIX_AMP_5047, |
263 | CXT_FIXUP_MUTE_LED_EAPD, | 263 | CXT_FIXUP_MUTE_LED_EAPD, |
264 | CXT_FIXUP_HP_DOCK, | ||
264 | CXT_FIXUP_HP_SPECTRE, | 265 | CXT_FIXUP_HP_SPECTRE, |
265 | CXT_FIXUP_HP_GATE_MIC, | 266 | CXT_FIXUP_HP_GATE_MIC, |
266 | }; | 267 | }; |
@@ -778,6 +779,14 @@ static const struct hda_fixup cxt_fixups[] = { | |||
778 | .type = HDA_FIXUP_FUNC, | 779 | .type = HDA_FIXUP_FUNC, |
779 | .v.func = cxt_fixup_mute_led_eapd, | 780 | .v.func = cxt_fixup_mute_led_eapd, |
780 | }, | 781 | }, |
782 | [CXT_FIXUP_HP_DOCK] = { | ||
783 | .type = HDA_FIXUP_PINS, | ||
784 | .v.pins = (const struct hda_pintbl[]) { | ||
785 | { 0x16, 0x21011020 }, /* line-out */ | ||
786 | { 0x18, 0x2181103f }, /* line-in */ | ||
787 | { } | ||
788 | } | ||
789 | }, | ||
781 | [CXT_FIXUP_HP_SPECTRE] = { | 790 | [CXT_FIXUP_HP_SPECTRE] = { |
782 | .type = HDA_FIXUP_PINS, | 791 | .type = HDA_FIXUP_PINS, |
783 | .v.pins = (const struct hda_pintbl[]) { | 792 | .v.pins = (const struct hda_pintbl[]) { |
@@ -839,6 +848,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { | |||
839 | SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC), | 848 | SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC), |
840 | SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC), | 849 | SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC), |
841 | SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC), | 850 | SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC), |
851 | SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK), | ||
842 | SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE), | 852 | SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE), |
843 | SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC), | 853 | SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC), |
844 | SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN), | 854 | SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN), |
@@ -872,6 +882,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = { | |||
872 | { .id = CXT_PINCFG_LEMOTE_A1205, .name = "lemote-a1205" }, | 882 | { .id = CXT_PINCFG_LEMOTE_A1205, .name = "lemote-a1205" }, |
873 | { .id = CXT_FIXUP_OLPC_XO, .name = "olpc-xo" }, | 883 | { .id = CXT_FIXUP_OLPC_XO, .name = "olpc-xo" }, |
874 | { .id = CXT_FIXUP_MUTE_LED_EAPD, .name = "mute-led-eapd" }, | 884 | { .id = CXT_FIXUP_MUTE_LED_EAPD, .name = "mute-led-eapd" }, |
885 | { .id = CXT_FIXUP_HP_DOCK, .name = "hp-dock" }, | ||
875 | {} | 886 | {} |
876 | }; | 887 | }; |
877 | 888 | ||
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index e5730a7d0480..b302d056e5d3 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -3130,6 +3130,19 @@ static void alc269_fixup_pincfg_no_hp_to_lineout(struct hda_codec *codec, | |||
3130 | spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP; | 3130 | spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP; |
3131 | } | 3131 | } |
3132 | 3132 | ||
3133 | static void alc269_fixup_pincfg_U7x7_headset_mic(struct hda_codec *codec, | ||
3134 | const struct hda_fixup *fix, | ||
3135 | int action) | ||
3136 | { | ||
3137 | unsigned int cfg_headphone = snd_hda_codec_get_pincfg(codec, 0x21); | ||
3138 | unsigned int cfg_headset_mic = snd_hda_codec_get_pincfg(codec, 0x19); | ||
3139 | |||
3140 | if (cfg_headphone && cfg_headset_mic == 0x411111f0) | ||
3141 | snd_hda_codec_set_pincfg(codec, 0x19, | ||
3142 | (cfg_headphone & ~AC_DEFCFG_DEVICE) | | ||
3143 | (AC_JACK_MIC_IN << AC_DEFCFG_DEVICE_SHIFT)); | ||
3144 | } | ||
3145 | |||
3133 | static void alc269_fixup_hweq(struct hda_codec *codec, | 3146 | static void alc269_fixup_hweq(struct hda_codec *codec, |
3134 | const struct hda_fixup *fix, int action) | 3147 | const struct hda_fixup *fix, int action) |
3135 | { | 3148 | { |
@@ -4782,6 +4795,7 @@ enum { | |||
4782 | ALC269_FIXUP_LIFEBOOK_EXTMIC, | 4795 | ALC269_FIXUP_LIFEBOOK_EXTMIC, |
4783 | ALC269_FIXUP_LIFEBOOK_HP_PIN, | 4796 | ALC269_FIXUP_LIFEBOOK_HP_PIN, |
4784 | ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT, | 4797 | ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT, |
4798 | ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC, | ||
4785 | ALC269_FIXUP_AMIC, | 4799 | ALC269_FIXUP_AMIC, |
4786 | ALC269_FIXUP_DMIC, | 4800 | ALC269_FIXUP_DMIC, |
4787 | ALC269VB_FIXUP_AMIC, | 4801 | ALC269VB_FIXUP_AMIC, |
@@ -4839,6 +4853,7 @@ enum { | |||
4839 | ALC286_FIXUP_HP_GPIO_LED, | 4853 | ALC286_FIXUP_HP_GPIO_LED, |
4840 | ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY, | 4854 | ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY, |
4841 | ALC280_FIXUP_HP_DOCK_PINS, | 4855 | ALC280_FIXUP_HP_DOCK_PINS, |
4856 | ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED, | ||
4842 | ALC280_FIXUP_HP_9480M, | 4857 | ALC280_FIXUP_HP_9480M, |
4843 | ALC288_FIXUP_DELL_HEADSET_MODE, | 4858 | ALC288_FIXUP_DELL_HEADSET_MODE, |
4844 | ALC288_FIXUP_DELL1_MIC_NO_PRESENCE, | 4859 | ALC288_FIXUP_DELL1_MIC_NO_PRESENCE, |
@@ -4971,6 +4986,10 @@ static const struct hda_fixup alc269_fixups[] = { | |||
4971 | .type = HDA_FIXUP_FUNC, | 4986 | .type = HDA_FIXUP_FUNC, |
4972 | .v.func = alc269_fixup_pincfg_no_hp_to_lineout, | 4987 | .v.func = alc269_fixup_pincfg_no_hp_to_lineout, |
4973 | }, | 4988 | }, |
4989 | [ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC] = { | ||
4990 | .type = HDA_FIXUP_FUNC, | ||
4991 | .v.func = alc269_fixup_pincfg_U7x7_headset_mic, | ||
4992 | }, | ||
4974 | [ALC269_FIXUP_AMIC] = { | 4993 | [ALC269_FIXUP_AMIC] = { |
4975 | .type = HDA_FIXUP_PINS, | 4994 | .type = HDA_FIXUP_PINS, |
4976 | .v.pins = (const struct hda_pintbl[]) { | 4995 | .v.pins = (const struct hda_pintbl[]) { |
@@ -5377,6 +5396,16 @@ static const struct hda_fixup alc269_fixups[] = { | |||
5377 | .chained = true, | 5396 | .chained = true, |
5378 | .chain_id = ALC280_FIXUP_HP_GPIO4 | 5397 | .chain_id = ALC280_FIXUP_HP_GPIO4 |
5379 | }, | 5398 | }, |
5399 | [ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED] = { | ||
5400 | .type = HDA_FIXUP_PINS, | ||
5401 | .v.pins = (const struct hda_pintbl[]) { | ||
5402 | { 0x1b, 0x21011020 }, /* line-out */ | ||
5403 | { 0x18, 0x2181103f }, /* line-in */ | ||
5404 | { }, | ||
5405 | }, | ||
5406 | .chained = true, | ||
5407 | .chain_id = ALC269_FIXUP_HP_GPIO_MIC1_LED | ||
5408 | }, | ||
5380 | [ALC280_FIXUP_HP_9480M] = { | 5409 | [ALC280_FIXUP_HP_9480M] = { |
5381 | .type = HDA_FIXUP_FUNC, | 5410 | .type = HDA_FIXUP_FUNC, |
5382 | .v.func = alc280_fixup_hp_9480m, | 5411 | .v.func = alc280_fixup_hp_9480m, |
@@ -5589,6 +5618,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
5589 | SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), | 5618 | SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), |
5590 | SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME), | 5619 | SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME), |
5591 | SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER), | 5620 | SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER), |
5621 | SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), | ||
5592 | SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), | 5622 | SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), |
5593 | SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), | 5623 | SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), |
5594 | SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), | 5624 | SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), |
@@ -5629,7 +5659,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
5629 | SND_PCI_QUIRK(0x103c, 0x2256, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), | 5659 | SND_PCI_QUIRK(0x103c, 0x2256, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), |
5630 | SND_PCI_QUIRK(0x103c, 0x2257, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), | 5660 | SND_PCI_QUIRK(0x103c, 0x2257, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), |
5631 | SND_PCI_QUIRK(0x103c, 0x2259, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), | 5661 | SND_PCI_QUIRK(0x103c, 0x2259, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), |
5632 | SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), | 5662 | SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED), |
5633 | SND_PCI_QUIRK(0x103c, 0x2260, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), | 5663 | SND_PCI_QUIRK(0x103c, 0x2260, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), |
5634 | SND_PCI_QUIRK(0x103c, 0x2263, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), | 5664 | SND_PCI_QUIRK(0x103c, 0x2263, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), |
5635 | SND_PCI_QUIRK(0x103c, 0x2264, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), | 5665 | SND_PCI_QUIRK(0x103c, 0x2264, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), |
@@ -5675,6 +5705,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
5675 | SND_PCI_QUIRK(0x10cf, 0x159f, "Lifebook E780", ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT), | 5705 | SND_PCI_QUIRK(0x10cf, 0x159f, "Lifebook E780", ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT), |
5676 | SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN), | 5706 | SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN), |
5677 | SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN), | 5707 | SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN), |
5708 | SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC), | ||
5678 | SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC), | 5709 | SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC), |
5679 | SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC), | 5710 | SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC), |
5680 | SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_BXBT2807_MIC), | 5711 | SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_BXBT2807_MIC), |
@@ -5794,6 +5825,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = { | |||
5794 | {.id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC, .name = "headset-mode-no-hp-mic"}, | 5825 | {.id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC, .name = "headset-mode-no-hp-mic"}, |
5795 | {.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"}, | 5826 | {.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"}, |
5796 | {.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"}, | 5827 | {.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"}, |
5828 | {.id = ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED, .name = "hp-dock-gpio-mic1-led"}, | ||
5797 | {.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"}, | 5829 | {.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"}, |
5798 | {.id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, .name = "dell-headset-dock"}, | 5830 | {.id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, .name = "dell-headset-dock"}, |
5799 | {.id = ALC283_FIXUP_CHROME_BOOK, .name = "alc283-dac-wcaps"}, | 5831 | {.id = ALC283_FIXUP_CHROME_BOOK, .name = "alc283-dac-wcaps"}, |
@@ -5942,6 +5974,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { | |||
5942 | {0x1b, 0x01011020}, | 5974 | {0x1b, 0x01011020}, |
5943 | {0x21, 0x02211010}), | 5975 | {0x21, 0x02211010}), |
5944 | SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, | 5976 | SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, |
5977 | {0x12, 0x90a60130}, | ||
5978 | {0x14, 0x90170110}, | ||
5979 | {0x1b, 0x01011020}, | ||
5980 | {0x21, 0x0221101f}), | ||
5981 | SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, | ||
5945 | {0x12, 0x90a60160}, | 5982 | {0x12, 0x90a60160}, |
5946 | {0x14, 0x90170120}, | 5983 | {0x14, 0x90170120}, |
5947 | {0x21, 0x02211030}), | 5984 | {0x21, 0x02211030}), |
@@ -5958,6 +5995,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { | |||
5958 | {0x14, 0x90170110}, | 5995 | {0x14, 0x90170110}, |
5959 | {0x21, 0x02211020}), | 5996 | {0x21, 0x02211020}), |
5960 | SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, | 5997 | SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, |
5998 | {0x12, 0x90a60130}, | ||
5999 | {0x14, 0x90170110}, | ||
6000 | {0x14, 0x01011020}, | ||
6001 | {0x21, 0x0221101f}), | ||
6002 | SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, | ||
5961 | ALC256_STANDARD_PINS), | 6003 | ALC256_STANDARD_PINS), |
5962 | SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4, | 6004 | SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4, |
5963 | {0x12, 0x90a60130}, | 6005 | {0x12, 0x90a60130}, |
@@ -6013,6 +6055,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { | |||
6013 | {0x12, 0x90a60120}, | 6055 | {0x12, 0x90a60120}, |
6014 | {0x14, 0x90170110}, | 6056 | {0x14, 0x90170110}, |
6015 | {0x21, 0x0321101f}), | 6057 | {0x21, 0x0321101f}), |
6058 | SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, | ||
6059 | {0x12, 0xb7a60130}, | ||
6060 | {0x14, 0x90170110}, | ||
6061 | {0x21, 0x04211020}), | ||
6016 | SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1, | 6062 | SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1, |
6017 | ALC290_STANDARD_PINS, | 6063 | ALC290_STANDARD_PINS, |
6018 | {0x15, 0x04211040}, | 6064 | {0x15, 0x04211040}, |
diff --git a/sound/soc/codecs/pcm512x-spi.c b/sound/soc/codecs/pcm512x-spi.c index 712ed6598c48..ebdf9bd5a64c 100644 --- a/sound/soc/codecs/pcm512x-spi.c +++ b/sound/soc/codecs/pcm512x-spi.c | |||
@@ -70,3 +70,7 @@ static struct spi_driver pcm512x_spi_driver = { | |||
70 | }; | 70 | }; |
71 | 71 | ||
72 | module_spi_driver(pcm512x_spi_driver); | 72 | module_spi_driver(pcm512x_spi_driver); |
73 | |||
74 | MODULE_DESCRIPTION("ASoC PCM512x codec driver - SPI"); | ||
75 | MODULE_AUTHOR("Mark Brown <broonie@kernel.org>"); | ||
76 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/sound/soc/codecs/twl4030.c b/sound/soc/codecs/twl4030.c index a5a4e9f75c57..a06395507225 100644 --- a/sound/soc/codecs/twl4030.c +++ b/sound/soc/codecs/twl4030.c | |||
@@ -232,7 +232,7 @@ static struct twl4030_codec_data *twl4030_get_pdata(struct snd_soc_codec *codec) | |||
232 | struct twl4030_codec_data *pdata = dev_get_platdata(codec->dev); | 232 | struct twl4030_codec_data *pdata = dev_get_platdata(codec->dev); |
233 | struct device_node *twl4030_codec_node = NULL; | 233 | struct device_node *twl4030_codec_node = NULL; |
234 | 234 | ||
235 | twl4030_codec_node = of_find_node_by_name(codec->dev->parent->of_node, | 235 | twl4030_codec_node = of_get_child_by_name(codec->dev->parent->of_node, |
236 | "codec"); | 236 | "codec"); |
237 | 237 | ||
238 | if (!pdata && twl4030_codec_node) { | 238 | if (!pdata && twl4030_codec_node) { |
@@ -241,9 +241,11 @@ static struct twl4030_codec_data *twl4030_get_pdata(struct snd_soc_codec *codec) | |||
241 | GFP_KERNEL); | 241 | GFP_KERNEL); |
242 | if (!pdata) { | 242 | if (!pdata) { |
243 | dev_err(codec->dev, "Can not allocate memory\n"); | 243 | dev_err(codec->dev, "Can not allocate memory\n"); |
244 | of_node_put(twl4030_codec_node); | ||
244 | return NULL; | 245 | return NULL; |
245 | } | 246 | } |
246 | twl4030_setup_pdata_of(pdata, twl4030_codec_node); | 247 | twl4030_setup_pdata_of(pdata, twl4030_codec_node); |
248 | of_node_put(twl4030_codec_node); | ||
247 | } | 249 | } |
248 | 250 | ||
249 | return pdata; | 251 | return pdata; |
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c index 95d2392303eb..7ca67613e0d4 100644 --- a/sound/soc/fsl/fsl_ssi.c +++ b/sound/soc/fsl/fsl_ssi.c | |||
@@ -1408,12 +1408,6 @@ static int fsl_ssi_probe(struct platform_device *pdev) | |||
1408 | sizeof(fsl_ssi_ac97_dai)); | 1408 | sizeof(fsl_ssi_ac97_dai)); |
1409 | 1409 | ||
1410 | fsl_ac97_data = ssi_private; | 1410 | fsl_ac97_data = ssi_private; |
1411 | |||
1412 | ret = snd_soc_set_ac97_ops_of_reset(&fsl_ssi_ac97_ops, pdev); | ||
1413 | if (ret) { | ||
1414 | dev_err(&pdev->dev, "could not set AC'97 ops\n"); | ||
1415 | return ret; | ||
1416 | } | ||
1417 | } else { | 1411 | } else { |
1418 | /* Initialize this copy of the CPU DAI driver structure */ | 1412 | /* Initialize this copy of the CPU DAI driver structure */ |
1419 | memcpy(&ssi_private->cpu_dai_drv, &fsl_ssi_dai_template, | 1413 | memcpy(&ssi_private->cpu_dai_drv, &fsl_ssi_dai_template, |
@@ -1473,6 +1467,14 @@ static int fsl_ssi_probe(struct platform_device *pdev) | |||
1473 | return ret; | 1467 | return ret; |
1474 | } | 1468 | } |
1475 | 1469 | ||
1470 | if (fsl_ssi_is_ac97(ssi_private)) { | ||
1471 | ret = snd_soc_set_ac97_ops_of_reset(&fsl_ssi_ac97_ops, pdev); | ||
1472 | if (ret) { | ||
1473 | dev_err(&pdev->dev, "could not set AC'97 ops\n"); | ||
1474 | goto error_ac97_ops; | ||
1475 | } | ||
1476 | } | ||
1477 | |||
1476 | ret = devm_snd_soc_register_component(&pdev->dev, &fsl_ssi_component, | 1478 | ret = devm_snd_soc_register_component(&pdev->dev, &fsl_ssi_component, |
1477 | &ssi_private->cpu_dai_drv, 1); | 1479 | &ssi_private->cpu_dai_drv, 1); |
1478 | if (ret) { | 1480 | if (ret) { |
@@ -1556,6 +1558,10 @@ error_sound_card: | |||
1556 | fsl_ssi_debugfs_remove(&ssi_private->dbg_stats); | 1558 | fsl_ssi_debugfs_remove(&ssi_private->dbg_stats); |
1557 | 1559 | ||
1558 | error_asoc_register: | 1560 | error_asoc_register: |
1561 | if (fsl_ssi_is_ac97(ssi_private)) | ||
1562 | snd_soc_set_ac97_ops(NULL); | ||
1563 | |||
1564 | error_ac97_ops: | ||
1559 | if (ssi_private->soc->imx) | 1565 | if (ssi_private->soc->imx) |
1560 | fsl_ssi_imx_clean(pdev, ssi_private); | 1566 | fsl_ssi_imx_clean(pdev, ssi_private); |
1561 | 1567 | ||
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c index ba384dee277b..d62695d696c4 100644 --- a/sound/soc/generic/simple-card.c +++ b/sound/soc/generic/simple-card.c | |||
@@ -358,13 +358,19 @@ static int asoc_simple_card_dai_link_of(struct device_node *node, | |||
358 | snprintf(prop, sizeof(prop), "%scpu", prefix); | 358 | snprintf(prop, sizeof(prop), "%scpu", prefix); |
359 | cpu = of_get_child_by_name(node, prop); | 359 | cpu = of_get_child_by_name(node, prop); |
360 | 360 | ||
361 | if (!cpu) { | ||
362 | ret = -EINVAL; | ||
363 | dev_err(dev, "%s: Can't find %s DT node\n", __func__, prop); | ||
364 | goto dai_link_of_err; | ||
365 | } | ||
366 | |||
361 | snprintf(prop, sizeof(prop), "%splat", prefix); | 367 | snprintf(prop, sizeof(prop), "%splat", prefix); |
362 | plat = of_get_child_by_name(node, prop); | 368 | plat = of_get_child_by_name(node, prop); |
363 | 369 | ||
364 | snprintf(prop, sizeof(prop), "%scodec", prefix); | 370 | snprintf(prop, sizeof(prop), "%scodec", prefix); |
365 | codec = of_get_child_by_name(node, prop); | 371 | codec = of_get_child_by_name(node, prop); |
366 | 372 | ||
367 | if (!cpu || !codec) { | 373 | if (!codec) { |
368 | ret = -EINVAL; | 374 | ret = -EINVAL; |
369 | dev_err(dev, "%s: Can't find %s DT node\n", __func__, prop); | 375 | dev_err(dev, "%s: Can't find %s DT node\n", __func__, prop); |
370 | goto dai_link_of_err; | 376 | goto dai_link_of_err; |
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h index 085329878525..5976e3992dd1 100644 --- a/sound/soc/sh/rcar/rsnd.h +++ b/sound/soc/sh/rcar/rsnd.h | |||
@@ -235,6 +235,7 @@ enum rsnd_mod_type { | |||
235 | RSND_MOD_MIX, | 235 | RSND_MOD_MIX, |
236 | RSND_MOD_CTU, | 236 | RSND_MOD_CTU, |
237 | RSND_MOD_SRC, | 237 | RSND_MOD_SRC, |
238 | RSND_MOD_SSIP, /* SSI parent */ | ||
238 | RSND_MOD_SSI, | 239 | RSND_MOD_SSI, |
239 | RSND_MOD_MAX, | 240 | RSND_MOD_MAX, |
240 | }; | 241 | }; |
@@ -365,6 +366,7 @@ struct rsnd_dai_stream { | |||
365 | }; | 366 | }; |
366 | #define rsnd_io_to_mod(io, i) ((i) < RSND_MOD_MAX ? (io)->mod[(i)] : NULL) | 367 | #define rsnd_io_to_mod(io, i) ((i) < RSND_MOD_MAX ? (io)->mod[(i)] : NULL) |
367 | #define rsnd_io_to_mod_ssi(io) rsnd_io_to_mod((io), RSND_MOD_SSI) | 368 | #define rsnd_io_to_mod_ssi(io) rsnd_io_to_mod((io), RSND_MOD_SSI) |
369 | #define rsnd_io_to_mod_ssip(io) rsnd_io_to_mod((io), RSND_MOD_SSIP) | ||
368 | #define rsnd_io_to_mod_src(io) rsnd_io_to_mod((io), RSND_MOD_SRC) | 370 | #define rsnd_io_to_mod_src(io) rsnd_io_to_mod((io), RSND_MOD_SRC) |
369 | #define rsnd_io_to_mod_ctu(io) rsnd_io_to_mod((io), RSND_MOD_CTU) | 371 | #define rsnd_io_to_mod_ctu(io) rsnd_io_to_mod((io), RSND_MOD_CTU) |
370 | #define rsnd_io_to_mod_mix(io) rsnd_io_to_mod((io), RSND_MOD_MIX) | 372 | #define rsnd_io_to_mod_mix(io) rsnd_io_to_mod((io), RSND_MOD_MIX) |
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c index c62a2947ac14..38aae96267c9 100644 --- a/sound/soc/sh/rcar/ssi.c +++ b/sound/soc/sh/rcar/ssi.c | |||
@@ -550,11 +550,16 @@ static int rsnd_ssi_dma_remove(struct rsnd_mod *mod, | |||
550 | struct rsnd_priv *priv) | 550 | struct rsnd_priv *priv) |
551 | { | 551 | { |
552 | struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod); | 552 | struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod); |
553 | struct rsnd_mod *pure_ssi_mod = rsnd_io_to_mod_ssi(io); | ||
553 | struct device *dev = rsnd_priv_to_dev(priv); | 554 | struct device *dev = rsnd_priv_to_dev(priv); |
554 | int irq = ssi->info->irq; | 555 | int irq = ssi->info->irq; |
555 | 556 | ||
556 | rsnd_dma_quit(io, rsnd_mod_to_dma(mod)); | 557 | rsnd_dma_quit(io, rsnd_mod_to_dma(mod)); |
557 | 558 | ||
559 | /* Do nothing if non SSI (= SSI parent, multi SSI) mod */ | ||
560 | if (pure_ssi_mod != mod) | ||
561 | return 0; | ||
562 | |||
558 | /* PIO will request IRQ again */ | 563 | /* PIO will request IRQ again */ |
559 | devm_free_irq(dev, irq, mod); | 564 | devm_free_irq(dev, irq, mod); |
560 | 565 | ||
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 0ed9ae030ce1..c5447ff078b3 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c | |||
@@ -343,17 +343,20 @@ static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request, | |||
343 | int validx, int *value_ret) | 343 | int validx, int *value_ret) |
344 | { | 344 | { |
345 | struct snd_usb_audio *chip = cval->head.mixer->chip; | 345 | struct snd_usb_audio *chip = cval->head.mixer->chip; |
346 | unsigned char buf[4 + 3 * sizeof(__u32)]; /* enough space for one range */ | 346 | /* enough space for one range */ |
347 | unsigned char buf[sizeof(__u16) + 3 * sizeof(__u32)]; | ||
347 | unsigned char *val; | 348 | unsigned char *val; |
348 | int idx = 0, ret, size; | 349 | int idx = 0, ret, val_size, size; |
349 | __u8 bRequest; | 350 | __u8 bRequest; |
350 | 351 | ||
352 | val_size = uac2_ctl_value_size(cval->val_type); | ||
353 | |||
351 | if (request == UAC_GET_CUR) { | 354 | if (request == UAC_GET_CUR) { |
352 | bRequest = UAC2_CS_CUR; | 355 | bRequest = UAC2_CS_CUR; |
353 | size = uac2_ctl_value_size(cval->val_type); | 356 | size = val_size; |
354 | } else { | 357 | } else { |
355 | bRequest = UAC2_CS_RANGE; | 358 | bRequest = UAC2_CS_RANGE; |
356 | size = sizeof(buf); | 359 | size = sizeof(__u16) + 3 * val_size; |
357 | } | 360 | } |
358 | 361 | ||
359 | memset(buf, 0, sizeof(buf)); | 362 | memset(buf, 0, sizeof(buf)); |
@@ -386,16 +389,17 @@ error: | |||
386 | val = buf + sizeof(__u16); | 389 | val = buf + sizeof(__u16); |
387 | break; | 390 | break; |
388 | case UAC_GET_MAX: | 391 | case UAC_GET_MAX: |
389 | val = buf + sizeof(__u16) * 2; | 392 | val = buf + sizeof(__u16) + val_size; |
390 | break; | 393 | break; |
391 | case UAC_GET_RES: | 394 | case UAC_GET_RES: |
392 | val = buf + sizeof(__u16) * 3; | 395 | val = buf + sizeof(__u16) + val_size * 2; |
393 | break; | 396 | break; |
394 | default: | 397 | default: |
395 | return -EINVAL; | 398 | return -EINVAL; |
396 | } | 399 | } |
397 | 400 | ||
398 | *value_ret = convert_signed_value(cval, snd_usb_combine_bytes(val, sizeof(__u16))); | 401 | *value_ret = convert_signed_value(cval, |
402 | snd_usb_combine_bytes(val, val_size)); | ||
399 | 403 | ||
400 | return 0; | 404 | return 0; |
401 | } | 405 | } |
@@ -2101,20 +2105,25 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid, | |||
2101 | kctl->private_value = (unsigned long)namelist; | 2105 | kctl->private_value = (unsigned long)namelist; |
2102 | kctl->private_free = usb_mixer_selector_elem_free; | 2106 | kctl->private_free = usb_mixer_selector_elem_free; |
2103 | 2107 | ||
2104 | nameid = uac_selector_unit_iSelector(desc); | 2108 | /* check the static mapping table at first */ |
2105 | len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name)); | 2109 | len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name)); |
2106 | if (len) | ||
2107 | ; | ||
2108 | else if (nameid) | ||
2109 | len = snd_usb_copy_string_desc(state, nameid, kctl->id.name, | ||
2110 | sizeof(kctl->id.name)); | ||
2111 | else | ||
2112 | len = get_term_name(state, &state->oterm, | ||
2113 | kctl->id.name, sizeof(kctl->id.name), 0); | ||
2114 | |||
2115 | if (!len) { | 2110 | if (!len) { |
2116 | strlcpy(kctl->id.name, "USB", sizeof(kctl->id.name)); | 2111 | /* no mapping ? */ |
2112 | /* if iSelector is given, use it */ | ||
2113 | nameid = uac_selector_unit_iSelector(desc); | ||
2114 | if (nameid) | ||
2115 | len = snd_usb_copy_string_desc(state, nameid, | ||
2116 | kctl->id.name, | ||
2117 | sizeof(kctl->id.name)); | ||
2118 | /* ... or pick up the terminal name at next */ | ||
2119 | if (!len) | ||
2120 | len = get_term_name(state, &state->oterm, | ||
2121 | kctl->id.name, sizeof(kctl->id.name), 0); | ||
2122 | /* ... or use the fixed string "USB" as the last resort */ | ||
2123 | if (!len) | ||
2124 | strlcpy(kctl->id.name, "USB", sizeof(kctl->id.name)); | ||
2117 | 2125 | ||
2126 | /* and add the proper suffix */ | ||
2118 | if (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR) | 2127 | if (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR) |
2119 | append_ctl_name(kctl, " Clock Source"); | 2128 | append_ctl_name(kctl, " Clock Source"); |
2120 | else if ((state->oterm.type & 0xff00) == 0x0100) | 2129 | else if ((state->oterm.type & 0xff00) == 0x0100) |
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index 48afae053c56..8e8db4ddf365 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c | |||
@@ -348,6 +348,15 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs, | |||
348 | 348 | ||
349 | alts = &iface->altsetting[1]; | 349 | alts = &iface->altsetting[1]; |
350 | goto add_sync_ep; | 350 | goto add_sync_ep; |
351 | case USB_ID(0x1397, 0x0002): | ||
352 | ep = 0x81; | ||
353 | iface = usb_ifnum_to_if(dev, 1); | ||
354 | |||
355 | if (!iface || iface->num_altsetting == 0) | ||
356 | return -EINVAL; | ||
357 | |||
358 | alts = &iface->altsetting[1]; | ||
359 | goto add_sync_ep; | ||
351 | } | 360 | } |
352 | if (attr == USB_ENDPOINT_SYNC_ASYNC && | 361 | if (attr == USB_ENDPOINT_SYNC_ASYNC && |
353 | altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC && | 362 | altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC && |
diff --git a/ti_config_fragments/android_omap.cfg b/ti_config_fragments/android_omap.cfg index 9f2bcdf6391b..c5ff532fed1f 100644 --- a/ti_config_fragments/android_omap.cfg +++ b/ti_config_fragments/android_omap.cfg | |||
@@ -56,11 +56,13 @@ CONFIG_BACKLIGHT_GPIO=m | |||
56 | CONFIG_BACKLIGHT_LED=m | 56 | CONFIG_BACKLIGHT_LED=m |
57 | CONFIG_DISPLAY_PANEL_DPI=m | 57 | CONFIG_DISPLAY_PANEL_DPI=m |
58 | CONFIG_DISPLAY_CONNECTOR_HDMI=m | 58 | CONFIG_DISPLAY_CONNECTOR_HDMI=m |
59 | CONFIG_DISPLAY_CONNECTOR_DVI=m | ||
59 | CONFIG_DISPLAY_DRA7EVM_ENCODER_TPD12S015=m | 60 | CONFIG_DISPLAY_DRA7EVM_ENCODER_TPD12S015=m |
60 | CONFIG_DISPLAY_ENCODER_TPD12S015=m | 61 | CONFIG_DISPLAY_ENCODER_TPD12S015=m |
61 | CONFIG_DISPLAY_ENCODER_SII9022=m | 62 | CONFIG_DISPLAY_ENCODER_SII9022=m |
62 | CONFIG_DISPLAY_ENCODER_TC358768=m | 63 | CONFIG_DISPLAY_ENCODER_TC358768=m |
63 | CONFIG_VIDEO_TI_FPD3_SERDES=m | 64 | CONFIG_VIDEO_TI_FPD3_SERDES=m |
65 | CONFIG_DISPLAY_ENCODER_TFP410=m | ||
64 | 66 | ||
65 | # Touch screen | 67 | # Touch screen |
66 | CONFIG_TOUCHSCREEN_EDT_FT5X06=m | 68 | CONFIG_TOUCHSCREEN_EDT_FT5X06=m |
@@ -73,3 +75,5 @@ CONFIG_TOUCHSCREEN_LDC3001=m | |||
73 | CONFIG_CAN=y | 75 | CONFIG_CAN=y |
74 | CONFIG_CAN_C_CAN=y | 76 | CONFIG_CAN_C_CAN=y |
75 | CONFIG_CAN_C_CAN_PLATFORM=y | 77 | CONFIG_CAN_C_CAN_PLATFORM=y |
78 | # MCAN | ||
79 | CONFIG_CAN_M_CAN=y | ||
diff --git a/ti_config_fragments/audio_display.cfg b/ti_config_fragments/audio_display.cfg index 8925c31049fe..5174ca787eb5 100644 --- a/ti_config_fragments/audio_display.cfg +++ b/ti_config_fragments/audio_display.cfg | |||
@@ -50,11 +50,13 @@ CONFIG_TI_DSS6=y | |||
50 | 50 | ||
51 | CONFIG_DISPLAY_PANEL_DPI=y | 51 | CONFIG_DISPLAY_PANEL_DPI=y |
52 | CONFIG_DISPLAY_CONNECTOR_HDMI=y | 52 | CONFIG_DISPLAY_CONNECTOR_HDMI=y |
53 | CONFIG_DISPLAY_CONNECTOR_DVI=y | ||
53 | CONFIG_DISPLAY_DRA7EVM_ENCODER_TPD12S015=y | 54 | CONFIG_DISPLAY_DRA7EVM_ENCODER_TPD12S015=y |
54 | CONFIG_DISPLAY_ENCODER_TPD12S015=y | 55 | CONFIG_DISPLAY_ENCODER_TPD12S015=y |
55 | CONFIG_DISPLAY_ENCODER_SII9022=y | 56 | CONFIG_DISPLAY_ENCODER_SII9022=y |
56 | CONFIG_DISPLAY_ENCODER_TC358768=y | 57 | CONFIG_DISPLAY_ENCODER_TC358768=y |
57 | CONFIG_VIDEO_TI_FPD3_SERDES=y | 58 | CONFIG_VIDEO_TI_FPD3_SERDES=y |
59 | CONFIG_DISPLAY_ENCODER_TFP410=y | ||
58 | 60 | ||
59 | # sound | 61 | # sound |
60 | 62 | ||
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile index b5f08e8cab33..e4bb1de1d526 100644 --- a/tools/testing/selftests/vm/Makefile +++ b/tools/testing/selftests/vm/Makefile | |||
@@ -1,9 +1,5 @@ | |||
1 | # Makefile for vm selftests | 1 | # Makefile for vm selftests |
2 | 2 | ||
3 | ifndef OUTPUT | ||
4 | OUTPUT := $(shell pwd) | ||
5 | endif | ||
6 | |||
7 | CFLAGS = -Wall -I ../../../../usr/include $(EXTRA_CFLAGS) | 3 | CFLAGS = -Wall -I ../../../../usr/include $(EXTRA_CFLAGS) |
8 | BINARIES = compaction_test | 4 | BINARIES = compaction_test |
9 | BINARIES += hugepage-mmap | 5 | BINARIES += hugepage-mmap |
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile index eabcff411984..92d7eff2827a 100644 --- a/tools/testing/selftests/x86/Makefile +++ b/tools/testing/selftests/x86/Makefile | |||
@@ -4,7 +4,8 @@ include ../lib.mk | |||
4 | 4 | ||
5 | .PHONY: all all_32 all_64 warn_32bit_failure clean | 5 | .PHONY: all all_32 all_64 warn_32bit_failure clean |
6 | 6 | ||
7 | TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs ldt_gdt syscall_nt ptrace_syscall | 7 | TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs ldt_gdt syscall_nt ptrace_syscall \ |
8 | test_vsyscall | ||
8 | TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault sigreturn test_syscall_vdso unwind_vdso \ | 9 | TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault sigreturn test_syscall_vdso unwind_vdso \ |
9 | test_FCMOV test_FCOMI test_FISTTP | 10 | test_FCMOV test_FCOMI test_FISTTP |
10 | 11 | ||
diff --git a/tools/testing/selftests/x86/test_vsyscall.c b/tools/testing/selftests/x86/test_vsyscall.c new file mode 100644 index 000000000000..6e0bd52ad53d --- /dev/null +++ b/tools/testing/selftests/x86/test_vsyscall.c | |||
@@ -0,0 +1,500 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | |||
3 | #define _GNU_SOURCE | ||
4 | |||
5 | #include <stdio.h> | ||
6 | #include <sys/time.h> | ||
7 | #include <time.h> | ||
8 | #include <stdlib.h> | ||
9 | #include <sys/syscall.h> | ||
10 | #include <unistd.h> | ||
11 | #include <dlfcn.h> | ||
12 | #include <string.h> | ||
13 | #include <inttypes.h> | ||
14 | #include <signal.h> | ||
15 | #include <sys/ucontext.h> | ||
16 | #include <errno.h> | ||
17 | #include <err.h> | ||
18 | #include <sched.h> | ||
19 | #include <stdbool.h> | ||
20 | #include <setjmp.h> | ||
21 | |||
22 | #ifdef __x86_64__ | ||
23 | # define VSYS(x) (x) | ||
24 | #else | ||
25 | # define VSYS(x) 0 | ||
26 | #endif | ||
27 | |||
28 | #ifndef SYS_getcpu | ||
29 | # ifdef __x86_64__ | ||
30 | # define SYS_getcpu 309 | ||
31 | # else | ||
32 | # define SYS_getcpu 318 | ||
33 | # endif | ||
34 | #endif | ||
35 | |||
36 | static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *), | ||
37 | int flags) | ||
38 | { | ||
39 | struct sigaction sa; | ||
40 | memset(&sa, 0, sizeof(sa)); | ||
41 | sa.sa_sigaction = handler; | ||
42 | sa.sa_flags = SA_SIGINFO | flags; | ||
43 | sigemptyset(&sa.sa_mask); | ||
44 | if (sigaction(sig, &sa, 0)) | ||
45 | err(1, "sigaction"); | ||
46 | } | ||
47 | |||
48 | /* vsyscalls and vDSO */ | ||
49 | bool should_read_vsyscall = false; | ||
50 | |||
51 | typedef long (*gtod_t)(struct timeval *tv, struct timezone *tz); | ||
52 | gtod_t vgtod = (gtod_t)VSYS(0xffffffffff600000); | ||
53 | gtod_t vdso_gtod; | ||
54 | |||
55 | typedef int (*vgettime_t)(clockid_t, struct timespec *); | ||
56 | vgettime_t vdso_gettime; | ||
57 | |||
58 | typedef long (*time_func_t)(time_t *t); | ||
59 | time_func_t vtime = (time_func_t)VSYS(0xffffffffff600400); | ||
60 | time_func_t vdso_time; | ||
61 | |||
62 | typedef long (*getcpu_t)(unsigned *, unsigned *, void *); | ||
63 | getcpu_t vgetcpu = (getcpu_t)VSYS(0xffffffffff600800); | ||
64 | getcpu_t vdso_getcpu; | ||
65 | |||
66 | static void init_vdso(void) | ||
67 | { | ||
68 | void *vdso = dlopen("linux-vdso.so.1", RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD); | ||
69 | if (!vdso) | ||
70 | vdso = dlopen("linux-gate.so.1", RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD); | ||
71 | if (!vdso) { | ||
72 | printf("[WARN]\tfailed to find vDSO\n"); | ||
73 | return; | ||
74 | } | ||
75 | |||
76 | vdso_gtod = (gtod_t)dlsym(vdso, "__vdso_gettimeofday"); | ||
77 | if (!vdso_gtod) | ||
78 | printf("[WARN]\tfailed to find gettimeofday in vDSO\n"); | ||
79 | |||
80 | vdso_gettime = (vgettime_t)dlsym(vdso, "__vdso_clock_gettime"); | ||
81 | if (!vdso_gettime) | ||
82 | printf("[WARN]\tfailed to find clock_gettime in vDSO\n"); | ||
83 | |||
84 | vdso_time = (time_func_t)dlsym(vdso, "__vdso_time"); | ||
85 | if (!vdso_time) | ||
86 | printf("[WARN]\tfailed to find time in vDSO\n"); | ||
87 | |||
88 | vdso_getcpu = (getcpu_t)dlsym(vdso, "__vdso_getcpu"); | ||
89 | if (!vdso_getcpu) { | ||
90 | /* getcpu() was never wired up in the 32-bit vDSO. */ | ||
91 | printf("[%s]\tfailed to find getcpu in vDSO\n", | ||
92 | sizeof(long) == 8 ? "WARN" : "NOTE"); | ||
93 | } | ||
94 | } | ||
95 | |||
96 | static int init_vsys(void) | ||
97 | { | ||
98 | #ifdef __x86_64__ | ||
99 | int nerrs = 0; | ||
100 | FILE *maps; | ||
101 | char line[128]; | ||
102 | bool found = false; | ||
103 | |||
104 | maps = fopen("/proc/self/maps", "r"); | ||
105 | if (!maps) { | ||
106 | printf("[WARN]\tCould not open /proc/self/maps -- assuming vsyscall is r-x\n"); | ||
107 | should_read_vsyscall = true; | ||
108 | return 0; | ||
109 | } | ||
110 | |||
111 | while (fgets(line, sizeof(line), maps)) { | ||
112 | char r, x; | ||
113 | void *start, *end; | ||
114 | char name[128]; | ||
115 | if (sscanf(line, "%p-%p %c-%cp %*x %*x:%*x %*u %s", | ||
116 | &start, &end, &r, &x, name) != 5) | ||
117 | continue; | ||
118 | |||
119 | if (strcmp(name, "[vsyscall]")) | ||
120 | continue; | ||
121 | |||
122 | printf("\tvsyscall map: %s", line); | ||
123 | |||
124 | if (start != (void *)0xffffffffff600000 || | ||
125 | end != (void *)0xffffffffff601000) { | ||
126 | printf("[FAIL]\taddress range is nonsense\n"); | ||
127 | nerrs++; | ||
128 | } | ||
129 | |||
130 | printf("\tvsyscall permissions are %c-%c\n", r, x); | ||
131 | should_read_vsyscall = (r == 'r'); | ||
132 | if (x != 'x') { | ||
133 | vgtod = NULL; | ||
134 | vtime = NULL; | ||
135 | vgetcpu = NULL; | ||
136 | } | ||
137 | |||
138 | found = true; | ||
139 | break; | ||
140 | } | ||
141 | |||
142 | fclose(maps); | ||
143 | |||
144 | if (!found) { | ||
145 | printf("\tno vsyscall map in /proc/self/maps\n"); | ||
146 | should_read_vsyscall = false; | ||
147 | vgtod = NULL; | ||
148 | vtime = NULL; | ||
149 | vgetcpu = NULL; | ||
150 | } | ||
151 | |||
152 | return nerrs; | ||
153 | #else | ||
154 | return 0; | ||
155 | #endif | ||
156 | } | ||
157 | |||
158 | /* syscalls */ | ||
159 | static inline long sys_gtod(struct timeval *tv, struct timezone *tz) | ||
160 | { | ||
161 | return syscall(SYS_gettimeofday, tv, tz); | ||
162 | } | ||
163 | |||
164 | static inline int sys_clock_gettime(clockid_t id, struct timespec *ts) | ||
165 | { | ||
166 | return syscall(SYS_clock_gettime, id, ts); | ||
167 | } | ||
168 | |||
169 | static inline long sys_time(time_t *t) | ||
170 | { | ||
171 | return syscall(SYS_time, t); | ||
172 | } | ||
173 | |||
174 | static inline long sys_getcpu(unsigned * cpu, unsigned * node, | ||
175 | void* cache) | ||
176 | { | ||
177 | return syscall(SYS_getcpu, cpu, node, cache); | ||
178 | } | ||
179 | |||
180 | static jmp_buf jmpbuf; | ||
181 | |||
182 | static void sigsegv(int sig, siginfo_t *info, void *ctx_void) | ||
183 | { | ||
184 | siglongjmp(jmpbuf, 1); | ||
185 | } | ||
186 | |||
187 | static double tv_diff(const struct timeval *a, const struct timeval *b) | ||
188 | { | ||
189 | return (double)(a->tv_sec - b->tv_sec) + | ||
190 | (double)((int)a->tv_usec - (int)b->tv_usec) * 1e-6; | ||
191 | } | ||
192 | |||
193 | static int check_gtod(const struct timeval *tv_sys1, | ||
194 | const struct timeval *tv_sys2, | ||
195 | const struct timezone *tz_sys, | ||
196 | const char *which, | ||
197 | const struct timeval *tv_other, | ||
198 | const struct timezone *tz_other) | ||
199 | { | ||
200 | int nerrs = 0; | ||
201 | double d1, d2; | ||
202 | |||
203 | if (tz_other && (tz_sys->tz_minuteswest != tz_other->tz_minuteswest || tz_sys->tz_dsttime != tz_other->tz_dsttime)) { | ||
204 | printf("[FAIL] %s tz mismatch\n", which); | ||
205 | nerrs++; | ||
206 | } | ||
207 | |||
208 | d1 = tv_diff(tv_other, tv_sys1); | ||
209 | d2 = tv_diff(tv_sys2, tv_other); | ||
210 | printf("\t%s time offsets: %lf %lf\n", which, d1, d2); | ||
211 | |||
212 | if (d1 < 0 || d2 < 0) { | ||
213 | printf("[FAIL]\t%s time was inconsistent with the syscall\n", which); | ||
214 | nerrs++; | ||
215 | } else { | ||
216 | printf("[OK]\t%s gettimeofday()'s timeval was okay\n", which); | ||
217 | } | ||
218 | |||
219 | return nerrs; | ||
220 | } | ||
221 | |||
222 | static int test_gtod(void) | ||
223 | { | ||
224 | struct timeval tv_sys1, tv_sys2, tv_vdso, tv_vsys; | ||
225 | struct timezone tz_sys, tz_vdso, tz_vsys; | ||
226 | long ret_vdso = -1; | ||
227 | long ret_vsys = -1; | ||
228 | int nerrs = 0; | ||
229 | |||
230 | printf("[RUN]\ttest gettimeofday()\n"); | ||
231 | |||
232 | if (sys_gtod(&tv_sys1, &tz_sys) != 0) | ||
233 | err(1, "syscall gettimeofday"); | ||
234 | if (vdso_gtod) | ||
235 | ret_vdso = vdso_gtod(&tv_vdso, &tz_vdso); | ||
236 | if (vgtod) | ||
237 | ret_vsys = vgtod(&tv_vsys, &tz_vsys); | ||
238 | if (sys_gtod(&tv_sys2, &tz_sys) != 0) | ||
239 | err(1, "syscall gettimeofday"); | ||
240 | |||
241 | if (vdso_gtod) { | ||
242 | if (ret_vdso == 0) { | ||
243 | nerrs += check_gtod(&tv_sys1, &tv_sys2, &tz_sys, "vDSO", &tv_vdso, &tz_vdso); | ||
244 | } else { | ||
245 | printf("[FAIL]\tvDSO gettimeofday() failed: %ld\n", ret_vdso); | ||
246 | nerrs++; | ||
247 | } | ||
248 | } | ||
249 | |||
250 | if (vgtod) { | ||
251 | if (ret_vsys == 0) { | ||
252 | nerrs += check_gtod(&tv_sys1, &tv_sys2, &tz_sys, "vsyscall", &tv_vsys, &tz_vsys); | ||
253 | } else { | ||
254 | printf("[FAIL]\tvsys gettimeofday() failed: %ld\n", ret_vsys); | ||
255 | nerrs++; | ||
256 | } | ||
257 | } | ||
258 | |||
259 | return nerrs; | ||
260 | } | ||
261 | |||
262 | static int test_time(void) { | ||
263 | int nerrs = 0; | ||
264 | |||
265 | printf("[RUN]\ttest time()\n"); | ||
266 | long t_sys1, t_sys2, t_vdso = 0, t_vsys = 0; | ||
267 | long t2_sys1 = -1, t2_sys2 = -1, t2_vdso = -1, t2_vsys = -1; | ||
268 | t_sys1 = sys_time(&t2_sys1); | ||
269 | if (vdso_time) | ||
270 | t_vdso = vdso_time(&t2_vdso); | ||
271 | if (vtime) | ||
272 | t_vsys = vtime(&t2_vsys); | ||
273 | t_sys2 = sys_time(&t2_sys2); | ||
274 | if (t_sys1 < 0 || t_sys1 != t2_sys1 || t_sys2 < 0 || t_sys2 != t2_sys2) { | ||
275 | printf("[FAIL]\tsyscall failed (ret1:%ld output1:%ld ret2:%ld output2:%ld)\n", t_sys1, t2_sys1, t_sys2, t2_sys2); | ||
276 | nerrs++; | ||
277 | return nerrs; | ||
278 | } | ||
279 | |||
280 | if (vdso_time) { | ||
281 | if (t_vdso < 0 || t_vdso != t2_vdso) { | ||
282 | printf("[FAIL]\tvDSO failed (ret:%ld output:%ld)\n", t_vdso, t2_vdso); | ||
283 | nerrs++; | ||
284 | } else if (t_vdso < t_sys1 || t_vdso > t_sys2) { | ||
285 | printf("[FAIL]\tvDSO returned the wrong time (%ld %ld %ld)\n", t_sys1, t_vdso, t_sys2); | ||
286 | nerrs++; | ||
287 | } else { | ||
288 | printf("[OK]\tvDSO time() is okay\n"); | ||
289 | } | ||
290 | } | ||
291 | |||
292 | if (vtime) { | ||
293 | if (t_vsys < 0 || t_vsys != t2_vsys) { | ||
294 | printf("[FAIL]\tvsyscall failed (ret:%ld output:%ld)\n", t_vsys, t2_vsys); | ||
295 | nerrs++; | ||
296 | } else if (t_vsys < t_sys1 || t_vsys > t_sys2) { | ||
297 | printf("[FAIL]\tvsyscall returned the wrong time (%ld %ld %ld)\n", t_sys1, t_vsys, t_sys2); | ||
298 | nerrs++; | ||
299 | } else { | ||
300 | printf("[OK]\tvsyscall time() is okay\n"); | ||
301 | } | ||
302 | } | ||
303 | |||
304 | return nerrs; | ||
305 | } | ||
306 | |||
307 | static int test_getcpu(int cpu) | ||
308 | { | ||
309 | int nerrs = 0; | ||
310 | long ret_sys, ret_vdso = -1, ret_vsys = -1; | ||
311 | |||
312 | printf("[RUN]\tgetcpu() on CPU %d\n", cpu); | ||
313 | |||
314 | cpu_set_t cpuset; | ||
315 | CPU_ZERO(&cpuset); | ||
316 | CPU_SET(cpu, &cpuset); | ||
317 | if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0) { | ||
318 | printf("[SKIP]\tfailed to force CPU %d\n", cpu); | ||
319 | return nerrs; | ||
320 | } | ||
321 | |||
322 | unsigned cpu_sys, cpu_vdso, cpu_vsys, node_sys, node_vdso, node_vsys; | ||
323 | unsigned node = 0; | ||
324 | bool have_node = false; | ||
325 | ret_sys = sys_getcpu(&cpu_sys, &node_sys, 0); | ||
326 | if (vdso_getcpu) | ||
327 | ret_vdso = vdso_getcpu(&cpu_vdso, &node_vdso, 0); | ||
328 | if (vgetcpu) | ||
329 | ret_vsys = vgetcpu(&cpu_vsys, &node_vsys, 0); | ||
330 | |||
331 | if (ret_sys == 0) { | ||
332 | if (cpu_sys != cpu) { | ||
333 | printf("[FAIL]\tsyscall reported CPU %hu but should be %d\n", cpu_sys, cpu); | ||
334 | nerrs++; | ||
335 | } | ||
336 | |||
337 | have_node = true; | ||
338 | node = node_sys; | ||
339 | } | ||
340 | |||
341 | if (vdso_getcpu) { | ||
342 | if (ret_vdso) { | ||
343 | printf("[FAIL]\tvDSO getcpu() failed\n"); | ||
344 | nerrs++; | ||
345 | } else { | ||
346 | if (!have_node) { | ||
347 | have_node = true; | ||
348 | node = node_vdso; | ||
349 | } | ||
350 | |||
351 | if (cpu_vdso != cpu) { | ||
352 | printf("[FAIL]\tvDSO reported CPU %hu but should be %d\n", cpu_vdso, cpu); | ||
353 | nerrs++; | ||
354 | } else { | ||
355 | printf("[OK]\tvDSO reported correct CPU\n"); | ||
356 | } | ||
357 | |||
358 | if (node_vdso != node) { | ||
359 | printf("[FAIL]\tvDSO reported node %hu but should be %hu\n", node_vdso, node); | ||
360 | nerrs++; | ||
361 | } else { | ||
362 | printf("[OK]\tvDSO reported correct node\n"); | ||
363 | } | ||
364 | } | ||
365 | } | ||
366 | |||
367 | if (vgetcpu) { | ||
368 | if (ret_vsys) { | ||
369 | printf("[FAIL]\tvsyscall getcpu() failed\n"); | ||
370 | nerrs++; | ||
371 | } else { | ||
372 | if (!have_node) { | ||
373 | have_node = true; | ||
374 | node = node_vsys; | ||
375 | } | ||
376 | |||
377 | if (cpu_vsys != cpu) { | ||
378 | printf("[FAIL]\tvsyscall reported CPU %hu but should be %d\n", cpu_vsys, cpu); | ||
379 | nerrs++; | ||
380 | } else { | ||
381 | printf("[OK]\tvsyscall reported correct CPU\n"); | ||
382 | } | ||
383 | |||
384 | if (node_vsys != node) { | ||
385 | printf("[FAIL]\tvsyscall reported node %hu but should be %hu\n", node_vsys, node); | ||
386 | nerrs++; | ||
387 | } else { | ||
388 | printf("[OK]\tvsyscall reported correct node\n"); | ||
389 | } | ||
390 | } | ||
391 | } | ||
392 | |||
393 | return nerrs; | ||
394 | } | ||
395 | |||
396 | static int test_vsys_r(void) | ||
397 | { | ||
398 | #ifdef __x86_64__ | ||
399 | printf("[RUN]\tChecking read access to the vsyscall page\n"); | ||
400 | bool can_read; | ||
401 | if (sigsetjmp(jmpbuf, 1) == 0) { | ||
402 | *(volatile int *)0xffffffffff600000; | ||
403 | can_read = true; | ||
404 | } else { | ||
405 | can_read = false; | ||
406 | } | ||
407 | |||
408 | if (can_read && !should_read_vsyscall) { | ||
409 | printf("[FAIL]\tWe have read access, but we shouldn't\n"); | ||
410 | return 1; | ||
411 | } else if (!can_read && should_read_vsyscall) { | ||
412 | printf("[FAIL]\tWe don't have read access, but we should\n"); | ||
413 | return 1; | ||
414 | } else { | ||
415 | printf("[OK]\tgot expected result\n"); | ||
416 | } | ||
417 | #endif | ||
418 | |||
419 | return 0; | ||
420 | } | ||
421 | |||
422 | |||
423 | #ifdef __x86_64__ | ||
424 | #define X86_EFLAGS_TF (1UL << 8) | ||
425 | static volatile sig_atomic_t num_vsyscall_traps; | ||
426 | |||
/*
 * Read RFLAGS.  Use the compiler builtin instead of inline asm: with an
 * "=rm" constraint the compiler may choose an rsp-relative memory
 * operand, which "pushfq; popq %0" would address incorrectly because
 * pushfq moves rsp before the pop.
 */
static unsigned long get_eflags(void)
{
	return __builtin_ia32_readeflags_u64();
}
433 | |||
/*
 * Write RFLAGS.  The builtin avoids the same hazard as in get_eflags():
 * an "rm" constraint on "pushq %0; popfq" could pick an rsp-relative
 * operand that is mis-addressed once pushq has moved rsp.
 */
static void set_eflags(unsigned long eflags)
{
	__builtin_ia32_writeeflags_u64(eflags);
}
438 | |||
439 | static void sigtrap(int sig, siginfo_t *info, void *ctx_void) | ||
440 | { | ||
441 | ucontext_t *ctx = (ucontext_t *)ctx_void; | ||
442 | unsigned long ip = ctx->uc_mcontext.gregs[REG_RIP]; | ||
443 | |||
444 | if (((ip ^ 0xffffffffff600000UL) & ~0xfffUL) == 0) | ||
445 | num_vsyscall_traps++; | ||
446 | } | ||
447 | |||
/*
 * Report whether the vsyscall page contains native code or is emulated,
 * by single-stepping (EFLAGS.TF) through a vtime() call and counting
 * SIGTRAP hits inside the vsyscall page (see sigtrap()).  Informational
 * only: always returns 0 failures.
 */
static int test_native_vsyscall(void)
{
	time_t tmp;
	bool is_native;

	if (!vtime)
		return 0;	/* no vsyscall time() entry to probe */

	printf("[RUN]\tchecking for native vsyscall\n");
	sethandler(SIGTRAP, sigtrap, 0);
	/* Turn on single-stepping only around the vtime() call. */
	set_eflags(get_eflags() | X86_EFLAGS_TF);
	vtime(&tmp);
	set_eflags(get_eflags() & ~X86_EFLAGS_TF);

	/*
	 * If vsyscalls are emulated, we expect a single trap in the
	 * vsyscall page -- the call instruction will trap with RIP
	 * pointing to the entry point before emulation takes over.
	 * In native mode, we expect two traps, since whatever code
	 * the vsyscall page contains will be more than just a ret
	 * instruction.
	 */
	is_native = (num_vsyscall_traps > 1);

	printf("\tvsyscalls are %s (%d instructions in vsyscall page)\n",
	       (is_native ? "native" : "emulated"),
	       (int)num_vsyscall_traps);

	return 0;
}
478 | #endif | ||
479 | |||
/*
 * Run every vsyscall/vDSO check in sequence and accumulate failures.
 * Exit status: 0 if all checks passed, 1 if any failed.
 */
int main(int argc, char **argv)
{
	int nerrs = 0;

	/* Resolve vDSO symbols and probe the vsyscall page entries first. */
	init_vdso();
	nerrs += init_vsys();

	nerrs += test_gtod();
	nerrs += test_time();
	/* Exercise getcpu() on two different CPUs. */
	nerrs += test_getcpu(0);
	nerrs += test_getcpu(1);

	/* test_vsys_r() may fault on purpose; install the SIGSEGV handler first. */
	sethandler(SIGSEGV, sigsegv, 0);
	nerrs += test_vsys_r();

#ifdef __x86_64__
	nerrs += test_native_vsyscall();
#endif

	return nerrs ? 1 : 0;
}
diff --git a/tools/usb/usbip/libsrc/usbip_common.c b/tools/usb/usbip/libsrc/usbip_common.c index ac73710473de..8000445ff884 100644 --- a/tools/usb/usbip/libsrc/usbip_common.c +++ b/tools/usb/usbip/libsrc/usbip_common.c | |||
@@ -215,9 +215,16 @@ int read_usb_interface(struct usbip_usb_device *udev, int i, | |||
215 | struct usbip_usb_interface *uinf) | 215 | struct usbip_usb_interface *uinf) |
216 | { | 216 | { |
217 | char busid[SYSFS_BUS_ID_SIZE]; | 217 | char busid[SYSFS_BUS_ID_SIZE]; |
218 | int size; | ||
218 | struct udev_device *sif; | 219 | struct udev_device *sif; |
219 | 220 | ||
220 | sprintf(busid, "%s:%d.%d", udev->busid, udev->bConfigurationValue, i); | 221 | size = snprintf(busid, sizeof(busid), "%s:%d.%d", |
222 | udev->busid, udev->bConfigurationValue, i); | ||
223 | if (size < 0 || (unsigned int)size >= sizeof(busid)) { | ||
224 | err("busid length %i >= %lu or < 0", size, | ||
225 | (unsigned long)sizeof(busid)); | ||
226 | return -1; | ||
227 | } | ||
221 | 228 | ||
222 | sif = udev_device_new_from_subsystem_sysname(udev_context, "usb", busid); | 229 | sif = udev_device_new_from_subsystem_sysname(udev_context, "usb", busid); |
223 | if (!sif) { | 230 | if (!sif) { |
diff --git a/tools/usb/usbip/libsrc/usbip_host_driver.c b/tools/usb/usbip/libsrc/usbip_host_driver.c index bef08d5c44e8..071b9ce99420 100644 --- a/tools/usb/usbip/libsrc/usbip_host_driver.c +++ b/tools/usb/usbip/libsrc/usbip_host_driver.c | |||
@@ -39,13 +39,19 @@ struct udev *udev_context; | |||
39 | static int32_t read_attr_usbip_status(struct usbip_usb_device *udev) | 39 | static int32_t read_attr_usbip_status(struct usbip_usb_device *udev) |
40 | { | 40 | { |
41 | char status_attr_path[SYSFS_PATH_MAX]; | 41 | char status_attr_path[SYSFS_PATH_MAX]; |
42 | int size; | ||
42 | int fd; | 43 | int fd; |
43 | int length; | 44 | int length; |
44 | char status; | 45 | char status; |
45 | int value = 0; | 46 | int value = 0; |
46 | 47 | ||
47 | snprintf(status_attr_path, SYSFS_PATH_MAX, "%s/usbip_status", | 48 | size = snprintf(status_attr_path, SYSFS_PATH_MAX, "%s/usbip_status", |
48 | udev->path); | 49 | udev->path); |
50 | if (size < 0 || (unsigned int)size >= sizeof(status_attr_path)) { | ||
51 | err("usbip_status path length %i >= %lu or < 0", size, | ||
52 | (unsigned long)sizeof(status_attr_path)); | ||
53 | return -1; | ||
54 | } | ||
49 | 55 | ||
50 | fd = open(status_attr_path, O_RDONLY); | 56 | fd = open(status_attr_path, O_RDONLY); |
51 | if (fd < 0) { | 57 | if (fd < 0) { |
@@ -225,6 +231,7 @@ int usbip_host_export_device(struct usbip_exported_device *edev, int sockfd) | |||
225 | { | 231 | { |
226 | char attr_name[] = "usbip_sockfd"; | 232 | char attr_name[] = "usbip_sockfd"; |
227 | char sockfd_attr_path[SYSFS_PATH_MAX]; | 233 | char sockfd_attr_path[SYSFS_PATH_MAX]; |
234 | int size; | ||
228 | char sockfd_buff[30]; | 235 | char sockfd_buff[30]; |
229 | int ret; | 236 | int ret; |
230 | 237 | ||
@@ -244,10 +251,20 @@ int usbip_host_export_device(struct usbip_exported_device *edev, int sockfd) | |||
244 | } | 251 | } |
245 | 252 | ||
246 | /* only the first interface is true */ | 253 | /* only the first interface is true */ |
247 | snprintf(sockfd_attr_path, sizeof(sockfd_attr_path), "%s/%s", | 254 | size = snprintf(sockfd_attr_path, sizeof(sockfd_attr_path), "%s/%s", |
248 | edev->udev.path, attr_name); | 255 | edev->udev.path, attr_name); |
256 | if (size < 0 || (unsigned int)size >= sizeof(sockfd_attr_path)) { | ||
257 | err("exported device path length %i >= %lu or < 0", size, | ||
258 | (unsigned long)sizeof(sockfd_attr_path)); | ||
259 | return -1; | ||
260 | } | ||
249 | 261 | ||
250 | snprintf(sockfd_buff, sizeof(sockfd_buff), "%d\n", sockfd); | 262 | size = snprintf(sockfd_buff, sizeof(sockfd_buff), "%d\n", sockfd); |
263 | if (size < 0 || (unsigned int)size >= sizeof(sockfd_buff)) { | ||
264 | err("socket length %i >= %lu or < 0", size, | ||
265 | (unsigned long)sizeof(sockfd_buff)); | ||
266 | return -1; | ||
267 | } | ||
251 | 268 | ||
252 | ret = write_sysfs_attribute(sockfd_attr_path, sockfd_buff, | 269 | ret = write_sysfs_attribute(sockfd_attr_path, sockfd_buff, |
253 | strlen(sockfd_buff)); | 270 | strlen(sockfd_buff)); |
diff --git a/tools/usb/usbip/libsrc/vhci_driver.c b/tools/usb/usbip/libsrc/vhci_driver.c index ad9204773533..1274f326242c 100644 --- a/tools/usb/usbip/libsrc/vhci_driver.c +++ b/tools/usb/usbip/libsrc/vhci_driver.c | |||
@@ -55,12 +55,12 @@ static int parse_status(const char *value) | |||
55 | 55 | ||
56 | while (*c != '\0') { | 56 | while (*c != '\0') { |
57 | int port, status, speed, devid; | 57 | int port, status, speed, devid; |
58 | unsigned long socket; | 58 | int sockfd; |
59 | char lbusid[SYSFS_BUS_ID_SIZE]; | 59 | char lbusid[SYSFS_BUS_ID_SIZE]; |
60 | 60 | ||
61 | ret = sscanf(c, "%d %d %d %x %lx %31s\n", | 61 | ret = sscanf(c, "%d %d %d %x %u %31s\n", |
62 | &port, &status, &speed, | 62 | &port, &status, &speed, |
63 | &devid, &socket, lbusid); | 63 | &devid, &sockfd, lbusid); |
64 | 64 | ||
65 | if (ret < 5) { | 65 | if (ret < 5) { |
66 | dbg("sscanf failed: %d", ret); | 66 | dbg("sscanf failed: %d", ret); |
@@ -69,7 +69,7 @@ static int parse_status(const char *value) | |||
69 | 69 | ||
70 | dbg("port %d status %d speed %d devid %x", | 70 | dbg("port %d status %d speed %d devid %x", |
71 | port, status, speed, devid); | 71 | port, status, speed, devid); |
72 | dbg("socket %lx lbusid %s", socket, lbusid); | 72 | dbg("sockfd %u lbusid %s", sockfd, lbusid); |
73 | 73 | ||
74 | 74 | ||
75 | /* if a device is connected, look at it */ | 75 | /* if a device is connected, look at it */ |
diff --git a/tools/usb/usbip/src/usbip.c b/tools/usb/usbip/src/usbip.c index d7599d943529..73d8eee8130b 100644 --- a/tools/usb/usbip/src/usbip.c +++ b/tools/usb/usbip/src/usbip.c | |||
@@ -176,6 +176,8 @@ int main(int argc, char *argv[]) | |||
176 | break; | 176 | break; |
177 | case '?': | 177 | case '?': |
178 | printf("usbip: invalid option\n"); | 178 | printf("usbip: invalid option\n"); |
179 | /* Terminate after printing error */ | ||
180 | /* FALLTHRU */ | ||
179 | default: | 181 | default: |
180 | usbip_usage(); | 182 | usbip_usage(); |
181 | goto out; | 183 | goto out; |
diff --git a/tools/usb/usbip/src/usbip_bind.c b/tools/usb/usbip/src/usbip_bind.c index fa46141ae68b..e121cfb1746a 100644 --- a/tools/usb/usbip/src/usbip_bind.c +++ b/tools/usb/usbip/src/usbip_bind.c | |||
@@ -144,6 +144,7 @@ static int bind_device(char *busid) | |||
144 | int rc; | 144 | int rc; |
145 | struct udev *udev; | 145 | struct udev *udev; |
146 | struct udev_device *dev; | 146 | struct udev_device *dev; |
147 | const char *devpath; | ||
147 | 148 | ||
148 | /* Check whether the device with this bus ID exists. */ | 149 | /* Check whether the device with this bus ID exists. */ |
149 | udev = udev_new(); | 150 | udev = udev_new(); |
@@ -152,8 +153,16 @@ static int bind_device(char *busid) | |||
152 | err("device with the specified bus ID does not exist"); | 153 | err("device with the specified bus ID does not exist"); |
153 | return -1; | 154 | return -1; |
154 | } | 155 | } |
156 | devpath = udev_device_get_devpath(dev); | ||
155 | udev_unref(udev); | 157 | udev_unref(udev); |
156 | 158 | ||
159 | /* If the device is already attached to vhci_hcd - bail out */ | ||
160 | if (strstr(devpath, USBIP_VHCI_DRV_NAME)) { | ||
161 | err("bind loop detected: device: %s is attached to %s\n", | ||
162 | devpath, USBIP_VHCI_DRV_NAME); | ||
163 | return -1; | ||
164 | } | ||
165 | |||
157 | rc = unbind_other(busid); | 166 | rc = unbind_other(busid); |
158 | if (rc == UNBIND_ST_FAILED) { | 167 | if (rc == UNBIND_ST_FAILED) { |
159 | err("could not unbind driver from device on busid %s", busid); | 168 | err("could not unbind driver from device on busid %s", busid); |
diff --git a/tools/usb/usbip/src/usbip_list.c b/tools/usb/usbip/src/usbip_list.c index d5ce34a410e7..ac6081c3db82 100644 --- a/tools/usb/usbip/src/usbip_list.c +++ b/tools/usb/usbip/src/usbip_list.c | |||
@@ -180,6 +180,7 @@ static int list_devices(bool parsable) | |||
180 | const char *busid; | 180 | const char *busid; |
181 | char product_name[128]; | 181 | char product_name[128]; |
182 | int ret = -1; | 182 | int ret = -1; |
183 | const char *devpath; | ||
183 | 184 | ||
184 | /* Create libudev context. */ | 185 | /* Create libudev context. */ |
185 | udev = udev_new(); | 186 | udev = udev_new(); |
@@ -202,6 +203,14 @@ static int list_devices(bool parsable) | |||
202 | path = udev_list_entry_get_name(dev_list_entry); | 203 | path = udev_list_entry_get_name(dev_list_entry); |
203 | dev = udev_device_new_from_syspath(udev, path); | 204 | dev = udev_device_new_from_syspath(udev, path); |
204 | 205 | ||
206 | /* Ignore devices attached to vhci_hcd */ | ||
207 | devpath = udev_device_get_devpath(dev); | ||
208 | if (strstr(devpath, USBIP_VHCI_DRV_NAME)) { | ||
209 | dbg("Skip the device %s already attached to %s\n", | ||
210 | devpath, USBIP_VHCI_DRV_NAME); | ||
211 | continue; | ||
212 | } | ||
213 | |||
205 | /* Get device information. */ | 214 | /* Get device information. */ |
206 | idVendor = udev_device_get_sysattr_value(dev, "idVendor"); | 215 | idVendor = udev_device_get_sysattr_value(dev, "idVendor"); |
207 | idProduct = udev_device_get_sysattr_value(dev, "idProduct"); | 216 | idProduct = udev_device_get_sysattr_value(dev, "idProduct"); |
diff --git a/tools/usb/usbip/src/utils.c b/tools/usb/usbip/src/utils.c index 2b3d6d235015..3d7b42e77299 100644 --- a/tools/usb/usbip/src/utils.c +++ b/tools/usb/usbip/src/utils.c | |||
@@ -30,6 +30,7 @@ int modify_match_busid(char *busid, int add) | |||
30 | char command[SYSFS_BUS_ID_SIZE + 4]; | 30 | char command[SYSFS_BUS_ID_SIZE + 4]; |
31 | char match_busid_attr_path[SYSFS_PATH_MAX]; | 31 | char match_busid_attr_path[SYSFS_PATH_MAX]; |
32 | int rc; | 32 | int rc; |
33 | int cmd_size; | ||
33 | 34 | ||
34 | snprintf(match_busid_attr_path, sizeof(match_busid_attr_path), | 35 | snprintf(match_busid_attr_path, sizeof(match_busid_attr_path), |
35 | "%s/%s/%s/%s/%s/%s", SYSFS_MNT_PATH, SYSFS_BUS_NAME, | 36 | "%s/%s/%s/%s/%s/%s", SYSFS_MNT_PATH, SYSFS_BUS_NAME, |
@@ -37,12 +38,14 @@ int modify_match_busid(char *busid, int add) | |||
37 | attr_name); | 38 | attr_name); |
38 | 39 | ||
39 | if (add) | 40 | if (add) |
40 | snprintf(command, SYSFS_BUS_ID_SIZE + 4, "add %s", busid); | 41 | cmd_size = snprintf(command, SYSFS_BUS_ID_SIZE + 4, "add %s", |
42 | busid); | ||
41 | else | 43 | else |
42 | snprintf(command, SYSFS_BUS_ID_SIZE + 4, "del %s", busid); | 44 | cmd_size = snprintf(command, SYSFS_BUS_ID_SIZE + 4, "del %s", |
45 | busid); | ||
43 | 46 | ||
44 | rc = write_sysfs_attribute(match_busid_attr_path, command, | 47 | rc = write_sysfs_attribute(match_busid_attr_path, command, |
45 | sizeof(command)); | 48 | cmd_size); |
46 | if (rc < 0) { | 49 | if (rc < 0) { |
47 | dbg("failed to write match_busid: %s", strerror(errno)); | 50 | dbg("failed to write match_busid: %s", strerror(errno)); |
48 | return -1; | 51 | return -1; |
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index cb092bd9965b..d080f06fd8d9 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
@@ -986,7 +986,7 @@ int __kvm_set_memory_region(struct kvm *kvm, | |||
986 | * changes) is disallowed above, so any other attribute changes getting | 986 | * changes) is disallowed above, so any other attribute changes getting |
987 | * here can be skipped. | 987 | * here can be skipped. |
988 | */ | 988 | */ |
989 | if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) { | 989 | if (as_id == 0 && (change == KVM_MR_CREATE || change == KVM_MR_MOVE)) { |
990 | r = kvm_iommu_map_pages(kvm, &new); | 990 | r = kvm_iommu_map_pages(kvm, &new); |
991 | return r; | 991 | return r; |
992 | } | 992 | } |